query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: list (lengths 19 to 20)
metadata: dict
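Each row below pairs a natural-language query with one positive document and a list of hard negatives; the per-row metadata declares a single triplet objective over (query, document, negatives). A minimal sketch of how one such row could be consumed for triplet training (the row literal is abridged and purely illustrative):

row = {
    "query": "Load a tiled image.",
    "document": "def load_tiled_image(img_meta): ...",
    "negatives": ["def load_tile(path, tile_size): ..."],
    "metadata": {"objective": {"paired": [], "self": [],
                               "triplet": [["query", "document", "negatives"]]}},
}

# The triplet objective names which fields act as anchor, positive and negatives.
for anchor_key, positive_key, negatives_key in row["metadata"]["objective"]["triplet"]:
    anchor = row[anchor_key]        # the query text
    positive = row[positive_key]    # the matching code snippet
    negatives = row[negatives_key]  # hard-negative code snippets
    print(anchor, "->", positive[:30], "| negatives:", len(negatives))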
Metainformation for RDF output
def rdfMeta(self): return { 'label' : {'uri' : [ RdfURI('skos:prefLabel'), RdfURI('dcel:title') ] }, 'description' : {'uri' : [ RdfURI('v:description'), RdfURI('dcel:description'), RdfURI('rev:text'), RdfURI('bibtex:abstract') ], 'property' : 'get_description' }, 'context' : {'uri' : 'skos:inScheme', 'co...
[ "def get_metadata(self, g, item, type='Dataset'):\n DCAT = Namespace('http://www.w3.org/ns/dcat#')\n SMA = Namespace('http://schema.org/')\n meta = dict()\n #default sparql\n #meta = self.get_default_metadata(g)\n self.logger.info('FsF-F2-01M : Trying to get some core domai...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save an image as a collection of tiles. The image is split into a set of fixed-sized tiles (with the exception of the rightmost and bottommost ones).
def save_tiled_image(img, root, level, tile_geom, img_type="jpeg"): assert(img.ndim == 2 or (img.ndim == 3 and img.shape[2] <= 3)) n_channels = 1 if img.ndim == 2 else img.shape[2] dst_path = root + os.path.sep + 'level_{:d}'.format(level) tg = (min(tile_geom[0], img.shape[1]), min(tile_geom[1], img.s...
[ "def make_tiles(self):\n num_tiles = self._puzzle_height * self._puzzle_width\n #subsurface is a ract(left, top, width, height\n \n for idx in xrange(num_tiles):\n self._tiles.append(self._tiles_sprite.subsurface(\n (idx * TILE_SIZE, 0, TILE_SIZE, TILE_SIZE)))",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load a tiled image. All the information about the tile geometry and tile paths is taken from img_meta.
def load_tiled_image(img_meta): img_w, img_h = long(img_meta['level_image_width']), long(img_meta['level_image_height']) nh, nv = long(img_meta['n_tiles_horiz']), long(img_meta['n_tiles_vert']) img = np.zeros((img_h, img_w, 3), dtype=np.uint8) for i in range(nv): for j in range(nh): ...
[ "def load_tile(path, tile_size):\n img = pyglet.resource.image(path)\n img.width = tile_size\n img.height = tile_size\n return img", "def load_images_pygame(tmxdata, mapping, *args, **kwargs):\n from itertools import product\n from pygame import Surface\n import pygame, os\n\n\n def handle...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves the sound for the given animal, and prints it with animation.
async def speak(animal, session): response = await session.get( 'https://ericappelt.com/animals/{0}'.format(animal) ) sound = await response.text() radprint('The {0} says "{1}".'.format(animal, sound))
[ "def make_a_sound():\n print('quack')", "def random_animal_sound():\n\n sounds = ['moo', 'quack', 'bark', 'roar', 'meow']\n return random.choice(sounds)", "def goes(self):\n\n animal_name = self.__class__.__name__.lower()\n print('The %s goes \"%s!\"' % (animal_name, self.sound))", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Context manager to patch joblib so that it reports into the tqdm progress bar given as argument
def tqdm_joblib(tqdm_object): def tqdm_print_progress(self): if self.n_completed_tasks > tqdm_object.n: n_completed = self.n_completed_tasks - tqdm_object.n tqdm_object.update(n=n_completed) original_print_progress = joblib.parallel.Parallel.print_progress joblib.parallel.P...
[ "def tqdm(self, iterable, **kwargs):\n if self.verbose:\n if \"file\" not in kwargs:\n kwargs[\"file\"] = sys.stdout\n return tqdm(iterable, **kwargs)\n return iterable", "def test_set_progress(self):\n pass", "def get_progress_bar():\n if isnotebook(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns portal-wide groups.
def getPortalGroups(self): for principal in principalRegistry.getPrincipals(''): if IGroupAwarePrincipal.providedBy(principal): continue yield principal.id, principal.title
[ "def Groups(self) -> GroupCollection:", "def get_public_groups():\n public_groups = MrMapGroup.objects.filter(\n is_public_group=True\n )\n return public_groups", "def category_groups(self):\n\n return self._GET('category_groups')", "def get_all_template_groups(self) -> dict:\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a User Credential for an S3 Storage Gateway
def s3( login_manager: LoginManager, *, endpoint_id: uuid.UUID, storage_gateway: uuid.UUID, globus_identity: str, local_username: str, s3_key_id: str, s3_secret_key: str, display_name: str | None, ) -> None: gcs_client = login_manager.get_gcs_client(endpoint_id=endpoint_id) a...
[ "def _get_s3_creds(client):\n access_key = client.config.plugin_get_value('access-key')\n secret_key = client.config.plugin_get_value('secret-key')\n\n if access_key is None:\n # this means there are no stored s3 creds for this user - set them up\n\n # before we do anything, can they do objec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the AuthUser.profile object, creating the record if it doesn't exist.
def get_profile(self, request=None): if not request: request = get_current_request() auth_profile = request.registry.settings.get('apex.auth_profile') if auth_profile: resolver = DottedNameResolver(auth_profile.split('.')[0]) profile_cls = resolver.resolve(au...
[ "def create_user_profile(self, user: User) -> Person:\n # The user already has a profile.\n if hasattr(user, \"profile\"):\n user.profile.login_count += 1\n user.profile.save()\n\n return user.profile\n\n # The user doesn't have a profile, so let's try and find ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flatten comments into a list of Comment objects using a tree traversal.
def flatten_comments(root_comments): all_comments = [] nodes = root_comments[:] while nodes: node = nodes.pop() data = node['data'] if 'body' not in data: # # weird child node # continue comment = Comment(data['body'], int(data[...
[ "def get_flattened_comments(self) -> List[Comment]:\n return self.comments.list()", "def flatten(self, comment=None):\n\t\tprint 'flattening'\n\t\tif comment is None:\n\t\t\tprint 'comment is none'\n\t\t\tcomment = self.commentlist[0]\n\t\twhile isinstance(comment, praw.models.Comment):\n\t\t\tprint commen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the number of files that will be processed.
def file_count(self): return len(self.file_names)
[ "def number_of_files(self) -> int:\n return pulumi.get(self, \"number_of_files\")", "def file_count(self):\n\t\tif self.mode != PF_READ:\n\t\t\treturn -1\n\t\treturn len(self.files)", "def file_count(self):\n return sum([len(fls) for fls in self.file_list.values()])", "def count():\n\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Implement this function in a subclass to handle DB commits per file processed.
def _commit(self): pass
[ "def abstract_attempt_commit():", "def c_commit(self, args):\n log.info('forcing commit')\n self.db.commit()", "def commit(self):\n if self._dblog:\n self._feedlgr.commit()", "def process_post_commit(self, svn_commit):\n\n raise NotImplementedError()", "def commit(self):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call a JSON data processor function given its base name.
def call_process_func(self, name, id, json_data): process_function = '_process_' + name try: function = getattr(self, process_function, None) if function is not None: function(id, json_data) else: self.root_logger.warning("No handler %s...
[ "def main(str_json):\n data_dict = convert_json_to_dictionary(str_json)\n\n if data_dict['request_type'] == 'load_data':\n load_data(data_dict)\n\n elif data_dict['request_type'] == 'modify_data':\n modify_data(data_dict)\n\n elif data_dict['request_type'] == 'export_data':\n export...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Where the magic happens. Finds a threshold that will limit the number of params in the network to the tracked_size, and resets those params to the initial value to emulate how DropBack would work in real hardware. Chainer will calculate all grads, and this updater inserts itself before the next forward pass can occur t...
def update(self): if self.first_iter: self.first_iter = False self.params = [i for i in self.opt.target.params()] for i, p in enumerate(self.params): self.init_params.append(xp.copy(p.data)) if not os.path.exists(self.output_dir): o...
[ "def reset_parameters(self) -> None:\n if hasattr(self.hopfield, r'reset_parameters'):\n self.hopfield.reset_parameters()\n\n # Explicitly initialise pooling weights.\n nn.init.normal_(self.pooling_weights, mean=0.0, std=0.02)", "def reset_grads(self):\n for dparam in self.d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a file and create a list of dictionaries of URL parameters, if the key 'pageName' is present
def create_parsed_dicts(file, list_of_var=None): req = [] firstlines = [] parsed_urls = [] with_pageName_urls = [] lower_list_of_keys = [i.lower() for i in list_of_var] specified_key_list_of_dicts = [] with open(file) as json_file: data = json.load(json_file) for p in data: ...
[ "def parse_page(fpath):\n f = open(fpath, 'r')\n content_begin = False\n d = {}\n f_link = []\n for line in f:\n if line == '---------------------\\n':\n content_begin = True\n continue\n if content_begin == False:\n page_id = int( line.strip().replace('...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a Pandas DataFrame to an Excel-readable format
def convert_to_excel(df, file_name): df_excel = df.to_excel(file_name) return df_excel
[ "def test_convert_df_to_excel_file():\n in_df = pd.DataFrame([[1, 2], [1, 2]])\n expected_df = pd.DataFrame([[1, 2], [1, 2]])\n\n out_excel_file = convert_df_to_excel_file(in_df, index=False)\n out_df = pd.read_excel(out_excel_file)\n\n assert_frame_equal(out_df, expected_df)", "def export_data_fra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates writers for files
def create_writers( image_path: Path, files: list, output_folder: Path, tmp_folder: Path, ) -> list: writers = [] # get info with WholeSlideImage(image_path) as wsi: shape = wsi.shapes[wsi.get_level_from_spacing(SPACING)] real_spacing = wsi.get_real_spacing(SPACING) fo...
[ "def createFiles() -> None:\n\n try:\n mkdir('C:/tmp/')\n except:\n pass\n try:\n mkdir(path)\n except:\n pass\n open(dirfile, 'w+')\n open(path + 'Bank.txt', 'w+')\n open(expenseDtbPath, 'w+')\n open(path + 'FirstTime.txt', 'w+')\n open(path + 'LastOpened.txt'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delayed search for Kinopoisk links in the references of a Wikipedia page
def search_link_signal(content_type_id, object_id, page, **_): # https://github.com/goldsmith/Wikipedia/issues/78 try: links = page.references except KeyError: return else: search_link.delay(content_type_id, object_id, links, page.html())
[ "def wiki_thread(goal, article, queue, visited, sub_articles, keywords, sleeptime=0.01):\n title_exp = re.compile(\"<title>(.+) -.+</title>\")\n\n while True:\n time.sleep(sleeptime) # Slight delay to avoid denied responses\n l = sub_articles.next_article()\n # Stop when article list is e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run the send/receive loop. For each cycle, receive from the server and send a response. Break the loop when the final message (BYE) is received from the server, or the message is empty.
def send_recv_loop(conn): while True: message = conn.recv(256).decode() if message == "": raise EmptyMessageException("Message from server empty. Something went wrong.") final = parse_message(conn, message) if final: break
[ "def server_loop(self):\n \n self.sock.listen(1)\n\n #Wait for connection from client\n while(True):\n\n self.logger.info(\"Waiting for client to connect...\")\n\n connection, client_address = self.sock.accept()\n data = \"\"\n\n self.logger.in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is the entry point for the program. To run the program, run the following on the command line: ./client [hostname] [NEU ID]. Run "./client h" for more info
def main(args): try: conn = make_connection(args.secure, args.hostname, args.port) conn.sendall("cs5700spring2015 HELLO {}\n".format(args.id).encode()) send_recv_loop(conn) except Exception as e: print(e) sys.exit(0)
[ "def main():\n global sock\n global handlers\n\n load_configuration()\n log(\"%s, starting...\" % const.SERVER_INFO, \"info\")\n # create queue for threading\n q = queue.Queue()\n for i in range(const.THREADS):\n handler = ClientHandler(q)\n handler.setDaemon(True)\n handle...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get (completed) decommission_device workflows for device.
def get_decommission_device_wfs(self, device_id, state='COMPLETED'): qs = { 'q': 'workflowType IN (%s) AND status IN (%s) AND input.device_id IN (%s)' % \ ('decommission_device', state, device_id) } return self.__get_workflows(qs)
[ "def decommission_device():\n rsp = self.api_devauth_mgmt.with_auth(user.token).call(\n \"DELETE\", deviceauth.URL_DEVICE.format(id=dev.id),\n )\n assert rsp.status_code == 204", "def get_degradations(graph):\n return get_nodes(graph, is_degraded)", "def get_de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get workflows according to a search query.
def __get_workflows(self, query): qs = { 'q': query, } rsp = requests_retry().get(self.addr+self.API_WF_SEARCH, params=qs) rsp.raise_for_status() return rsp.json()
[ "def search(cls, email=\"\", name=\"\", tags=None, client=None):\n if client is None:\n client = get_global_grpc_client()\n\n stream = client.api[\"SearchWorkflows\"](\n workflow_pb2.SearchWorkflowsRequest(\n email=email, name_prefix=name, tags=tags\n ),...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that defines the movement of bacteria in the XY direction
def plane_move(self): #Move bacteria in xy plane # Generate random number from which xy movement will be decided randnum = random.random() # 5% chance of bacteria moving in -ve x direction if randnum <= self.prob_west: ...
[ "def move(self):\n if self.direction == \"n\":\n self.position = (self.position[0]-1, self.position[1])\n\n elif self.direction == \"s\":\n self.position = (self.position[0]+1, self.position[1])\n\n elif self.direction == \"e\":\n self.position = (self.position[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate git repo access (via HTTP GET) [EXPERIMENTAL]. url: /magen/policy/v2/validation/repository//; request.args['username']: scm system username; request.args['client_id']: magen client id; request.args['application']: e.g. git; request.args['action']: e.g. clone
def policy_validate_repository_access(repositoryId): pstate = PolicyState() pstate.test_mode = True logger = logging.getLogger(LogDefaults.default_log_name) logger.debug("validate_repo_access: request: %s request.args: %s", request, request.args) args_ok, badargs_cause = pstate.rest_api_required_ar...
[ "def test_rejects_non_github_urls():\r\n\tassert sanityCheck(\"https://google.com/angular/angular.git\") == False", "def test_accepts_github_urls_only():\r\n\tassert sanityCheck(\"https://github.com/facebook/react.git\") == True\r\n\tassert sanityCheck(\"https://github.com/angular/angular.git\") == True", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return list of entitlements (policy instances) for client (via HTTP GET) [TROUBLESHOOTING]. url: /magen/policy/v2/entitlements/; request.args['midToken']: magen_id token, to filter to client (mandatory); request.args['action']: access action, to filter by action (optional); request.args['application']: application for which ac...
def policy_entitlements_get_by_client(): pstate = PolicyState() # pstate.test_mode = True logger = logging.getLogger(LogDefaults.default_log_name) logger.debug("get entitlements v2: request: %s request.args: %s", request, request.args) args_ok, badargs_cause = pstate.rest_api_required_args_validate...
[ "def policy_entitlements_get_one_by_pi():\n pstate = PolicyState()\n\n args_ok, badargs_cause = pstate.rest_api_required_args_validate(\n request.args, ['midToken', 'pi_uuid'])\n if not args_ok:\n return RestServerApis.respond(\n HTTPStatus.NOT_FOUND, \"Client Entitlement\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return client entitlement for supplied policy instance (via HTTP GET) [TROUBLESHOOTING]. url: /magen/policy/v2/entitlements/entitlement?; request.args['midToken']: magen_id token, to filter to client (mandatory); request.args['pi_uuid']: policy instance identifier
def policy_entitlements_get_one_by_pi(): pstate = PolicyState() args_ok, badargs_cause = pstate.rest_api_required_args_validate( request.args, ['midToken', 'pi_uuid']) if not args_ok: return RestServerApis.respond( HTTPStatus.NOT_FOUND, "Client Entitlement", {"succes...
[ "def policy_entitlements_get_by_client():\n pstate = PolicyState()\n # pstate.test_mode = True\n logger = logging.getLogger(LogDefaults.default_log_name)\n logger.debug(\"get entitlements v2: request: %s request.args: %s\", request, request.args)\n\n args_ok, badargs_cause = pstate.rest_api_required_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute the conditions of the device. If all conditions are true, add time to true_time. If the true time exceeds the threshold time (mht), flag the excess operation.
def on_schedule(self): conditions = self.condition_list.get("conditions") if all([parse_expr(condition).subs(self.condition_data)\ for condition in conditions]): self.device_true_time += self.interval self.device_status = True _log.debug('All condit...
[ "def cond_test(self):\n self.vert_cond.home()\n self.horz_cond.home()\n # 4000 is the right step for cond_probe horizontal move to analyse\n self.horz_cond.move_to(4000)\n self.vert_cond.move_to(40000)\n print('conductivity analysing')\n time.sleep(10)\n self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the trajectory using the SciPy ODE integrator
def scipy_trajectory(self): self.xv = odeint(self.derivative, self.xv0, self.tarray)
[ "def integrate(f, x0, times, algorithm='lsoda'):\n integrator = spint.ode(algorithm)\n integrator.set_initial_value(x0)\n trajectory = np.empty((times.shape[0], x0.shape[0]))\n for i, t in enumerate(times):\n trajectory[i] = integrator.integrate(t)\n if not integrator.successful():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.description for Group Length element
def test_description_group_length(self): elem = DataElement(0x00100000, 'LO', 12345) assert 'Group Length' == elem.description()
[ "def __len__(self) -> int:\n return len(self.groups[0])", "def test_grouping_attribute() -> None:\n g = Grouping()\n assert g._groups == []", "def test_description_unknown(self):\n elem = DataElement(0x00000004, 'LO', 12345)\n assert '' == elem.description()", "def getDataUnitCount(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.description with an unknown private element
def test_description_unknown_private(self): elem = DataElement(0x00110010, 'LO', 12345) elem.private_creator = 'TEST' assert 'Private tag data' == elem.description() elem = DataElement(0x00110F00, 'LO', 12345) assert elem.tag.is_private assert elem.private_creator is None...
[ "def test_description_unknown(self):\n elem = DataElement(0x00000004, 'LO', 12345)\n assert '' == elem.description()", "def test_private_repeater_tag(self):\n ds = Dataset()\n ds[0x60210012] = RawDataElement(\n Tag(0x60210012), None, 12, b'PAPYRUS 3.0 ', 0, True, True)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.description with an unknown element
def test_description_unknown(self): elem = DataElement(0x00000004, 'LO', 12345) assert '' == elem.description()
[ "def test_description_unknown_private(self):\n elem = DataElement(0x00110010, 'LO', 12345)\n elem.private_creator = 'TEST'\n assert 'Private tag data' == elem.description()\n elem = DataElement(0x00110F00, 'LO', 12345)\n assert elem.tag.is_private\n assert elem.private_crea...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.__ne__ for standard element
def test_inequality_standard(self): dd = DataElement(0x00100010, 'PN', 'ANON') assert not dd != dd assert DataElement(0x00100010, 'PN', 'ANONA') != dd # Check tag assert DataElement(0x00100011, 'PN', 'ANON') != dd # Check VR assert DataElement(0x00100010, 'SH', ...
[ "def __neq__(self, block_data):\n return not self == block_data", "def __ne__(self, node2):\n\t\t#return self._element == node2._element and self._name == node2._name\n\t\treturn not self == node2", "def _attr_ne(self, name, value):\n self._attr_present(name)\n self.filters.append(lambda el...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.__ne__ for sequence element
def test_inequality_sequence(self): dd = DataElement(0x300A00B0, 'SQ', []) assert not dd != dd assert not DataElement(0x300A00B0, 'SQ', []) != dd ee = DataElement(0x300A00B0, 'SQ', [Dataset()]) assert ee != dd # Check value dd.value = [Dataset()] dd[0].Pa...
[ "def test_inequality_standard(self):\n dd = DataElement(0x00100010, 'PN', 'ANON')\n assert not dd != dd\n assert DataElement(0x00100010, 'PN', 'ANONA') != dd\n\n # Check tag\n assert DataElement(0x00100011, 'PN', 'ANON') != dd\n\n # Check VR\n assert DataElement(0x00...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test hash(DataElement) raises TypeError
def test_hash(self): with pytest.raises(TypeError, match=r"unhashable"): hash(DataElement(0x00100010, 'PN', 'ANON'))
[ "def validate_hash_data(hashtype, hashsum):\n if hashtype not in hashlib.algorithms_available:\n return False\n try:\n int(hashsum, 16)\n except ValueError:\n return False\n hashd = getattr(hashlib, hashtype)()\n hashd.update('blah')\n if le...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.__str__ output with no VR
def test_str_no_vr(self): elem = DataElement(0x00100010, 'PN', 'ANON') assert "(0010, 0010) Patient's Name" in str(elem) assert "PN: 'ANON'" in str(elem) elem.showVR = False assert "(0010, 0010) Patient's Name" in str(elem) assert 'PN' not in str(elem)
[ "def as_string(self, element):\n raise NotImplementedError()", "def reprLRData(s):\n return repr(s)", "def test_str_method(self):\n sq8 = Square(2, id=99)\n str_s = sq8.__str__()\n self.assertEqual(str_s, '[Square] (99) 0/0 - 2')", "def test_repr(self):\n attr = Attribute...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.__repr__ with a sequence
def test_repr_seq(self): elem = DataElement(0x300A00B0, 'SQ', [Dataset()]) elem[0].PatientID = '1234' assert repr(elem) == repr(elem.value)
[ "def test_repr(self):\n attr = Attribute(\"device\", \"name\")\n assert repr(attr) == '<Attribute(\"device\", \"name\")>'", "def test_sparsearray_repr():\n sa = SparseArray(LIST1)\n\n print(repr(LIST1))\n assert repr(sa) == repr(LIST1)", "def reprLRData(s):\n return repr(s)", "def te...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.__getitem__ raises if the value is not indexable
def test_getitem_raises(self): elem = DataElement(0x00100010, 'LO', 12345) with pytest.raises(TypeError): elem[0]
[ "def test_getitem_setitem_not_implemented():", "def testAccessIncorrectIndex(self):\n self.assertRaises(ValueError,\n self.manager.ifDescr.__getitem__, (47, 18))\n self.assertRaises(ValueError,\n self.manager.ifDescr.__getitem__, \"nothing\")", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.repval doesn't return a huge string for a large value
def test_repval_large_elem(self): elem = DataElement(0x00820003, 'UT', 'a' * 1000) assert len(elem.repval) < 100
[ "def test_repval_large_vm(self):\n elem = DataElement(0x00080054, 'AE', 'a\\\\' * 1000 + 'a')\n assert len(elem.repval) < 100", "def test_repval_strange_type(self):\n elem = DataElement(0x00020001, 'OB', 0)\n assert len(elem.repval) < 100", "def _get_cleaned_value(self, data_element)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.repval doesn't return a huge string for a large vm
def test_repval_large_vm(self): elem = DataElement(0x00080054, 'AE', 'a\\' * 1000 + 'a') assert len(elem.repval) < 100
[ "def test_repval_large_elem(self):\n elem = DataElement(0x00820003, 'UT', 'a' * 1000)\n assert len(elem.repval) < 100", "def test_repval_strange_type(self):\n elem = DataElement(0x00020001, 'OB', 0)\n assert len(elem.repval) < 100", "def _reduce_response(data: str) -> str:\n try:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test DataElement.repval doesn't break with bad types
def test_repval_strange_type(self): elem = DataElement(0x00020001, 'OB', 0) assert len(elem.repval) < 100
[ "def test_repval_large_elem(self):\n elem = DataElement(0x00820003, 'UT', 'a' * 1000)\n assert len(elem.repval) < 100", "def test_repval_large_vm(self):\n elem = DataElement(0x00080054, 'AE', 'a\\\\' * 1000 + 'a')\n assert len(elem.repval) < 100", "def test_inequality_sequence(self):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an unknown private tag (e.g. a tag not in the private dictionary) in the repeater range is not handled as a repeater tag if using Implicit Little Endian transfer syntax.
def test_private_tag_in_repeater_range(self): # regression test for #689 ds = Dataset() ds[0x50f10010] = RawDataElement( Tag(0x50f10010), None, 8, b'FDMS 1.0', 0, True, True) ds[0x50f1100a] = RawDataElement( Tag(0x50f1100a), None, 6, b'ACC0.6', 0, True, True) ...
[ "def test_private_repeater_tag(self):\n ds = Dataset()\n ds[0x60210012] = RawDataElement(\n Tag(0x60210012), None, 12, b'PAPYRUS 3.0 ', 0, True, True)\n ds[0x60211200] = RawDataElement(\n Tag(0x60211200), None, 6, b'123456', 0, True, True)\n private_creator_data_ele...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a known private tag in the repeater range is correctly handled using Implicit Little Endian transfer syntax.
def test_private_repeater_tag(self): ds = Dataset() ds[0x60210012] = RawDataElement( Tag(0x60210012), None, 12, b'PAPYRUS 3.0 ', 0, True, True) ds[0x60211200] = RawDataElement( Tag(0x60211200), None, 6, b'123456', 0, True, True) private_creator_data_elem = ds[0x60...
[ "def test_private_tag_in_repeater_range(self):\n # regression test for #689\n ds = Dataset()\n ds[0x50f10010] = RawDataElement(\n Tag(0x50f10010), None, 8, b'FDMS 1.0', 0, True, True)\n ds[0x50f1100a] = RawDataElement(\n Tag(0x50f1100a), None, 6, b'ACC0.6', 0, True,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Known tags with VR UN are correctly decoded.
def test_known_tags_with_UN_VR(self, replace_un_with_known_vr): ds = Dataset() ds[0x00080005] = DataElement(0x00080005, 'UN', b'ISO_IR 126') ds[0x00100010] = DataElement(0x00100010, 'UN', 'Διονυσιος'.encode('iso_ir_126')) ds.decode() assert 'C...
[ "def test_unknown_tags_with_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n ds[0x00111010] = DataElement(0x00111010, 'UN',\n 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'UN' == ds[0x00...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Known tags with VR UN are correctly read.
def test_reading_ds_with_known_tags_with_UN_VR( self, replace_un_with_known_vr): test_file = get_testdata_file('explicit_VR-UN.dcm') ds = dcmread(test_file) assert 'CS' == ds[0x00080005].VR assert 'TM' == ds[0x00080030].VR assert 'PN' == ds[0x00100010].VR asse...
[ "def test_unknown_tags_with_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n ds[0x00111010] = DataElement(0x00111010, 'UN',\n 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'UN' == ds[0x00...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unknown tags with VR UN are not decoded.
def test_unknown_tags_with_UN_VR(self): ds = Dataset() ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126') ds[0x00111010] = DataElement(0x00111010, 'UN', 'Διονυσιος'.encode('iso_ir_126')) ds.decode() assert 'UN' == ds[0x00111010].VR ...
[ "def test_known_tags_with_UN_VR(self, replace_un_with_known_vr):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'UN', b'ISO_IR 126')\n ds[0x00100010] = DataElement(0x00100010, 'UN',\n 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tag with length > 64kb with VR UN is not changed.
def test_tag_with_long_value_UN_VR(self): ds = Dataset() ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126') single_value = b'123456.789012345' large_value = b'\\'.join([single_value] * 4500) ds[0x30040058] = DataElement(0x30040058, 'UN', ...
[ "def encodeTagLength(cls, tag, length):\n raise NotImplementedError", "def set_vlen(self, vec_length):\n return _radio_astro_swig.detect_set_vlen(self, vec_length)", "def set_len(self, length):\n pass", "def get_tag(self, tag_len=16):\n tag = _FFI.new(\"unsigned char []\", tag_len)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test converting a raw element with unknown VR
def test_unknown_vr(self): raw = RawDataElement(Tag(0x00080000), 'AA', 8, b'20170101', 0, False, True) with pytest.raises(NotImplementedError): DataElement_from_raw(raw, default_encoding)
[ "def test_unknown_tags_with_UN_VR(self):\n ds = Dataset()\n ds[0x00080005] = DataElement(0x00080005, 'CS', b'ISO_IR 126')\n ds[0x00111010] = DataElement(0x00111010, 'UN',\n 'Διονυσιος'.encode('iso_ir_126'))\n ds.decode()\n assert 'UN' == ds[0x00...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change working directory to project folder and call mainloop.
def main(): srcDir = os.path.dirname(__file__) os.chdir(srcDir) Application().mainloop()
[ "def main_loop(self):\n # Start main loop thread (loop() handler)\n while True:\n if self._looping:\n # Call loop() handler\n self._berry.loop_client()", "def run(self):\r\n self.root.after(3000, self.__my_mainloop)\r\n self.root.mainloop()", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch the UDHR XML bundle from unicode.org to fetch_dir.
def fetch_udhr(fetch_dir): fetch_dir = tool_utils.ensure_dir_exists(fetch_dir) dstfile = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME) result = urllib.urlretrieve(UDHR_XML_ZIP_URL, dstfile) print 'Fetched: ' + result[0]
[ "def update_udhr(udhr_dir, fetch_dir, in_repo):\n\n zippath = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME)\n tool_utils.check_file_exists(zippath)\n\n if in_repo and os.path.isdir(udhr_dir) and not tool_utils.git_is_clean(udhr_dir):\n raise ValueError('Please clean %s.' % udhr_dir)\n\n if os.path.isdir(udhr_di...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete udhr_dir and rebuild with files extracted from udhr_xml.zip in fetch_dir. Stage if udhr_dir is in the repo.
def update_udhr(udhr_dir, fetch_dir, in_repo): zippath = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME) tool_utils.check_file_exists(zippath) if in_repo and os.path.isdir(udhr_dir) and not tool_utils.git_is_clean(udhr_dir): raise ValueError('Please clean %s.' % udhr_dir) if os.path.isdir(udhr_dir): shuti...
[ "def fetch_udhr(fetch_dir):\n fetch_dir = tool_utils.ensure_dir_exists(fetch_dir)\n dstfile = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME)\n result = urllib.urlretrieve(UDHR_XML_ZIP_URL, dstfile)\n print 'Fetched: ' + result[0]", "def remove_update_files(self):\n tar_file = os.path.join(htpc.RUNDIR, '%s....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse the index.xml file in src_dir and return a map from bcp to a set of file codes. Skip files at stages 1 (missing) or 2 (not started). Stage 3 files have article 1, which is what we want. Stage 4 and 5 are ok, the vast majority are unreviewed (4). In some cases more than one file is mapped to the same bcp47 code, t...
def parse_index(src_dir): tree = ET.parse(os.path.join(src_dir, 'index.xml')) bcp_to_codes = collections.defaultdict(set) code_to_ohchr = {} for e in tree.getroot().iter('udhr'): s = int(e.attrib.get('stage')) if s < 3: continue code = e.attrib.get('f') bcp = e.attrib.get('bcp47') ...
[ "def source_index(self):\n return os.path.join(self.data_directory, 'sources')", "def load_source_files(self):\n self.stage = [f for f in self.stage_filenames]\n\n for stage in self.STAGES:\n if self.stage_filenames[stage]:\n with open(self.stage_filenames[stage]) as...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add script subtags where they are not present in the bcp code.
def add_likely_scripts(bcp_to_code): result= {} for bcp, code in bcp_to_code.iteritems(): if code in CODE_TO_BCP: new_bcp = CODE_TO_BCP[code] else: new_bcp = bcp parts = bcp.split('-') try: script = generate_website_data.find_likely_script(parts[0]) if len(parts) == 1...
[ "def _filter_script_tags(input_xml):\n output_lines = []\n in_script = False\n for line in input_xml.splitlines():\n if \"<script>\" in line:\n in_script = True\n if not in_script:\n output_lines.append(line)\n if \"</script>\" in line:\n in_script = Fa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When we query this data, typically we have only language and script. Some of the bcp codes have variants or regions as well. Select one of these to be the default when we have only language and script.
def add_default_lang_script(bcp_to_code): options = collections.defaultdict(set) long_keys = {} for key in bcp_to_code: tags = key.split('-') if len(tags) > 2: long_keys[key] = tags for key in sorted(long_keys): tags = long_keys[key] lang_scr = tags[0] + '-' + tags[1] if lang_scr in b...
[ "def get_default_value_in_Libgen_translate_script_by_hw_type():\n file_root_and_name = '/opt/nokiasiemens/configure/py/Libgen_translate.py'\n command='echo $HW_PLATFORM'\n out = connections.execute_mml_without_check(command)\n hw_platform = out.strip()\n\n command1=\"\"\"sed -n \"/ATCA/,/^'''/p\" %s\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create samples in sample_dir from the sources in udhr_dir, based on the bcp_to_code mapping. Stage if sample_dir is in the repo. If sample_dir is in the repo, don't overwrite samples whose most recent log entry does not start with 'Updated by tool'
def update_samples(sample_dir, udhr_dir, bcp_to_code_attrib, in_repo): tool_utils.check_dir_exists(udhr_dir) if in_repo and os.path.isdir(sample_dir) and not tool_utils.git_is_clean(sample_dir): raise ValueError('Please clean %s.' % sample_dir) if in_repo: repo, subdir = os.path.split(sample_dir) t...
[ "def test_setup_merged_samples(self):\n flist = find_samples(j_doe_00_05)\n setup_merged_samples(flist, **{'dry_run':False})\n with open(os.path.join(j_doe_00_05, \"P001_101_index3\", \"TOTAL\", \"P001_101_index3-bcbb-config.yaml\")) as fh:\n conf = yaml.load(fh)\n self.assert...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the set of scripts in this text. Excludes some common chars.
def get_scripts(text): # ignore these chars, we assume they are ok in any script exclusions = {0x00, 0x0A, 0x0D, 0x20, 0xA0, 0xFEFF} zyyy_chars = set() scripts = set() ustr = unicode(text, 'utf8') for cp in ustr: if ord(cp) in exclusions: continue script = unicode_data.script(cp) if script...
[ "def get_scripts(text):\n\tstart = text.find(\"OTHER SCRIPTS\")\n\tend = text.find(\"\\n\", start)\n\treturn text[start:end].strip()", "def get_scripts(self):\n return []", "def script(self):\n return [\n p.text.strip()\n for p in self.xml.findall('p')\n if p.text ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report on differences between samples in source and target directories. The trg_to_src_name fn takes a target file name and returns the source file name to use in the comparisons.
def compare_samples(src_dir, trg_dir, trg_to_src_name=lambda x: x, opts=None): if not os.path.isdir(src_dir): print 'Original sample dir \'%s\' does not exist' % src_dir return if not os.path.isdir(trg_dir): print 'New sample dir \'%s\' does not exist' % trg_dir return print 'Base dir: %s' % src...
[ "def _make_source_file_name(self):\n source = self.target\n if isinstance(self.target_suffix, tuple):\n if self.target_suffix[0] and self.source_suffix:\n source = rreplace(source, self.target_suffix[0], self.source_suffix, 1)\n else:\n if self.target_suffix...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate over top-level transients of this subgraph.
def top_level_transients(self): schildren = self.scope_children() sdfg = self.parent result = set() for node in schildren[self.entry]: if isinstance(node, nd.AccessNode) and node.desc(sdfg).transient: result.add(node.data) return result
[ "def all_toplevel_checkboxes(self):\n\t\tif not self.checkboxes:\n\t\t\traise StopIteration()\n\n\t\tc = self.first_checkbox\n\t\twhile c:\n\t\t\tyield c\n\t\t\tc = c.next_sibling\n\t\traise StopIteration()", "def walk(self, topdown=True):\n\n if topdown:\n yield (self, self.subcollections, self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true iff scope of `node` contains the scope of `other_node`.
def scope_contains_scope(sdict: ScopeDictType, node: NodeType, other_node: NodeType) -> bool: curnode = other_node nodescope = sdict[node] while curnode is not None: curnode = sdict[curnode] if curnode == nodescope: return True return False
[ "def are_siblings(self, node, node_other):\n if node.parent is node_other.parent:\n return True\n else:\n return False", "def is_descendant_of(self, other, include_self=False):\n if other.pk == self.pk:\n return include_self\n\n return self._closure_mod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds a common parent scope for both input scopes, or None if the scopes are in different connected components.
def common_parent_scope(sdict: ScopeDictType, scope_a: NodeType, scope_b: NodeType) -> NodeType: if scope_a is scope_b: return scope_a # Scope B is in scope A if scope_contains_scope(sdict, scope_a, scope_b): return scope_a # Scope A is in scope B if scope_contains_scope(sdict, scop...
[ "def parent(self) -> Optional[Scope]:\n return self._parent", "def common_ancestor(parent_list_0, parent_list_1):\n for b in parent_list_0[::-1]:\n if b in parent_list_1:\n return b\n return None", "def scope(self):\n if self._scope is None:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether a node in an SDFG is contained within a certain set of scope schedules.
def is_in_scope(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType, schedules: List[dtypes.ScheduleType]) -> bool: while sdfg is not None: if state is not None and node is not None: sdict = state.scope_dict() scope = sdict[node] while sco...
[ "def check_schedule_against_stn(stn, schedule, epsilon=1e-10):\n for (u, v) in stn.edges():\n # Retrieve the STC for this edge\n lb, ub = stn[u][v]['stc']\n stc_satisfied = (schedule[v] - schedule[u] <= ub + epsilon) and (schedule[v] - schedule[u] >= lb - epsilon)\n if not stc_satisfi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether a node in an SDFG is contained within GPU device-level code.
def is_devicelevel_gpu(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType, with_gpu_default: bool = False) -> bool: if with_gpu_default: schedules = dtypes.GPU_SCHEDULES + [dtypes.ScheduleType.GPU_Default] else: ...
[ "def is_devicelevel_gpu_kernel(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool:\n is_parent_nested = (sdfg.parent is not None)\n if is_parent_nested:\n return is_devicelevel_gpu(sdfg.parent.parent, sdfg.parent, sdfg.parent_nsdfg_node, with_gpu_default=True)\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether a node in an SDFG is contained within an actual GPU kernel.
def is_devicelevel_gpu_kernel(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool: is_parent_nested = (sdfg.parent is not None) if is_parent_nested: return is_devicelevel_gpu(sdfg.parent.parent, sdfg.parent, sdfg.parent_nsdfg_node, with_gpu_default=True) else: retur...
[ "def is_devicelevel_gpu(sdfg: 'dace.sdfg.SDFG',\n state: 'dace.sdfg.SDFGState',\n node: NodeType,\n with_gpu_default: bool = False) -> bool:\n if with_gpu_default:\n schedules = dtypes.GPU_SCHEDULES + [dtypes.ScheduleType.GPU_Default]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether a node in an SDFG is contained within FPGA device-level code.
def is_devicelevel_fpga(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool: from dace.sdfg.utils import is_fpga_kernel return (is_in_scope(sdfg, state, node, [dtypes.ScheduleType.FPGA_Device]) or (state is not None and is_fpga_kernel(sdfg, state)))
[ "def is_devicelevel_gpu_kernel(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> bool:\n is_parent_nested = (sdfg.parent is not None)\n if is_parent_nested:\n return is_devicelevel_gpu(sdfg.parent.parent, sdfg.parent, sdfg.parent_nsdfg_node, with_gpu_default=True)\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the current threadblock size if the given node is enclosed in a GPU kernel, or None otherwise.
def devicelevel_block_size(sdfg: 'dace.sdfg.SDFG', state: 'dace.sdfg.SDFGState', node: NodeType) -> Tuple[symbolic.SymExpr]: from dace.sdfg import nodes as nd from dace.sdfg.sdfg import SDFGState while sdfg is not None: sdict = state.scope_dict() scope = sdict[nod...
[ "def node_size(self):\n return self._partitions[self.partition]", "def node_height(node):\n height = node.screenHeight()\n\n # In Nuke 7, a bug can prevent screenHeight() from reporting correctly.\n # In that case, it will return as 0.\n if not height:\n height = 18 if node.Class() != 'D...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the aliases of this ModifyLeaderboardEvent.
def aliases(self): return self._aliases
[ "def get_aliases(self):\n return self.aliases", "def aliases(self):\n return self._names[1:]", "def getAliases(self):\n return self.__aliases;", "def aliases(self):\n\n return self._aliases.copy()", "def ask_amazon_for_account_aliases(self):\n self._get_info(get_cached=Tru...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the aliases of this ModifyLeaderboardEvent.
def aliases(self, aliases): self._aliases = aliases
[ "def set_aliases (self, alias):\r\n self._check_alias_dict(alias, \"alias\")\r\n self.alias = alias", "def addAliases(self, aliases):\n assert isinstance(aliases, dict);\n\n for alias, identifier in aliases.items():\n self.setAlias(alias, identifier);", "def _add_aliases(c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the columns of this ModifyLeaderboardEvent.
def columns(self, columns): self._columns = columns
[ "def setColumns( self, names ):\n self.columns = names", "def setColumns(self, *args):\n if not args:\n self._column_to_role = {col: role for col, role in enumerate(itertools.chain(self._role_to_prop.keys(),\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the grid_search_view of this ModifyLeaderboardEvent.
def grid_search_view(self): return self._grid_search_view
[ "def grid_search_view(self, grid_search_view):\n \n self._grid_search_view = grid_search_view", "def get_gradebook_column_search_session(self):\n return # osid.grading.GradebookColumnSearchSession", "def get_grid_container(self):\n\t\treturn self._grid_container", "def get_gradebook_sear...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the grid_search_view of this ModifyLeaderboardEvent.
def grid_search_view(self, grid_search_view): self._grid_search_view = grid_search_view
[ "def grid_search_view(self):\n return self._grid_search_view", "def set_SearchOn(self, value):\n super(GetSeasonGroupsInputSet, self)._set_input('SearchOn', value)", "def add_grid_search(self):\n # Here to apply ramdom search to pipeline, need to follow naming \"rgs__paramname\"\n pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the numeric_channels of this ModifyLeaderboardEvent.
def numeric_channels(self): return self._numeric_channels
[ "def get_num_inchannels(self):\n return self.in_channels", "def get_channels(self):\n if self._channels is None:\n log.warn(\"get_channels called before check_for_update succeeded!\")\n return self._channels", "def get_channels_record(self):\n return self.channels_rcrds", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the numeric_channels of this ModifyLeaderboardEvent.
def numeric_channels(self, numeric_channels): self._numeric_channels = numeric_channels
[ "def _set_num_channels(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"num-channels\", parent=self, path_helper=self._path_helper, e...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the operation of this ModifyLeaderboardEvent.
def operation(self): return self._operation
[ "def get_op(self):\n if self.op is None:\n raise ValueError(\"%s: Operation undefined. Call compute_op before\"\n % self.get_label())\n return self.op", "def op_code(self):\n return self.__op_code", "def get_cellOperator(self):\n return self._op...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the sorted_by of this ModifyLeaderboardEvent.
def sorted_by(self): return self._sorted_by
[ "def sort_leaderboard(self):\n return self.order_by(\"-score\")", "def sorted_by(self, sorted_by):\n \n self._sorted_by = sorted_by", "def sort_leaderboard(self):\n return self.get_queryset().sort_leaderboard()", "def sort_order_by_key(self, sort_by):\n\n if self.current_sor...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the sorted_by of this ModifyLeaderboardEvent.
def sorted_by(self, sorted_by): self._sorted_by = sorted_by
[ "def sorted_by(self):\n return self._sorted_by", "def edited_by(self, edited_by):\n\n self._edited_by = edited_by", "def set_SortOn(self, value):\n super(GetSeasonGroupsInputSet, self)._set_input('SortOn', value)", "def reviewed_by(self, reviewed_by):\n\n self._reviewed_by = review...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the system_columns of this ModifyLeaderboardEvent.
def system_columns(self): return self._system_columns
[ "def system_columns(self, system_columns):\n \n self._system_columns = system_columns", "def getAllColumns (self):\n\n return self.columns", "def columns(self):\n return self.c", "def get_columns(self):\n return list(zip(*self.get_board()))", "def get_columns(self) -> dict:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the system_columns of this ModifyLeaderboardEvent.
def system_columns(self, system_columns): self._system_columns = system_columns
[ "def system_columns(self):\n return self._system_columns", "def columns(self, columns):\n \n self._columns = columns", "def setColumns( self, names ):\n self.columns = names", "def _update_columns(self, new_columns):\n for name, column in new_columns.items():\n self.c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the text_channels of this ModifyLeaderboardEvent.
def text_channels(self): return self._text_channels
[ "def get_channels(self):\n if self._channels is None:\n log.warn(\"get_channels called before check_for_update succeeded!\")\n return self._channels", "def getChannels(self):\r\n\t\tchannels = []\r\n\t\tfor row in self.db(self.db.user_channels.owner_id==self.user).select():\r\n\t\t\tchann...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the text_channels of this ModifyLeaderboardEvent.
def text_channels(self, text_channels): self._text_channels = text_channels
[ "def text_channels(self):\n return self._text_channels", "def set_channels(self, chan_list):\r\n on_chars = '!@#$'\r\n off_chars = '1234'\r\n out_string = ''\r\n for indx, chan in enumerate(chan_list):\r\n if chan == 1:\r\n out_string += on_chars[indx]\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses command line tools to filter trio VCF file and add PS tag
def filter_trio_vcf(trio_vcf, workdir, sample_name): trio_vcf_basename = os.path.basename(trio_vcf) if trio_vcf_basename.endswith('.vcf'): offset = -4 elif trio_vcf_basename.endswith('.vcf.gz'): offset = -7 else: return tmp_header = workdir + '/tmp_header.vcf' tmp_variant...
[ "def filter_pfcp(imsi,file_name):\r\n\tfilter_patten = '\\\"pfcp && e212.imsi == ' +imsi+ '\\\"'\r\n\tTfield = ' -Tfields -e pfcp.seqno'\r\n\tcmd = '\"C:\\\\Program Files\\\\wireshark\\\\tshark.exe\\\" -n -r \\\"' + file_name +'\\\" -2 -R ' +filter_patten + Tfield +' 2>null'\r\n\tprint(\"\\n\",cmd,\"\\n\")\r\n\r\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge filtered trio VCF and rephased 10x VCF
def merge_trio_10X_vcf(tenx_rephased, trio_filtered, workdir): tenx_trio_merged_vcf = workdir + '/10X_and_trio_merged.vcf' tenx_trio_merged_sorted_vcf = tenx_trio_merged_vcf[:-4] + '.sorted.vcf' tenx_trio_merged_sorted_zipped_vcf = tenx_trio_merged_sorted_vcf + '.gz' command_merge = ['bcftools', 'c...
[ "def filter_trio_vcf(trio_vcf, workdir, sample_name):\n trio_vcf_basename = os.path.basename(trio_vcf)\n if trio_vcf_basename.endswith('.vcf'):\n offset = -4\n elif trio_vcf_basename.endswith('.vcf.gz'):\n offset = -7\n else:\n return\n tmp_header = workdir + '/tmp_header.vcf'\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
method for initialising ontologyOb from database
def initFromDatabase(self, identifier, connection): # first init base class - this will get obid ob.initFromDatabase(self, identifier, "ontologyOb", connection) # now get the complete object self.databaseFields = getObjectRecord(connection, "ontologyOb", self.databaseFields['obid']) ...
[ "def initFromDatabase(self, identifier, connection):\n\n # first init base class - this will get obid\n ob.initFromDatabase(self, identifier, \"ontologyTermFact\", connection)\n\n\n # now get the complete object\n self.databaseFields = getObjectRecord(connection, \"ontologyTermFact\", se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This method can be used to add a term to an ontology. It will check that the term does not already exist and will only add it if it does not, provided the checkexisting parameter is True (set it to False when importing and you are sure there is no existing data, as this will speed up the transaction)
def addTerm(self,connection,termname, checkexisting = True, termdescription = None, unitname=None,termcode=None): termDict = { 'ontologyob' : self.databaseFields['obid'], 'xreflsid' : "%s.%s"%(self.databaseFields['xreflsid'],termname), 'termname' : termname, 'term...
[ "def add_term(term):\n # See if we already added it.\n # Search in reverse\n with open(CORPUSFILE) as ofile:\n for line in reversed(ofile.readlines()):\n if ' ' + term + ' ' in line.strip():\n term = None\n break\n # Add it if we never encountered it.\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
method for initialising ontologyTermFact from database
def initFromDatabase(self, identifier, connection): # first init base class - this will get obid ob.initFromDatabase(self, identifier, "ontologyTermFact", connection) # now get the complete object self.databaseFields = getObjectRecord(connection, "ontologyTermFact", self.databaseField...
[ "def initFromDatabase(self, identifier, connection):\n\n # first init base class - this will get obid\n ob.initFromDatabase(self, identifier, \"ontologyOb\", connection)\n\n\n # now get the complete object\n self.databaseFields = getObjectRecord(connection, \"ontologyOb\", self.databaseF...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Permutate this block with the specified table
def __permutate(self, table, block): return list(map(lambda x: block[x], table))
[ "def apply_to_table( self, table ):\n table.add_key( self.key )", "def _fill_table(self, table, gen) -> None:\n seq_table = self._table_map[table]\n seq_table.table.put_value(next(gen))", "def visit_table(self, table):\n pass", "def apply_to_table( self, table ):\n table.cha...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Turn the string data into a list of bits (1, 0)'s
def __String_to_BitList(self, data): if 2.7 < 3: # Turn the strings into integers. Python 3 uses a bytes # class, which already has this behaviour. data = [ord(c) for c in data] l = len(data) * 8 result = [0] * l pos = 0 for ch in data: i = 7 while i >= 0: if ch & (1 << i) != 0: result...
[ "def string_to_bitlist(data: ByteString) -> List[int]:\n l = len(data) * 8\n result = [0] * l\n pos = 0\n for ch in data:\n i = 7\n while i >= 0:\n if ch & (1 << i) != 0:\n result[pos] = 1\n else:\n result[pos] = 0\n pos += 1\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the 6 bytes of the expansion in hexadecimal
def expand(self, fbits): bitlist = self.__String_to_BitList(fbits) expansion = self.__permutate(self.__expansion_table, bitlist) expansion_str = self.__BitList_to_String(expansion) return self.__String_to_hex(expansion_str)
[ "def get_hex(self):\n pass", "def to_hex(self):\n if self.size not in VAR_PREFIXES:\n return \"0\" * int((self.size - len(bin(self.value)[2:]))/4) + hex(int(bin(self.value)[2:], 2))[2:]", "def form_hex(dense_hash):\n return ''.join([format(number, '02x') for number in dense_hash])", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the 8-byte permutation result in hexadecimal
def ipermutation(self, fbits): bitlist = self.__String_to_BitList(fbits) ipermutacion = self.__permutate(self.__ip, bitlist) permut_str = self.__BitList_to_String(ipermutacion) return self.__String_to_hex(permut_str)
[ "def finalPermutation(code):\n return_list = ''\n for i in range(16):\n list = ''\n for j in range(4):\n list += code[DS.ip_1[i * 4 + j] - 1]\n return_list += \"%x\" % int(list, 2)\n return return_list", "def random_cipher():\n return np.random.permutation(26)", "def ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that reads the crypt file.
def _readcrypt(self): if self.dbg: print(bgre(self._readcrypt)) __dct = {} try: __dct, err = self.decrypt(self.crypt) except DecryptError as err: error(err) exit(1) __dct = dict(load(str(__dct), Loader=FullLoader)) if err: if err == 'SIGERR': if self.gui: yesno = xyesno('reencrypt, e...
[ "def read_password():\n with open(passwordfile,'r') as handle:\n read = handle.read()\n return read", "def read_from_file(filename: str, key: bytes) -> bytes:\n with pyscrypt.ScryptFile(filename, key) as file:\n return file.read()", "def read_file(self, group, name, ext='yaml'):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Immediately stop the PostgreSQL cluster.
def pg_stop(pg_bin, pg_port, pg_socket_dir, pg_data):
    cmd = '%s/pg_ctl stop -m immediate -D %s -o "-p%s -k%s"' % (
        pg_bin, pg_data, pg_port, pg_socket_dir
    )
    ret_code, out, err = exec_command(cmd, shell=True)
    assert 0 == ret_code, out + err
[ "def stopDB(self):\n pass", "def stop() -> None:\n config = load_config_file()\n instance_ips = [i.public_ip_address for i in get_running_instances(config)]\n if not instance_ips:\n raise Exception('ERROR: No instances with public IPs found. Exiting.')\n try:\n execute_commands_on...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Remove the PostgreSQL data directory.
def pg_drop(pg_data):
    # /!\ WARNING: This is VERY dangerous /!\
    # TODO: Find a safer way to drop the data directory.
    (ret_code, stdout, stderr) = exec_command(["rm", "-rf", pg_data])
    if ret_code != 0:
        raise Exception(str(stderr))
[ "def wipe_database():\r\n dbpath = \"/\".join(__file__.split('/')[:-1] + ['samples.db'])\r\n os.system(\"rm -f {0}\".format(dbpath))", "def cleanup_data_dir():\n print \"cleaning up data directory...\"\n file_list = [ f for f in os.listdir(DATA_DIRECTORY) ]\n for f in file_list:\n os.remove(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that the variable updated_at is an instance of datetime.
def test_updated_at_instance_of(self):
    self.assertTrue(isinstance(self.base.updated_at, datetime))
[ "def test_updated_at_type(self):\n self.assertEqual(type(self.user.updated_at), datetime)", "def test_updated_at_type(self):\n self.assertEqual(type(self.city.updated_at), datetime)", "def test_updated_at_is_datetime(self):\n b = BaseModel()\n self.assertTrue(type(b.updated_at) is da...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that updated_at differs from created_at after saving.
def test_updated_at(self):
    self.base.save()
    self.assertTrue(self.base.created_at != self.base.updated_at)
[ "def test_updated_at(self):\n new = self.value()\n self.assertEqual(type(new.updated_at), datetime.datetime)\n n = new.to_dict()\n new = BaseModel(**n)\n self.assertFalse(new.created_at == new.updated_at)", "def test_updated_at_type(self):\n self.assertEqual(type(self.use...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that updated_at is a string inside the dictionary.
def test_to_dict_updated_at(self):
    test_dict = self.base.to_dict()
    self.assertEqual(type(test_dict['updated_at']), str)
[ "def test_to_dict_updated_at_str(self):\n c = City()\n c_dictionary = c.to_dict()\n self.assertEqual(str, type(c_dictionary['updated_at']))", "def test_updated_at_type(self):\n self.assertEqual(type(self.user.updated_at), datetime)", "def test_updated_at_type(self):\n self.ass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that created_at is a string inside the dictionary.
def test_to_dict_created_at(self):
    test_dict = self.base.to_dict()
    self.assertEqual(type(test_dict['created_at']), str)
[ "def is_valid_datetime(json_post):\n try:\n if not strict_rfc3339.validate_rfc3339(json_post[\"datetime\"]):\n return False\n else:\n return True\n except KeyError as e:\n print(e)\n return False", "def test_created_at_type(se...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the egress of this V1NetworkPolicySpec.
def egress(self, egress):
    self._egress = egress
[ "def egress(self) -> 'outputs.EgressResponse':\n return pulumi.get(self, \"egress\")", "def egress_configuration(self) -> Optional['outputs.ServiceNetworkConfigurationEgressConfiguration']:\n return pulumi.get(self, \"egress_configuration\")", "def egress_setting(self) -> Optional[pulumi.Input[str...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the ingress of this V1NetworkPolicySpec.
def ingress(self, ingress):
    self._ingress = ingress
[ "def _ovs_set_interface_ingress(self, interface_uuid, inbound_limit):\n if inbound_limit < 0:\n raise ValueError(\"inbound_limit is negative.\")\n \n burst = 0.1 * inbound_limit * 1024\n if burst < CONF.network_device_mtu:\n burst = CONF.network_device_mtu\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the pod_selector of this V1NetworkPolicySpec.
def pod_selector(self, pod_selector):
    if self.local_vars_configuration.client_side_validation and pod_selector is None:  # noqa: E501
        raise ValueError("Invalid value for `pod_selector`, must not be `None`")  # noqa: E501

    self._pod_selector = pod_selector
[ "def pod_selector(self, pod_selector):\n if pod_selector is None:\n raise ValueError('Invalid value for `pod_selector`, must not be `None`')\n\n self._pod_selector = pod_selector", "def setSelector(self, selector: cern.japc.core.Selector) -> 'LsaSelectorBuilder':\n ...", "def patch_core_v1_nam...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the policy_types of this V1NetworkPolicySpec.
def policy_types(self, policy_types):
    self._policy_types = policy_types
[ "def policy_types(self, policy_types):\n\n self._policy_types = policy_types", "def pool_types(self, pool_types):\n\n self._pool_types = pool_types", "def hybridization_types(self, hybridization_types: List[Chem.HybridizationType]) -> None:\r\n self._hybridization_types = hybridization_types", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Face value of the bond.
def face_value(self) -> float:
    return self.__face_value
[ "def getFace(self):\n return self._face", "def getFaceIndex(self) -> \"int\":\n return _coin.SoFaceDetail_getFaceIndex(self)", "def face_callback(self,value):", "def faces(self):\n return self.face.values()", "def GetValence(self) -> \"int\":\n return _itkQuadEdgeMeshPointPython....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Liquidity score assigned to buying/selling the bond.
def liquidity_score(self) -> RangeFilter:
    return self.__liquidity_score
[ "def bid_liquidity_used(self, bid_liquidity_used):\n\n self._bid_liquidity_used = bid_liquidity_used", "def income(self):\r\n if self.blockaded:\r\n return 0 # Blockaded planets have no income.\r\n income = round(self.realisedValue / float(100) * math.sqrt(self.owner.tech[\"Product...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Goldman Sachs' indicative charge of the bond (bps).
def gs_charge_bps(self) -> RangeFilter:
    return self.__gs_charge_bps
[ "def charge(self):\n return self.__charge", "def calculate_gdp_per_capita():\n pass", "def rate(self):\n rate, unused_value = self._get_bond_info()\n return rate", "def bond_B(k):\n return (4-k) * 300000", "def price_per_gb(self):\n return self.price / self.gb", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }