Columns:
  query      — string (length 9 to 9.05k)
  document   — string (length 10 to 222k)
  negatives  — list (19 to 20 items)
  metadata   — dict
This function writes your array to a FITS file that you can open in DS9 for visual inspection, or use for any other purpose.
def make_fits(array, filename, path=''):
    hdu0 = fits.PrimaryHDU([])
    hdu1 = fits.ImageHDU([array])
    hdulist = fits.HDUList([hdu0, hdu1])
    if path=='':
        path = os.getcwd()
        hdulist.writeto(path+filename+'.fits', overwrite=False)
    else:
        hdulist.writeto(path+filename+'.fits', overw...
[ "def export_fits(self, filename):", "def save_as_fits(self, filename):", "def tofits(self, filename=None):\n robot_array = self.robot_array()\n target_array = self.target_array()\n fitsio.write(filename, robot_array, clobber=True)\n fitsio.write(filename, target_array, clobber=False)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Organizes the files in your working directory by visit number. Generates a dictionary that groups the files by visit number.
def group_visits(wdir):
    all_files = glob(os.path.join(wdir, '*flc.fits'))
    group = dict()
    for file in all_files:
        visit = fits.getheader(file)['LINENUM'].split('.')[0]
        if visit not in group:
            group[str(visit)] = [str(file)]
        elif visit in group:
            group[str(visit)]....
[ "def sorter(filenames):\n \n counter = collections.defaultdict(int)\n \n for filename in filenames:\n for i in open(filename):\n counter[i] += 1\n\n return counter", "def scan_directory(self):\n root_dir = self.gait_directory.rstrip(os.sep)\n\n directory_dict = {}\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints HTML response; useful for debugging tests.
def debug_html(label, response):
    print("\n\n\n", "*********", label, "\n")
    print(response.data.decode('utf8'))
    print("\n\n")
[ "def _print_debug(response):\n\n # Parse the URL to get the path and location header.\n # parsed_url is a tuple with the form: (scheme, netloc, path, query, fragment)\n parsed_url = urlsplit(response.request.url)\n host = parsed_url[1]\n path = urlunsplit(('', '', parsed_url[2], p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tell the Robot to stop cleaning
def stopclean(self):
    raise Exception("Not implemented")
[ "def turn_off(self):\n self.robot.stop_simulation()", "def shutdown():\n\trospy.loginfo(\"Stopping the robot...\")\n\tglobal_vars.move_base.cancel_all_goals()\n\n\tglobal_vars.cmd_vel_pub.publish(Twist())\n\n\trospy.sleep(1)", "def shutdown(self):\n\t\trospy.loginfo(\"Stopping the robot...\")\n\t\tself.c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get indexing status Check if indexing is enabled. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
def is_indexing_enabled(self, collection_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.is_indexing_enabled_with_http_info(collection_id, **kwargs)
    else:
        (data) = self.is_indexing_enabled_with_http_info(collection_id, **kwarg...
[ "def is_indexed(self):\r\n return self._indexed", "def get_status(self, index_or_label):\n \n index = self._get_index(index_or_label)\n return self._items.GetClientData(index)[\"status\"]", "def indexing_ready_flag(self) -> Optional[bool]:\n return pulumi.get(self, \"indexing_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Request rebuild index Request an index rebuild on an existing collection. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
def rebuild(self, collection_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.rebuild_with_http_info(collection_id, **kwargs)
    else:
        (data) = self.rebuild_with_http_info(collection_id, **kwargs)
        return data
[ "def _rebuild_index(self):\n from django.core.management import call_command\n call_command('rebuild_index', interactive=False, verbosity=0)", "def reindex(self):\n ds = docstore.Docstore(config.DOCSTORE_HOST, config.DOCSTORE_INDEX)\n \n # check for ES connection before going to...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change indexing status Enable or disable indexing on an existing collection. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
def set_indexing_enabled(self, collection_id, body, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.set_indexing_enabled_with_http_info(collection_id, body, **kwargs)
    else:
        (data) = self.set_indexing_enabled_with_http_info(collect...
[ "def set_indexing_enabled_with_http_info(self, collection_id, body, **kwargs):\n\n all_params = ['collection_id', 'body']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change indexing status Enable or disable indexing on an existing collection. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
def set_indexing_enabled_with_http_info(self, collection_id, body, **kwargs):
    all_params = ['collection_id', 'body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    par...
[ "def set_indexing_enabled(self, collection_id, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.set_indexing_enabled_with_http_info(collection_id, body, **kwargs)\n else:\n (data) = self.set_indexing_enabled_with_http_in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List collection status Display status information about an existing collection. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
def status(self, collection_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.status_with_http_info(collection_id, **kwargs)
    else:
        (data) = self.status_with_http_info(collection_id, **kwargs)
        return data
[ "def status_all_with_http_info(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List status for all collections Display status information about all existing collections. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
def status_all_with_http_info(self, **kwargs):
    all_params = []
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    params = locals()
    for key, val in iteritems(param...
[ "def status(self, collection_id, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.status_with_http_info(collection_id, **kwargs)\n else:\n (data) = self.status_with_http_info(collection_id, **kwargs)\n return data",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update a collection Updates an existing collection. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
def update(self, collection_id, body, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.update_with_http_info(collection_id, body, **kwargs)
    else:
        (data) = self.update_with_http_info(collection_id, body, **kwargs)
        return...
[ "async def updateCollection(self, id=None, body=\"\"):\n payload = {}\n \n if id:\n payload[\"id\"] = id\n \n\n # Parameter validation\n schema = CatalogValidator.updateCollection()\n schema.dump(schema.load(payload))\n \n # Body validation\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the normalized difference vector between the embeddings of two words.
def diff(self, word1, word2):
    v = self._vecs[self._index[word1]] - self._vecs[self._index[word2]]
    return v / np.linalg.norm(v)
[ "def word_distance(self, word1, word2):\n \n if word1 not in self.vocab:\n raise RuntimeError('Word \"{}\" not in vocabulary.'.format(word1))\n if word2 not in self.vocab:\n raise RuntimeError('Word \"{}\" not in vocabulary.'.format(word2))\n \n idx1, idx2 = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save the words and embeddings to a file, sorted by word frequency in descending order.
def save_embeddings(self, filename, binary=True):
    # text mode: encoding= is only valid without "b"
    with open(filename, "w", encoding="utf8") as fout:
        fout.write("%s %s\n" % self._vecs.shape)
        # store in sorted order: most frequent words at the top
        for i, word in enumerate(self._words):
            row = self._vecs[i]
            ...
[ "def _build_vocab(filename, vocab_path, vocab_size):\n data = _read_words(filename)\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n words, _ = list(zip(*count_pairs))\n words = words[:vocab_size]\n with open(vocab_path, \"w\") as f:\n f.write(\"\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the most stereotypical professions on both ends of the bias direction.
def profession_stereotypes(self, profession_words, bias_space, print_firstn=20):
    assert isinstance(print_firstn, int) and print_firstn >= 0
    # Calculate the projection values onto the bias subspace
    sp = sorted(
        [
            (self.v(w).dot(bias_space), w) for w in ...
[ "def print_best_individual(self):\n\n pass", "def print_people_strategies():\n\t\tfor person in sorted(Simulation.community):\n\t\t\tSimulation.community[person].print_info()\n\t\tPerson.person_progression.write(\"--------------- END OF WEEK ---------------\" + \"\\n\")", "def print_skill_title(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the analogies in a nicer format.
def viz(analogies):
    print("Index".ljust(12) + "Analogy".center(45) + "Gender score".rjust(12))
    print("-" * 69)
    print(
        "\n".join(
            str(i).rjust(4) + a[0].rjust(29) + " | " + a[1].ljust(29) + (str(a[2]))[:4]
            for i, a in enumerate(analogies)
        )
    )
[ "def debug_repr(self) -> str:\n repr_string = \"{}(Confi):\\n\".format(self.__class__.__name__)\n items = list(self.entries.items())\n items.sort(key = lambda item: item[0])\n indent = ' ' * 4\n for key, entry in items:\n repr_string += f\"{indent}{key}: {repr(entry.val...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform PCA on the centered embeddings of the words in the pairs.
def doPCA(pairs, embedding, num_components=10):
    matrix = []
    for a, b in pairs:
        center = (embedding.v(a) + embedding.v(b)) / 2
        matrix.append(embedding.v(a) - center)
        matrix.append(embedding.v(b) - center)
    matrix = np.array(matrix)
    pca = PCA(n_components=num_components)
    pca.fit...
[ "def get_vectors_pca(self, dimension=100):\n pca = PCA(n_components=dimension)\n \n input_dim = len(self.dictionary)\n corpus_dim = len(self.corpus)\n \n # See if there is one-hot encoded vectors (X_data)\n if self.X_data is None:\n # Transform data to be ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Argument parser for the Nussinov program.
def setParser():
    parser = argparse.ArgumentParser(
        prog="Nussinov Algorithm Solver",
        description="A program that runs Nussinov's Algorithm on a given RNA strand and returns the most viable pairings."
    )
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-f", "-...
[ "def parse_arguments(args):", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Run SVM.\")\n\n parser.add_argument('--input', nargs='?', default='emb_p=1_q=1.emb', \n help='Name of input file for node embeddings')\n\n return parser.parse_args()", "def parse_arg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes the arguments passed to the script and loads the sequence from a file or from an input string.
def getSequence(args):
    sequence = args.sequence
    if sequence in [None, "", ''] and args.filepath not in [None, "", '']:
        if path.exists(args.filepath):
            try:
                with open(args.filepath, "r+") as file:
                    sequence = file.readline()
            except Exception as e...
[ "def load_seq(self, sequence_data):\n assert sequence_data, \"sequence data appears empty\"\n \n if isinstance(sequence_data, list):\n self.__load_list(sequence_data)\n elif isinstance(sequence_data, str):\n self.__load_file(sequence_data)\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines the cost associated with a pair: 1 if it is in the valid pairs, else 0. This function also gives cost 1 to U-G pairs (when UNCOMMON is enabled).
def costFunction(a, b):
    pairs = [('G', 'C'), ('C', 'G'), ('A', 'U'), ('U', 'A')]
    if UNCOMMON:
        # extend, not append: append would add the whole list as a single element,
        # so the membership test below would never match the wobble pairs
        pairs.extend([('G', 'U'), ('U', 'G')])
    if (a, b) in pairs:
        return 1
    return 0
[ "def _isCorrectWithCompCost(grid, EMPType='S'):\n gridSize_Y = len(grid)\n gridSize_X = len(grid[0])\n compCost=0\n if EMPType not in ['SF','S']:\n raise AssertionError(\"EMPType \"+EMPType+\" not defined.\")\n if EMPType == 'SF':\n #check corners\n for idxRow, row in enumerate(g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compare a set of input keys to expected keys.
def assert_keys_match(keys, expected, allow_missing=True):
    if not allow_missing:
        missing = expected - keys
        assert not missing, 'missing keys: %s' % missing
    extra = keys - expected
    assert not extra, 'extraneous keys: %s' % extra
[ "def validateKeys(keySet,keysPresent,keysExpected):\n msg(\"Validating %s keys of %s\" % (keySet,keysExpected))\n\n # First make sure that any key present is expected\n for key in keysPresent:\n if key not in keysExpected:\n keyIssue(\"unexpected\", key,keySet)\n \n # Now make sure ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a key from dict, ensuring valid bool if present.
def read_key_bool(op, key):
    if key in op:
        assert isinstance(op[key], bool), 'must be bool: %s' % key
        return op[key]
    return None
[ "def dkv_valid(d, keyname):\r\n return (d and (keyname in d) and d[keyname])", "def checkandConvertToBool(dictionary: Dict, key: str) -> bool:\n convertedValue = False\n try:\n if key in dictionary:\n convertedValue = True if dictionary[key] != \"False\" else False\n except Exception...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a dict, read `key`, ensuring result is a dict.
def read_key_dict(obj, key):
    assert key in obj, 'key `%s` not found' % key
    assert obj[key], 'key `%s` was blank' % key
    assert isinstance(obj[key], dict), 'key `%s` not a dict' % key
    return obj[key]
[ "def dict_get_nested(key, dict_):\n if not is_iterable(key):\n return dict_[key]\n for k in key:\n if k is None:\n continue\n dict_ = dict_[k]\n return dict_", "def read_dict(self, path_key: str) -> Dict[str, Any]:\n self._assert_path_in_domain(path_key)\n wi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verify `name` as a candidate and check for record id.
def validated_id(cls, name):
    if name:
        if name in cls._ids:
            return cls._ids[name]
        if cls.validated_name(name):
            if Accounts.exists(name):
                return cls.get_id(name)
    return None
[ "def test_pk_name(self):\n self.validate_test(self.group.pk_name() == \"id\")", "def validate_name(self, name):\n\n lesson = Lessons.query.filter_by(id=self.lesson.data).first()\n academy = Academy.query.filter_by(name=self.academy.data).first()\n student = Student.query.filter_by(acad...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a community name, get its internal id.
def get_id(cls, name):
    assert name, 'name is empty'
    if name in cls._ids:
        return cls._ids[name]
    sql = "SELECT id FROM hive_communities WHERE name = :name"
    cid = DB.query_one(sql, name=name)
    if cid:
        cls._ids[name] = cid
        cls._names[cid] = name
        ...
[ "def get_community(name):\n community = Community.query.filter_by(name=name).first()\n return community", "def get_net_id(self, nw_name):\n nw = self.get_networks()\n for n in nw:\n if n['name'] == nw_name:\n return n['id'], n['tenant_id']\n return None", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of all muted accounts.
def get_all_muted(cls, community_id):
    return DB.query_col("""SELECT name FROM hive_accounts
                           WHERE id IN (SELECT account_id FROM hive_roles
                                         WHERE community_id = :community_id
                                           AND role_id ...
[ "async def listmuted(self, ctx):\r\n muteList = self.settings.getServerStat(ctx.guild, \"MuteList\")\r\n activeMutes = []\r\n for entry in muteList:\r\n member = DisplayName.memberForID(entry['ID'], ctx.guild)\r\n if member:\r\n # Found one!\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get user role within a specific community.
def get_user_role(cls, community_id, account_id):
    return DB.query_one("""SELECT role_id FROM hive_roles
                           WHERE community_id = :community_id
                             AND account_id = :account_id
                           LIMIT 1""",
                        commu...
[ "def get_role(os_conn, role_name):\n role_list = os_conn.keystone.roles.list()\n for role in role_list:\n if role.name == role_name:\n return role", "def role_get(self, role):\n method = '/auth/role/get'\n data = {\n \"role\": role\n }\n return self.c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a new post/comment, check whether it is valid per community rules.
def is_post_valid(cls, community_id, comment_op: dict):
    assert community_id, 'no community_id'
    community = cls._get_name(community_id)
    account_id = Accounts.get_id(comment_op['author'])
    role = cls.get_user_role(community_id, account_id)
    type_id = int(community[5])  # TOD...
[ "def test_post_creation(self):\n self.assertTrue((self.post1.author == self.username1) and (self.post1.content == self.postText1))", "def validate_post(self, post_id: int):\n raise NotImplementedError", "def validatePostbase(postbase):\n\tvalid = False\n\treturn valid", "def validate_blog_post(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update all pending payout and rank fields.
def recalc_pending_payouts(cls):
    sql = """SELECT id,
                    COALESCE(posts, 0),
                    COALESCE(payouts, 0),
                    COALESCE(authors, 0)
               FROM hive_communities c
               LEFT JOIN (
                    SELECT community_id, ...
[ "def update_payouts():\n from sales.models import Sale, NooksPayoutSchedule\n schedules = NooksPayoutSchedule.objects.all()\n sales = Sale.objects.all()\n\n for schedule in schedules:\n for sale in sales:\n if sale.date >= schedule.start_date and sale.date <= schedule.end_date:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check an account's subscription status.
def _subscribed(self, account_id):
    sql = """SELECT 1 FROM hive_subscriptions
              WHERE community_id = :community_id
                AND account_id = :account_id"""
    return bool(DB.query_one(
        sql, community_id=self.community_id, account_id=account_id))
[ "def verifysubscriptionstatusinaccounttab():\n pass", "async def status(ctx):\n redis = await RedisDB.create()\n user = ctx.message.author\n try:\n subscription_id = await get_subscription_id(user, redis)\n\n if subscription_id is None:\n subscription_json = await create_subsc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check post's muted status.
def _muted(self):
    sql = "SELECT is_muted FROM hive_posts WHERE id = :id"
    return bool(DB.query_one(sql, id=self.post_id))
[ "async def check_muted():\n\t\tfor Member in Guild.members:\n\t\t\tif get.role(\"Muted\") in Member.roles:\n\t\t\t\tMutedUntil = datetime.strptime(\n\t\t\t\t\tdatabase.field(\"SELECT MutedUntil FROM users WHERE UserID = ?\", Member.id),\n\t\t\t\t\t\"%Y-%m-%d %H:%M:%S\",\n\t\t\t\t)\n\t\t\t\tif MutedUntil < datetime....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check parent post's muted status.
def _parent_muted(self):
    parent_id = "SELECT parent_id FROM hive_posts WHERE id = :id"
    sql = "SELECT is_muted FROM hive_posts WHERE id = (%s)" % parent_id
    return bool(DB.query_one(sql, id=self.post_id))
[ "def _muted(self):\n sql = \"SELECT is_muted FROM hive_posts WHERE id = :id\"\n return bool(DB.query_one(sql, id=self.post_id))", "def muted(self) -> bool:\n return self._muted", "def is_volume_muted(self):\n return self._muted", "async def check_muted():\n\t\tfor Member in Guild.membe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check post's pinned status.
def _pinned(self):
    sql = "SELECT is_pinned FROM hive_posts WHERE id = :id"
    return bool(DB.query_one(sql, id=self.post_id))
[ "def is_pinned(self):\n return self._impl.is_pinned()", "def has_pinned_content(self):\n if \"query\" in self.query:\n q = self.query[\"query\"]\n else:\n q = self.query\n if \"pinned_ids\" in q:\n return bool(len(q.get(\"pinned_ids\", [])))\n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets up a classifier for use
def setup_classifier(name):
    global _classifier, _trained
    if name == "euclid":
        _classifier = name
        _trained = True
    elif name == "bayes":
        _classifier = name
        _trained = True
    elif name == "rocchio":
        _classifier = name
        _trained = True
    else:
        print("Cl...
[ "def __init__(self, classifier, **kwargs):\n self.params = {\n 'scaleFactor': 1.1,\n 'minNeighbors': 4,\n 'minSize': (16, 16),\n 'maxSize': (1024, 1024),\n 'flags': cv2.CASCADE_SCALE_IMAGE\n }\n self.params.update(kwargs)\n # Create ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluate a text against the given training set using the configured classifier.
def evaluate(text, articles, no_preprocess=False):
    if not _trained:
        print("No classifier initialized. Make sure to do so first")
        raise Exception
    if not no_preprocess:
        text = body_reader.get_words_in(text)
    if _classifier == "euclid":
        return euclidean.evaluate(articles, text)...
[ "def evaluate_plain_text(self):\n correct_predictions = 0\n self.model.eval()\n total_predictions = 0\n for index, (inputs, labels) in enumerate(self.data_loader.test_loader):\n outputs = self.model(inputs)\n pred = outputs.argmax(dim=1)\n correct_predict...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all matches by league in a single pass, instead of querying the site twice (first leagues and then matches).
def get_all_matches_by_league(self):
    raise NotImplementedError
[ "def get_leagues(self):\n # ex of result : {'leagues in england': [{'league name ': 'Premier League'}, {'league name ':\n # 'Championship'},{'league name ': 'League Cup'}]}\n\n # https://int.soccerway.com/competitions/\n country = self.parameters_dictionary[\"country\"]\n\n # we s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the game name from the user and set its proper id.
def set_game_id(self, game_name):
    dic = {(''.join(filter(str.isalpha, key))): v for key, v in self.games_map.items()}
    dic = dic[self.league]
    dic = {(''.join(filter(str.isalpha, key))): v for key, v in dic.items()}
    self.game_id = dic[game_name][0]
    self.game_time = dic[game_name][1]
[ "def get_game_id(self) -> str:\n return self.game_name_entry.get()", "def get_unique_game_id(self):\n def set_game_id(result):\n self.unique_game_id = result\n d = app.uplink.root_obj.callRemote(\"get_unique_game_id\")\n d.addCallback(set_game_id)", "def updateName(self):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a path is inside a source Rez package's list of variants. This function's purpose is hard to describe.
def _get_variant_less_path(root, path, variants):
    for variant_less_path in _iter_variant_extracted_paths(root, path, variants):
        if not imports.has_importable_module(variant_less_path, ignore={"__init__.py"}):
            # This condition happens only when a Rez package defines
            # A Python package...
[ "def contains_shell_volume(source):\n # type: (List[str]) -> bool\n for code in source:\n if _SHELL_VOLUME_PATTERN.search(code) is not None:\n return True\n return False", "def package_varies_by(install_path: str, variant: str) -> bool:\n\n if variant not in PACKAGE_VARIANTS:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the given Rez package is a source directory or a built Rez package.
def is_built_package(package):
    try:
        parent_folder = finder.get_package_root(package)
    except (AttributeError, TypeError):
        raise ValueError(
            'Input "{package}" is not a valid Rez package.'.format(package=package)
        )
    version = str(package.version)
    if not version:
        ...
[ "def is_package(source_tree='.'):\n return os.path.exists(os.path.join(source_tree, 'setup.py'))", "def _is_package(path):\n if not os.path.isdir(path):\n return False\n return os.path.isfile(os.path.join(path, '__init__.py'))", "def _is_package():\n # Save the basename (possible package name...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the Python files that a Rez package adds to the user's PYTHONPATH. If the Rez package is an installed Rez package and it contains variants, each variant will have its paths returned.
def get_package_python_paths(package, paths):
    # Note: Here we're trying to get `package`'s specific changes to PYTHONPATH (if any)
    #
    # Unfortunately, the Rez API doesn't really support this yet.
    # There's 2 GitHub links that may one-day implement it though:
    # - https://github.com/nerdvegas/rez/i...
[ "def _get_python_filepaths():\n python_paths = [\"setup.py\"]\n for root in [\"k8s\", \"test\"]:\n for dirpath, _, filenames in os.walk(root):\n for filename in filenames:\n if filename.endswith(\".py\"):\n python_paths.append(os.path.join(dirpath, filename)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the folder above a Rez package, assuming the path is to a built Rez package. The "packages path" of a Rez package is basically "The path that would be needed in order to make this package discoverable by the rezenv command". For example, a released package like "~/.rez/packages/int/foo/1.0.0" has a packages path li...
def get_packages_path_from_package(package):
    root = finder.get_package_root(package)
    if is_built_package(package):
        package_name_folder = os.path.dirname(root)
        return os.path.dirname(package_name_folder)
    return os.path.dirname(root)
[ "def find_package_path(path: Path) -> Optional[Path]:\n for package_path in path.iterdir():\n if is_package_dir(package_path):\n return package_path", "def package_path(package):\n return Path(package.__path__[0])", "def resolve_package_path(path: Path) -> Optional[Path]:\n result = N...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a statement string and a list of statement strings. Returns the closest matching statement from the list.
def get(self, input_statement):
    statement_list = self.chatbot.storage.get_response_statements(input_statement.text)
    print("from adapter: " + str(len(statement_list)))
    if not statement_list:
        if self.chatbot.storage.count():
            # Use a randomly picked statement
            ...
[ "def closest(text, database):\n from fuzzywuzzy import process\n\n # Check if an exact match exists\n if database.find(text):\n return text\n\n # Get the closest matching statement from the database\n return process.extract(text, database.keys(), limit=1)[0][0]", "def get_closest_match(a,lst...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that the chatbot's storage adapter is available to the logic adapter and there is at least one statement in the database.
def can_process(self, statement):
    return self.chatbot.storage.count()
[ "def do_db_check(self):", "def check_backend():\n raise NotImplementedError", "def check_connection(self):\n pass", "def check_in_DataBase(data):\n return True", "def check_extensions(self):\n extensions = self.cloud.get_network_extensions()\n for network_extension in self.neu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function adds the task to the file todo.txt.
def add():
    try:
        task = sys.argv[2]
        # use a context manager so the file is closed promptly
        with open("todo.txt", "a") as file:
            file.write(task + "\n")
        print('Added todo: "{}"'.format(task))
    except IndexError:
        print("Error: Missing todo string. Nothing added!")
[ "def add_task():\n\n yourTask = []\n line = input(\"Add your task: \")\n yourTask.append(line)\n taskfile = open('tasks.txt', 'a')\n for line in yourTask:\n taskfile.write(\"%s\\n\" % line)\n taskfile.close()\n\n import menu", "def add_task():\n if window.FindElement('add_save').Get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function reads the tasks from todo.txt and prints them to the screen.
def getTasks():
    tasks = open("todo.txt").readlines()
    if len(tasks):
        for num in range(len(tasks) - 1, -1, -1):
            print("[%d] %s" % (num + 1, tasks[num]), end="")
    else:
        print("There are no pending todos!")
[ "def read_todo_file(self):\n\n todo = []\n in_progress = []\n done = []\n if os.path.exists('TODO.txt'):\n todo_fp = open('TODO.txt', 'r')\n state = 0\n line = todo_fp.readline()\n while line:\n line = line.strip()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function strikes off a task that is done, deletes it from todo.txt, and adds it to done.txt.
def markOff(isdelete=0):
    try:
        taskId = sys.argv[2]
        tasks = open("todo.txt").readlines()
        file = open("todo.txt", "w")
        doneTasks = open("done.txt", "a")
        flag = True
        for task in range(len(tasks)):
            if task + 1 == int(taskId):
                flag = False
                if isdelete == 1:
                    ...
[ "def mark_current_done(self):\n current_todo = self.current_todo()\n l = current_todo.linenum\n current_todo.unset_current() # remove current tag\n current_todo.remove_tags_if(lambda x: x.same_name_as(Tag.ignore_tag())) # remove ignore tags\n repeat_tag = [ t for t in current_todo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function's need is similar to markOff, so it simply calls markOff with isdelete=1 as the argument.
def deleteTask():
    markOff(isdelete=1)
[ "def markOff(isdelete = 0):\n\ttry:\n\t taskId = sys.argv[2]\n\t tasks = open(\"todo.txt\").readlines()\n\t file = open(\"todo.txt\", \"w\")\n\t doneTasks = open(\"done.txt\", \"a\")\n\t flag = True\n\t for task in range(len(tasks)):\n\t if task + 1 == int(taskId):\n\t \tflag = False...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This is used by the main method to change the smoothing parameter before training. Do not modify this method.
def setSmoothing(self, k):
    self.k = k
[ "def set_smoothing(smoothing):\n _global_config.smoothing = smoothing", "def setSmoothing(self, smooth=True):\n \n pass", "def set_smoothing_parameter(self, b):\n\n self.smooth_parameter = b", "def setSmoothing(self, k):\n\tself.k = k", "def update_smoothing(self):\n if (\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the log-joint distribution over legal labels and the datum. Each log-probability should be stored in the log-joint counter, e.g. logJoint[3] = <Estimate of log( P(Label = 3, datum) )>. To get the list of all possible features or labels, use self.features and self.legalLabels.
def calculateLogJointProbabilities(self, datum):
    logJoint = util.Counter()
    "*** YOUR CODE HERE ***"
    # Adds log(P(y)) to calculate P(y|f1,f2...)
    for label in self.legalLabels:
        logJoint[label] += math.log(self.prior[label])
    # Adds log(P(f1|y)), log(P(f2|y))... to calculate P(y|f1, f2...)
    for key in datu...
[ "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extract phrases from a CSV file and tokenize them. Add duplicate phrases only once.
def extract_phrases(phrase_dict, csv_reader, word_list):
    count_row = 0
    for row in csv_reader:
        phrase = row[3]
        count_row += 1
        if phrase not in all_phrases:
            tokens = tokenizer(phrase)
            tokens = list(tokens)
            phrase_dict[phrase] = tokens
            for t...
[ "def post_process(keyphrases):\n processed_keyphrases = []\n\n # Remove duplicates from the single phrases which are occurring in multi-keyphrases\n multi_phrases = [phrases for phrases in keyphrases if len(phrases[0].split()) > 1]\n single_phrase = [phrases for phrases in keyphrases if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Moves up to the parent directory
def moveUp():
    os.chdir("..")
[ "def moveUp():\n\tos.chdir(\"..\")", "def go_to_parent_directory(self):\r\n self.chdir(osp.abspath(osp.join(getcwd_or_home(), os.pardir)))", "def parent_directory(self):\r\n self.chdir(os.path.join(getcwd_or_home(), os.path.pardir))", "def _goUp(self) -> None:\n self._openPath(path=self._...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of files in the cwd and all its subdirectories.
def countFiles(path):
    count = 0
    lyst = os.listdir(path)
    for element in lyst:
        if os.path.isfile(element):
            count += 1
        else:
            os.chdir(element)
            count += countFiles(os.getcwd())
            os.chdir("..")
    return count
[ "def countFiles(path):\n\tcount = 0\n\tlyst = os.listdir(path)\n\tfor element in lyst:\n\t\tif os.path.isfile(element):\n\t\t\tcount += 1\n\t\telse:\n\t\t\tos.chdir(element)\n\t\t\tcount += countFiles(os.getcwd())\n\t\t\tos.chdir(\"..\")\n\treturn count", "def countFiles(path):\r\n\tcount = 0\r\n\tlyst = os.listd...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the number of bytes in the cwd and all its subdirectories
def countBytes(path):
    count = 0
    lyst = os.listdir(path)
    for element in lyst:
        if os.path.isfile(element):
            count += os.path.getsize(element)
        else:
            os.chdir(element)
            count += countBytes(os.getcwd())
            os.chdir("..")
    return count
[ "def countBytes(path):\r\n\tcount = 0\r\n\tlyst = os.listdir(path)\r\n\tfor element in lyst:\r\n\t\tif os.path.isfile(element):\r\n\t\t\tcount += os.path.getsize(element)\r\n\t\telse:\r\n\t\t\tos.chdir(element)\t\r\n\t\t\tcount += countBytes(os.getcwd())\r\n\t\t\tos.chdir(\"..\")\r\n\treturn count", "def dirsize(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of the filenames that contain the target string in the cwd and all its subdirectories
def findFiles(target, path):
    files = []
    lyst = os.listdir(path)
    for element in lyst:
        if os.path.isfile(element):
            if target in element:
                files.append(path + os.sep + element)
        else:
            os.chdir(element)
            files.extend(findFiles(target, os.getcwd()))
            os.chdir("..")
    return files
[ "def _find_target_files(directory: str) -> List[str]:\n return [os.path.abspath(os.path.join(directory, p))\n for p in os.listdir(directory) if \"targets\" in p]", "def findFiles(target, path):\r\n\tfiles = []\r\n\tlyst = os.listdir(path)\r\n\tfor element in lyst:\r\n\t\tif os.path.isfile(element):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates samples from the full dataframe using indices and the n rows before each index. Returns a list of [joined text from the n previous rows, current sentence] pairs.
def create_sample(df: pd.DataFrame, indices: list, n: int = 2) -> list:
    samples = []
    for idx in indices:
        if idx <= n:
            continue
        samples.append([
            ' '.join(df.loc[idx - n:idx - 1, 'article'].to_list()),
            df.loc[idx, 'article']
        ])
    return samp...
[ "def sample_rows(df, nrows):", "def get_sample(df,n):\n idxs = sorted(np.random.permutation(len(df))[:n])\n return df.iloc[idxs].copy()", "def RNN_df(embedding, index_list, original_df, length):\n x = {}\n embds = []\n # adding the embeddings with same index together (belonging to same transcript...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Whether non-None DB settings are set on this instance.
def db_settings_set(self) -> bool:
    return self._db_settings is not None
[ "def db_settings(self) -> DBSettings:\n if self._db_settings is None:\n raise ValueError(\"No DB settings are set on this instance.\")\n return not_none(self._db_settings)", "def _has_ads_settings(self):\n sql = \"\"\"SELECT 1 FROM hive_ads_settings\n WHERE communi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DB settings set on this instance; guaranteed to be non-None.
def db_settings(self) -> DBSettings:
    if self._db_settings is None:
        raise ValueError("No DB settings are set on this instance.")
    return not_none(self._db_settings)
[ "def db_settings_set(self) -> bool:\n return self._db_settings is not None", "def persistent_store_settings(self):\n ps_settings = (\n PersistentStoreDatabaseSetting(\n name='primary_db',\n description='primary database',\n initializer='teamher...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads experiment and its corresponding generation strategy from database if DB settings are set on this `WithDBSettingsBase` instance.
def _load_experiment_and_generation_strategy(
    self, experiment_name: str
) -> Tuple[Optional[Experiment], Optional[GenerationStrategy]]:
    if not self.db_settings_set:
        raise ValueError("Cannot load from DB in absence of DB settings.")
    try:
        return load_experiment_and_gen...
[ "def _load_generation_strategy_by_experiment_name(\n experiment_name: str,\n decoder: Decoder,\n experiment: Optional[Experiment] = None,\n reduced_state: bool = False,\n) -> GenerationStrategy:\n gs_id = _get_generation_strategy_id(\n experiment_name=experiment_name, decoder=decoder\n )\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves attached experiment and generation strategy if DB settings are set on this `WithDBSettingsBase` instance.
def _save_experiment_to_db_if_possible(
    self, experiment: Experiment, suppress_all_errors: bool = False
) -> bool:
    if self.db_settings_set:
        save_experiment(experiment=experiment, db_settings=self.db_settings)
        return True
    return False
[ "def _saveExperiment(self, experiment, path):\n Experiment.save(experiment, path);", "def save_experiment(experiment: Experiment, config: Optional[SQAConfig] = None) -> None:\n if not isinstance(experiment, Experiment):\n raise ValueError(\"Can only save instances of Experiment\")\n if not exp...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves given trial and generation strategy if DB settings are set on this `WithDBSettingsBase` instance.
def _save_new_trial_to_db_if_possible(
    self,
    experiment: Experiment,
    trial: BaseTrial,
    suppress_all_errors: bool = False,
) -> bool:
    if self.db_settings_set:
        save_new_trial(
            experiment=experiment, trial=trial, db_settings=self.db_settings
        ...
[ "def save_or_update_trial(\n experiment: Experiment, trial: BaseTrial, config: Optional[SQAConfig] = None\n) -> None:\n config = config or SQAConfig()\n encoder = Encoder(config=config)\n decoder = Decoder(config=config)\n _save_or_update_trial(\n experiment=experiment, trial=trial, encoder=en...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a tag with name txt and the given foreground and background colors.
def register_tag(self, txt, foreground, background):
    # self.tag_config(txt, foreground=foreground, background=background)
    self.known_tags.add(txt)
[ "def tag(self, text):\n return self.__decorate(text, color_tag)", "def set_text_color(self, fg: int, bg: int, /) -> None:", "def create( self, fontStyle, mode=None ):", "def render(self, h, *args):\n return h.span(self.text, style='background-color: ' + self.color)", "def color(self, text, **kwarg...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Works like the built-in two-argument `iter()`, but stops on `exception`.
def iter_except(function, exception):
    try:
        while True:
            yield function()
    except exception:
        return
[ "def iter_except(func, exception):\n try:\n while True:\n yield func()\n except exception:\n pass", "def iter_except(func, exception, first=None):\n\ttry:\n\t\tif first is not None:\n\t\t\tyield first()\n\t\twhile 1:\n\t\t\tyield func()\n\texcept exception:\n\t\tpass", "def stop()...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read subprocess output and put it into the queue.
def reader_thread(self, q):
    try:
        with self.process.stdout as pipe:
            for line in iter(pipe.readline, b''):
                q.put(line)
    finally:
        q.put(None)
[ "def queue_input(self):\n while self.process.poll() is None:\n line = self.process.stdout.readline()\n out = line.rstrip()\n if out != '':\n self.queue = out", "def _stdout_reader(self):\n self._is_launched.wait()\n stdout_iterator = iter(self._...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update GUI with items from the queue.
def update(self, q):
    for line in iter_except(q.get_nowait, Empty):  # display all content
        if line is None:
            self.process.kill()
            self.process = None
            return
        else:
            result = str(line).replace('\\r', '\r').replace('\\\\', ...
[ "def _process_incoming_queue_messages(self):\n while self._queue.qsize():\n msg = self._queue.get()\n if msg == MAP_UPDATE:\n self._clear_measurement_progress_label()\n self._presenter.update_map(self.chosen_value.get())", "def display_worker(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the adjusted data for a Band that has already been added using add_plot
def set_adjusted_data(self, data: HistogramPlotData, band: Band):
    plots: AdjustableHistogramControl = self.__plots[band]
    if plots is not None:
        plots.set_adjusted_data(data)
[ "def update_plot():\n pass", "def _update_data_plot(self):\n data = self.layout.tree_to_xy(self.data.tree)\n\n # updating for the first time\n if self.data not in self._plots:\n p = self._axes.plot(data[0], data[1], linewidth=2, picker=5)[0]\n self._plots[self.dat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the derivative of output_name with respect to wrt.
def get_derivative(self, output_name, wrt):
    return self.gradient[wrt][output_name]
[ "def get_2nd_derivative(self, output_name, wrt):\n \n return self.hessian[wrt[0]][wrt[1]][output_name]", "def transfer_derivative(self, output):\n return self.activation.derivate_output(output)", "def derive(expression: sy.Expr, wrt: Union[str, sy.Symbol]) -> sy.Expr:\n\n # Define type h...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the 2nd derivative of output_name with respect to both vars in the tuple wrt.
def get_2nd_derivative(self, output_name, wrt):
    return self.hessian[wrt[0]][wrt[1]][output_name]
[ "def get_derivative(self, output_name, wrt):\n \n return self.gradient[wrt][output_name]", "def second_derivative_name(self, fname, wrt):\n if not isinstance(wrt, tuple) or len(wrt) != 2:\n raise ValueError(\"wrt must be a two-element tuple\")\n \n if wrt[0] == wrt[1]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the gradient of the given output with respect to all parameters.
def get_gradient(self, output_name=None):
    return array([self.gradient[wrt][output_name] for wrt in self.param_names])
[ "def gradient_output(self, *, outputs: Outputs, inputs: Inputs) -> Gradients[Outputs]:", "def calc_output_gradient(self, target_val):\n delta = target_val - self.output_val\n self.gradient = delta * self.transfer_function_derivative(self.output_val)\n # print 'Output gradient ->', str(self.gr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the Hessian matrix of the given output with respect to all parameters.
def get_Hessian(self, output_name=None):
    return array([self.hessian[in1][in2][output_name]
                  for (in1, in2) in product(self.param_names, self.param_names)])
[ "def get_hessian(self):\n return self.tc.hessian_func(\n self.pf.XS[:, :, 0].transpose(),\n self.pf.XS[:, :, 1].transpose(),\n self.pf.WS[:].transpose())", "def _get_hessian(self):\n if not self.sparse:\n hess = numpy.dot(self.jacobian_T, self.jacobian)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the gradient vectors for all outputs in this Driver's workflow.
def calc_gradient(self):
    self.setup()
    # Create our 2D dictionary the first time we execute.
    if not self.gradient:
        for name in self.param_names:
            self.gradient[name] = {}
    # Pull initial state and stepsizes from driver's parameters
    ...
[ "def gradient_output(self, *, outputs: Outputs, inputs: Inputs) -> Gradients[Outputs]:", "def compute_gradient(self, verbose):\n from inversionson.helpers.autoinverter_helpers import IterationListener\n\n # Attempt to dispatch model smoothing right at the beginning.\n # So there is no smoothi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
In a navigation context, a component of an equipment is a point (tag/entity).
def __getitem__(self, key):
    # Using [key] syntax on an equipment allows to retrieve a tag directly
    # or a point referred to this particular equipment
    for each in self.tags:
        if key == each:
            return self.tags[key]
    # if key not found in tags... we probably are sear...
[ "def handle_equipment_mouseover(self):\n if self.skill_tree_displaying:\n return\n mouse_pos = pg.mouse.get_pos()\n slot_moused_over = ''\n for slot in self.equipment_tiles:\n if self.equipment_tiles[slot].collidepoint(mouse_pos):\n slot_moused_over =...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When iterating over an equipment, we iterate points.
def __iter__(self):
    for point in self.points:
        yield point
[ "def points(self):\n\n yield self[0][0]\n for dart in self:\n pit = iter(dart); pit.next() # skip first point\n for point in pit:\n yield point", "def enumerate_points(self):\n\t\traise Exception(NotImplemented)", "def iter_points(self):\n for x in range...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve an instance of the equip this entity is linked to.
def get_equip(self, callback=None):
    return self._session.get_entity(self.tags['equipRef'], callback=callback, single=True)
[ "def _get_equipment(self):\r\n eq = self._pvsr.getEquipmentByName(self._meas[\"equipment\"])\r\n if eq is None:\r\n site = self._pvsr.getSiteByName(self._default_site)\r\n if site is None:\r\n logging.info(\"Creating new default site {0}\".format(self._default_site...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the value of the indexth node in the linked list. If the index is invalid, return -1.
def get(self, index: int) -> int:
    node = self.get_node(index)
    if node:
        return node.val
    else:
        return -1
[ "def get(self, index: int) -> int:\n cnt = 0\n cur = self.head \n while cur != None:\n if(cnt == index):\n return cur.val\n cur = cur.next \n cnt += 1\n return -1", "def get(self, index: int) -> int:\n #如果索引越界,返回-1\n if inde...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
From self's ordered list of step ids, return their step objects.
def get_steps(self) -> list:
    ret_val = []
    for step_id in self:
        step_body = Steps.cache_step(step_id)
        if step_body is not None:
            ret_val.append(step_body)
    return ret_val
[ "def next_steps(self) -> List[FlowNode]:\n return [node for predicate, node in self._current_step.children]", "def __iter__(self) -> Iterable[Step]:\n\n return iter(self.steps)", "def _get_all_steps(self):\n steps = [\n self.provide_facets(),\n self.provide_garment_fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a sorted list of steps bound to the case, in ascending order.
def sort_case_steps(self, request):
    self.clear()
    # todo error with repeated step_id. use record id somehow
    for row in range(len(request)):
        data_step = request[row]
        prev_step_id = str(data_step.get(CaseSteps.PREVIOUS_STEP_ID, 0))
        step_id = str(data_step.get(C...
[ "def _possible_directions(self):\n directions = []\n for i in self.range:\n for j in (-1, 1):\n directions.append([j if i == n else 0 for n in self.range])\n return directions", "def group_consecutives(vals, step=1):\r\n\trun = []\r\n\tresult = [run]\r\n\texpect = No...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open the meeting calendar view to schedule a meeting for the current phonecall.
def action_make_meeting(self):
    partner_ids = [
        self.env['res.users'].browse(self.env.uid).partner_id.id]
    res = {}
    for phonecall in self:
        if phonecall.partner_id and phonecall.partner_id.email:
            partner_ids.append(phonecall.partner_id.id)
        res = s...
[ "def open_mwindow_agenda(self) -> None:\n self.mwindow_agenda.show()", "def meeting_times():\n app.logger.debug(\"Checking credentials for Google calendar access...\")\n credentials = valid_credentials()\n if not credentials:\n app.logger.debug(\"Redirecting to authorization...\")\n return flask.r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plots a confusion matrix of the model predictions to evaluate accuracy
def plot_confusion_matrix(self):
    interp = ClassificationInterpretation.from_learner(self.learn)
    interp.plot_confusion_matrix()
[ "def confusion_matrix_plot(y_true, y_pred) -> None:\n from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix\n\n cm = confusion_matrix(y_true, y_pred)\n plot = ConfusionMatrixDisplay(confusion_matrix=cm).plot()\n plot.ax_.set_title(\"Confusion Matrix\")", "def plot_confusion_matrix(y_val...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Classifies the labeled tiles and updates the feature layer with the prediction results in column output_label_field.

Argument                  Description
feature_layer             Required. Feature Layer for classification.
labeled_tiles_di...
def classify_features(self, feature_layer, labeled_tiles_directory, input_label_field,
                      output_label_field, confidence_field=None):
    ALLOWED_FILE_FORMATS = ['tif', 'jpg', 'png']
    IMAGES_FOLDER = 'images/'
    LABELS_FOLDER = 'labels/'
    files = []
    for ext in ALLOWED_FILE_FORMAT...
[ "def __convert_labeled_featuresets(self, labeled_featuresets, output):\n\n\t\tif isinstance(output, str):\n\t\t\toutput = open(output,'w')\n\t\telif not isinstance(output, file):\n\t\t\traise TypeError('output is a str or a file.')\n\n\t\tfor featureset, label in labeled_featuresets:\n\t\t\tfeat, label = self.__tex...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests both an ideal input file with different numbers and a file with equal numbers.
def test_ideal_file_and_file_with_zeros(file_name, result):
    assert find_maximum_and_minimum(file_name) == result
[ "def scoreEqual(self, output1, output2):\n for i, (line, line2) in enumerate(zip(open(output1).readlines(), open(output2).readlines())):\n self.assertAlmostEqual(float(line.split()[-1]), float(line2.split()[-1]), 5)", "def check(self): \n #output file created by our platform\n orig...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add app messages to context.
def messages(request):
    ctx = {}
    messages = get_messages(request)
    if messages:
        ctx['mesgs'] = messages
    return ctx
[ "def add_app(self):\n \n pass", "def add_message(self, request, level, message_template,\n message_context=None, extra_tags=''):\n if 'django.contrib.messages' in settings.INSTALLED_APPS:\n try:\n if message_context is None:\n me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combines Excel spreadsheets of quality control data into a single pickle file. This format is specific to Python, and it is very fast to load once created. This function expects a path to a folder of raw data containing .xlsx files. For example, the path
def pickle_data(path=PATH_TO_RAW_DATA):
    files = os.listdir(path)
    xlsx_files = [path+"./"+f for f in files if f[-4:] == 'xlsx']
    print("Beginning to read excel sheets...will take a few minutes")
    df_list = [pd.read_excel(f) for f in xlsx_files]
    master_df = pd.concat(df_list)
    master_df.t...
[ "def export_to_excel(self, path_name=None):\n wb = Workbook()\n for curve in self.curves:\n sheet_name = f'{curve.speed.to(\"RPM\"):.0f~P}'\n ws = wb.create_sheet(sheet_name)\n for i, p in enumerate(curve):\n i += 1 # openpyxl index\n if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks rotation matrix for d = 2.
def test_d_2():
    rs = 10
    d = 2
    np.random.seed(rs)
    num = 3
    theta = np.random.uniform(0, 2 * math.pi)
    rotation = np.identity(d)
    rotation[0, 0] = math.cos(theta)
    rotation[0, 1] = -math.sin(theta)
    rotation[1, 0] = math.sin(theta)
    rotation[1, 1] = math.cos(theta)
    np.random.seed(...
[ "def _rotation_matrix(axis1, axis2):\n rotation_matrix = axis2 * linalg.inv(axis1)\n determinant = float(linalg.det(rotation_matrix))\n if fabs((1.0-determinant))<TOLERANCE:\n AxisAngleException(\"Candidate rotation matrix has det != 1\") \n return rotation_matrix", "def _is_rotation_matrix(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The decorator for the tag class
def decorator(tag_class):
    name = tag_class.__name__
    if name.startswith('Tag'):
        name = name[3:]
    # keep all-uppercase names, they are special tags
    # like LITERAL, COMMENT, OUTPUT
    if not name.isupper():
        name = name....
[ "def tag(*args, **kwargs):\n def desc(func):\n assert not hasattr(func, 'tags')\n func.tags = Tags(*args, **kwargs)\n return func\n return desc", "def decorate(self):\n\n if self.action == Tag.Action.ASSIGN:\n className = Tag.DecoratorClass.ASSIGN\n elif self.ac...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given some date like MONTH/DAY/YEAR, make a filename like base_path/YEAR/MONTH/DAY.md.
def date_to_filename(base_path, raw_date_string):
    raw_date_string = raw_date_string[:-1]
    month, day, year = raw_date_string.split("/")
    relative_path = "{}/{}/{}.md".format(year, month, day)
    return base_path / relative_path
[ "def date_filename():\n return time.strftime('%Y%m%d%H%M%S') + '.html'", "def build_filename(date, secao, urlTitle, hive_partitioning=False):\n\n if hive_partitioning:\n prefix = 'part_data_pub=' + date.strftime('%Y-%m-%d') + '/part_secao=' + str(secao) + '/'\n else:\n prefix = ''\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given chromosome sizes, plot divider lines and labels. Draws black lines between each chromosome, with padding. Labels each chromosome range with the chromosome name, centered in the region, under a tick. Sets the axis limits to the covered range. By default, the dividers are vertical and the labels are on the X axis o...
def plot_chromosome_dividers(axis, chrom_sizes, pad=None, along="x"):
    assert isinstance(chrom_sizes, collections.OrderedDict)
    if pad is None:
        pad = 0.003 * sum(chrom_sizes.values())
    dividers = []
    centers = []
    starts = collections.OrderedDict()
    curr_offset = pad
    for label, size in lis...
[ "def _draw_dividers(chrom_offsets, ax):\n\n positions = np.array(list(chrom_offsets.values()))\n\n # Draw dividers.\n for loc in positions[1:-1]:\n ax.axvline(loc, color='grey', lw=0.5, zorder=5)\n\n # Draw xtick labels.\n ax.set_xticks((positions[:-1] + positions[1:]) / 2)\n ax.set_xtickla...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create an ordered mapping of chromosome names to sizes.
def chromosome_sizes(probes, to_mb=False):
    chrom_sizes = collections.OrderedDict()
    for chrom, rows in probes.by_chromosome():
        chrom_sizes[chrom] = rows["end"].max()
        if to_mb:
            chrom_sizes[chrom] *= MB
    return chrom_sizes
[ "def load_chrom_sizes(reference_genome):\n my_path = os.path.abspath(os.path.dirname(__file__))\n f = open(os.path.join(my_path, reference_genome + '.chrom.sizes'))\n lengths = {}\n for line in f:\n [ch, l] = line.strip().split()\n lengths[ch] = int(l)\n return lengths", "def scel_siz...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert start/end positions from genomic to binwise coordinates. Instead of chromosomal basepairs, the positions indicate enumerated bins. Revise the start and end values for all GenomicArray instances at once, where the `cnarr` bins are mapped to corresponding `segments`, and `variants` are grouped into `cnarr` bins a...
def update_binwise_positions(cnarr, segments=None, variants=None):
    cnarr = cnarr.copy()
    if segments:
        segments = segments.copy()
        seg_chroms = set(segments.chromosome.unique())
    if variants:
        variants = variants.copy()
        var_chroms = set(variants.chromosome.unique())
    # ENH: lo...
[ "def assign_ci_start_end(segarr, cnarr):\n lefts_rights = (\n (bins.end.iat[0], bins.start.iat[-1])\n if len(bins.end) > 0 and len(bins.start) > 0\n else (np.nan, np.nan)\n for _seg, bins in cnarr.by_ranges(segarr, mode=\"outer\")\n )\n ci_lefts, ci_rights = zip(*lefts_rights)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the location and size of each repeat in `values`.
def get_repeat_slices(values):
    # ENH: look into pandas groupby innards
    offset = 0
    for idx, (_val, rpt) in enumerate(itertools.groupby(values)):
        size = len(list(rpt))
        if size > 1:
            i = idx + offset
            slc = slice(i, i + size)
            yield slc, size
            offset ...
[ "def value_count(values):\n indices = {}\n for x, value in enumerate(values):\n if type(value) == list:\n value_tup = tuple(value)\n if value_tup in indices:\n indices[value_tup] += [x, ]\n else:\n indices[va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the chromosomal position of each named gene in probes. Returns dict
def gene_coords_by_name(probes, names):
    names = list(filter(None, set(names)))
    if not names:
        return {}
    # Create an index of gene names
    gene_index = collections.defaultdict(set)
    for i, gene in enumerate(probes["gene"]):
        for gene_name in gene.split(","):
            if gene_name in na...
[ "def map_probes(probeset, entrez_ids): \n entrez_idx = None\n mapping = {}\n with open(probeset) as probes:\n for line in probes:\n if line.startswith('ID'):\n entrez_idx = line.split('\\t').index('ENTREZ_GENE_ID')\n elif entrez_idx:\n # if the index has been defined then we're past t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the chromosomal position of all genes in a range. Returns dict
def gene_coords_by_range(probes, chrom, start, end, ignore=params.IGNORE_GENE_NAMES):
    ignore += params.ANTITARGET_ALIASES
    # Tabulate the genes in the selected region
    genes = collections.OrderedDict()
    for row in probes.in_range(chrom, start, end):
        name = str(row.gene)
        if name in genes:
            ...
[ "def genome_range(self, query):\n if isinstance(query, (int, numpy.integer)):\n genome_row = self.genome_of_entry_nr(query)\n if query <= 0 or query > genome_row['EntryOff'] + genome_row['TotEntries']:\n raise InvalidOmaId(query)\n else:\n genome_row = s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prints a random number between 1 and the number of sides of the die
def roll_die(self):
    number = randint(1, self.sides)
    print(number)
[ "def roll_die(self):\r\n\t\trandom = randint(1,self.sides)\r\n\t\tprint(random)", "def roll_die(self):\n print('The die (' + str(self.sides) + ' sided) rolled: ' + str(randint(1, self.sides)))", "def roll_die(self):\n\t\tresult = random.randint(1, self.sides)\n\t\treturn result", "def roll_n_sided_die(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send some data to badash
def send_to_badash(job, data):
    data['job'] = job
    data['result'] = 0
    resp = requests.post(os.environ.get('BADASH_API_URL', ''),
                         json=data,
                         headers={'X-Api-Key': os.environ.get('BADASH_API_KEY')})
    print(resp.status_code)
[ "def send(self, data):", "def sendData(self, data):\n print ('Send data to: zzzz: data: %s (unrealized)' % data)\n # TODO: realize method for sending data to a user", "def send(User,data):\n BufferManagement.append_to_buffer(data,User['ID'],User['GameID'],\"OUT\")", "def _send(self, data):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the input array is zero (smaller than machine precision) everywhere. Useful for determining if tilt angle is zero everywhere (i.e. LFM file is in GSM coordinates).
def __isZeroEverywhere(self, array):
    epsilon = numpy.finfo(type(array[0])).eps
    boolList = numpy.less_equal(numpy.abs(array), epsilon)
    for b in boolList:
        if not b:
            return False
    return True
[ "def is_zero(self):\n for t in self:\n if t != TRIT_ZERO:\n return False\n return True", "def is_close_to_zero(value: Union[float, np.ndarray]) -> Union[bool, np.ndarray]:\n return abs(value) < 1.0e-10", "def contains_zeros(self):\n if not self.IS_AOA:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transform all magnetic field B and velocity V values from SM to GSM coordinates. Store results by overwriting dataDict contents.
def __sm2gsm(self, dataDict):
    b = (dataDict.getData('bx'), dataDict.getData('by'), dataDict.getData('bz'))
    v = (dataDict.getData('vx'), dataDict.getData('vy'), dataDict.getData('vz'))
    for i, time in enumerate(dataDict.getData('time_min')):
        d = self.startDate + datetime.timedelta(minutes...
[ "def __gse2gsm(self, dates, dataArray):\n for i,data in enumerate(dataArray):\n d = dates[i]\n\n # Update magnetic field\n b_gsm = pyLTR.transform.GSEtoGSM(data[1], data[2], data[3], d) \n data[1] = b_gsm[0]\n data[2] = b_gsm[1]\n data[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert from [year, doy, hour, minute] to a datetime object
>>> sw = LFM('examples/data/solarWind/LFM_SWSMDAT')
>>> sw._LFM__parseDate('1995 80 0 1')
datetime.datetime(1995, 3, 21, 0, 1)
def __parseDate(self, dateStr):
    fields = [int(s) for s in dateStr.split()]
    date = (datetime.datetime(year=fields[0], month=1, day=1,
                              hour=fields[2], minute=fields[3])
            + datetime.timedelta(fields[1] - 1))
    return date
[ "def parse_date(text, year):\n values = text.split()\n if len(values) != 2:\n return None\n\n month = MONTHS.get(values[0])\n if not month:\n return None\n\n return datetime.datetime(year, month, int(values[1]))", "def date_parse(date_string) -> datetime:\n return datetime.strptime...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the tissue expression as a tabular text file
def tissue_table(self, condition_tissue_id, use_means=True):
    table = ExpressionProfile.__profile_to_table(
        self.tissue_profile(condition_tissue_id, use_means=use_means)
    )
    return table
[ "def summary_in_txt(df, _metric_name=\"bias\"):\n with open(\"./results/result_{}.txt\".format(_metric_name), \"w\") as f:\n f.write(tabulate(df, tablefmt=\"pipe\", headers=\"keys\"))", "def textDefinitionTable(outfile=sys.stdout ,delim=' '):\n o = outfile\n o.write('Parameter'+delim+'Number of...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the mean expression value in any condition in the plot is higher than the desired cutoff
def low_abundance(self, cutoff=10):
    data = json.loads(self.profile)
    checks = [mean(v) > cutoff for _, v in data["data"].items()]
    return not any(checks)
[ "def is_alertworthy(self):\n return (not healthdb.util.isNaN(self.zscore)) and self.zscore < 0 and (\n healthdb.util.isNaN(self.percentile) or (self.percentile < 25))", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.me...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies a conversion to the profile, grouping several conditions into one more general feature (e.g. tissue).
def tissue_profile(self, condition_tissue_id, use_means=True):
    ct = ConditionTissue.query.get(condition_tissue_id)
    condition_to_tissue = json.loads(ct.data)
    profile_data = json.loads(self.profile)
    output = ExpressionProfile.convert_profile(
        condition_to_tissue, profile_data...
[ "def calculate_thresholds_products(event, flag_value, flag_percent, value, percent):\n try:\n\n if flag_value == True:\n\n if event['groupby'] == 'week':\n value = (7 * value) / len(event['sites'])\n\n if event['groupby'] == 'hour':\n value = value / (24...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a heatmap for a given species (species_id) and a list of probes. It returns a dict with 'order' (the order of the experiments) and 'heatmap' (another dict with the actual data). Data is zlog-transformed.
def get_heatmap(species_id, probes, zlog=True, raw=False):
    profiles = (
        ExpressionProfile.query.options(undefer("profile"))
        .filter_by(species_id=species_id)
        .filter(ExpressionProfile.probe.in_(probes))
        .all()
    )
    order = []
    output = []
    ...
[ "def gene_heatmap(genes, genes_dict):\n n_genes = len(genes.index)\n n_strains = len(genes_dict.keys())\n genes_order = dict(zip(list(genes.index), range(n_genes)))\n strain_order = dict(zip(list(genes_dict.keys()), range(n_strains)))\n strain_genes = np.zeros((n_strains, n_genes))\n\n for strain,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the data for a set of probes (including the full profiles); a limit can be provided to avoid overly long queries
def get_profiles(species_id, probes, limit=1000):
    profiles = (
        ExpressionProfile.query.options(undefer("profile"))
        .filter(ExpressionProfile.probe.in_(probes))
        .filter_by(species_id=species_id)
        .options(joinedload("sequence").load_only("name").noload("xrefs"))
        ...
[ "def get_records(ShardIterator=None, Limit=None):\n pass", "def get_brapi_trials(endpoint):\n page = 0\n pagesize = 10\n maxcount = None\n while maxcount is None or page*pagesize < maxcount:\n params = {'page': page, 'pageSize': pagesize}\n r = requests.get(endpoint+'trials', params=p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }