Dataset columns:
  query      string  (length 9 to 9.05k)
  document   string  (length 10 to 222k)
  negatives  list    (length 19 to 20)
  metadata   dict
Get France team classification based on per region scores
def get_france_team_classif(team_scores: dict) -> dict:
    team_names = list(team_scores)
    team_france_scores = {team: sum(team_scores[team].values()) for team in team_scores}
    ordered_idx = np.argsort(list(team_france_scores.values()))
    france_team_classif = {i+1: (team_france_scores[...
[ "def scores_by_class(self):\n return self.map_results(lambda x: set(x['bounding_box_classes']) & set(x['ground_truth_classes']),\n f1_score)", "def compute_score(scores):\n\tcurr_score = 50\n\tfor classification in scores: \n\t\tif classification == 1 or classification == 3: ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save per region score to a .csv file
def save_per_region_score_to_csv(team_scores: dict, result_dir: str, date_of_run: datetime.datetime):
    # get team and region names
    team_names = list(team_scores)
    regions = [list(team_scores[team]) for team in team_names]
    # check that all teams have the sa...
[ "def save_csv(self, path):\n\n self.scores.to_csv(path, header=False)", "def save_csv(self, filename):\n redditors = set(self.submitters.keys()).union(self.commenters.keys())\n mapping = dict((x.lower(), x) for x in redditors)\n with codecs.open(filename, 'w', encoding='utf-8') as outf...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get improvement trajectories (in terms of scores) of the different teams based on a list of run dates
def get_improvement_traj(current_dir: str, list_of_run_dates: list, team_names: list):
    # initialize score trajectories
    scores_traj = {team: {} for team in team_names}
    # loop over run folders
    for run_date in list_of_run_dates:
        try:
            current_df = pd.read_csv(os.path.jo...
[ "def get_projected_scores(self, nba_teams, end_date, league_players, current_scores): \n # Get the rosters for each team and the current matchup score\n self.league_players = league_players\n self.rosters = [self.get_team_roster(self.get_user_team_id(), self.league_players), \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts the extracted parsers' groups dict, where groups are strings, to a dict where each group has been converted to the proper type defined in the parser object. Params come from only one line of the logs.
def convert_parsers_groups_from_matched_line(self, line):
    params_dict = self.get_extracted_parsers_params(line)
    converted_params = {}
    for parser_name, parser in six.iteritems(params_dict):
        converted_params[parser_name] = self._parsers_dict[parser_name].convert_params(parser)
    ...
[ "def convert_line(line):\n split_line = line.split(',')\n N, L = split_line.pop(0).split() # Number of neurons in group and max Layer in the first line\n fired = [] # save all time/sender pairs of the group, these items are in pairs of two\n while len(split_line[0].split()) < 4:\n s, t = split_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts groups from subregexes that matched with given line
def get_extracted_parsers_params(self, line):
    # Handle case when regex module is not installed by matching many regexes
    if IMPORTED_RE:
        extracted_regex_params = {}
        self._brute_subregexes_matching(extracted_regex_params, 0, len(self._parsers) - 1, line)
        return extracte...
[ "def _split_by_lines_matching(pattern, lines):\n groups = [[]]\n for line in lines:\n if re.match(pattern, line): # Start a new group\n groups.append([])\n groups[-1].append(line)\n\n return [g for g in groups if g]", "def parse_groups_file(path):\n groups = []\n\n def read_li...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Loads corpus from text file
def load_corpus(input_file):
    print('Loading corpus...')
    time1 = time.time()
    corpus = input_file.read()
    time2 = time.time()
    total_time = time2 - time1
    print('It took %0.3f seconds to load corpus' % total_time)
    return corpus  # return the loaded text so callers can actually use it
[ "def load_corpus(self, dir):\n word_fn = codecs.open(dir + \"word.dic\", \"r\", \"utf-8\")\n for line in word_fn:\n word_nr, word = line.strip().split(\"\\t\")\n self.int_to_word.append(word)\n self.word_dict[word] = int(word_nr)\n word_fn.close()\n tag_f...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the slack connection ID
def __init__(self): self.SLACK_CONN_ID = 'slack'
[ "def __init__(self, *args, **kwargs):\n self.server = kwargs['server']\n self.token = kwargs['token']\n self.connection = SlackClient(self.token)", "def __init__(self, slack_id: str):\n self.slack_id = slack_id\n self.name = \"\"\n self.email = \"\"\n self.github_u...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the mass of a particle based on the number of electrons / protons / neutrons. Note that this function supports integers, floats, and arrays of integers / floats.
def get_mass_part(electrons_nb, protons_number, neutrons_number): return (neutrons_number+protons_number)*NUCLEON_MASS+electrons_nb*ELECTRON_MASS
[ "def particle_mass(self):\n return mass(\n radius=self.particle_radius,\n density=self.particle_density,\n shape_factor=self.shape_factor,\n volume_void=self.volume_void,\n )", "def calculate_particle_mass(particle_volume, gas_density=dfl['values'][\n '...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes an analyzer result.
def __init__(self):
    super(AnalyzerResult, self).__init__()
    self.analyzer_name = None
    self.attribute_name = None
    self.attribute_value = None
[ "def __init__(self):\n self.analyzer_map = dict()", "def __init__(self, result):\n self.result = LinkerUnit(result)\n self.modules = []", "def __init__(self):\n self.TestResults = self.enum(\n RESULT_SUCCESS=\"success\",\n RESULT_FAILURE=\"failure\",\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a key K for each secret. The key will be used to encrypt all secrets. Key is later split among users.
def cipher_generate_keys(self):
    for secret in range(self.k):
        key = os.urandom(Dealer.AES_KEY_LEN)
        self.cipher_keys.append(key)
[ "def keygen(self):\n self._generate_sk()\n self._generate_pk()\n return [self._secret_key, self._public_key]", "def generate_key(force=False):\n if generate_key.secret_key is not None and not force:\n return generate_key.secret_key\n\n choices = 'abcdefghijklmnopqrstuvwxyz0123456...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Symmetric encryption of input using key. If input is not bytes, converts the input to the needed format.
def cipher_encrypt(self, input, key):
    if(isinstance(input, int)):
        input = input.to_bytes(bytehelper.bytelen(input), byteorder='big')
    elif(isinstance(input, str)):
        input = input.encode('utf-8')
    assert(isinstance(input, (bytes, bytearray)))
    # Perform padding if th...
[ "def SymmetricEncrypt(plaintext, key):\n \n # Define output\n ciphertext = \"\"\n \n # Define alphabet\n upper = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n lower = \"abcdefghijklmnopqrstuvwxyz\"\n \n # Make key uppercase\n key = key.upper()\n \n # Convert letters in plaintext to position n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Named for compatibility with other algorithms. Scan for pseudo shares specific to a chosen participant.
def get_pseudo_shares_for_participant(self, participant):
    my_shares = {}
    access_group = 0  # Herranz scheme does not use groups
    assert self.key_shares
    assert participant != 0
    for i, gamma in enumerate(self.access_structures):
        for q, A in enumerate(self.access_struct...
[ "def set_pseudo_shares_from_participant(self, participant, my_pseudo_shares):\n\n for i, _ in enumerate(self.access_structures):\n for q, _ in enumerate(self.access_structures[i]):\n for b, Pb in enumerate(self.access_structures[i][q]):\n if Pb == participant:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take the my_pseudo_shares dictionary from a specific user and put the shares into the right places in the dealer's pseudo_shares nested list. (Reverse of get_pseudo_shares_for_participant().)
def set_pseudo_shares_from_participant(self, participant, my_pseudo_shares):
    for i, _ in enumerate(self.access_structures):
        for q, _ in enumerate(self.access_structures[i]):
            for b, Pb in enumerate(self.access_structures[i][q]):
                if Pb == participant:
                    ...
[ "def get_pseudo_shares_for_participant(self, participant):\n\n my_shares = {}\n access_group = 0 # Herranz scheme does not use groups\n\n assert self.key_shares\n assert participant != 0\n\n for i, gamma in enumerate(self.access_structures):\n for q, A in enumerate(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all the images and labels in directory/label/*.jpg
def list_images(directory, labelmap_file):
    #pdb.set_trace()
    fid = open(labelmap_file)
    lbldata = json.load(fid)
    fid.close()
    label_map = lbldata[0]
    alldirs = sorted(os.listdir(directory))
    filenames = []
    labels = []
    files_and_labels = []
    for label in alldirs:
        for f in os.l...
[ "def get_img_labels(self):\n if self.img_dir is None:\n return None\n return self.get_dir_labels(self.img_dir)", "def get_labels(self):\n return {get_label_path(img) for img in self.imgs}", "def getTrainImages(self):\n images = []\n labels = []\n # Get all fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compares the modification time of a folder or file with the current time. Returns True if the folder/file was modified within the creation_safety period.
def within_modify_date(path, creation_safety=60):
    # The modification time
    folder_time = os.path.getmtime(path)
    # Compare with current time
    output = (time() - folder_time) < creation_safety
    return output
[ "def created_today(self):\n # Get the current time\n current_time = time.localtime()\n\n # Last time the DRP file was created (or modified?)\n created_time = time.gmtime(os.path.getctime(self.file_path()))\n\n return (current_time.tm_year == created_time.tm_year and\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ``True`` if the locals in this table can be optimized.
def is_optimized(self, ): pass
[ "def is_local_compilation_feasible(template_labels):\n if oplabel.number_of_qubits <= 1:\n return len(template_labels) > 0 # 1Q gates, anything is ok\n elif oplabel.number_of_qubits == 2:\n # 2Q gates need a compilation gate that is also 2Q (can't do with just 1Q...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ``True`` if the block is a nested class or function.
def is_nested(self, ): pass
[ "def IsNestedFamORAssem(self) -> bool:", "def IsNestedFamANDAssem(self) -> bool:", "def IsBlock(self) -> bool:", "def is_block(self):\n if self.get_level() == 1:\n return True\n else:\n return False", "def IsNestedFamily(self) -> bool:", "def IsBlock(block_name):\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ``True`` if the block uses ``exec``.
def has_exec(self, ): pass
[ "def is_executable(block, lang, attrs):\n return (is_code_block(block) and attrs.get('eval') is not False and\n lang is not None)", "def is_exec(self):\n return 'exec' in self.user.groups.values_list(Lower('name'), flat=True)", "def allows_execution(self):\n return self.state.allows_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ``True`` if the block uses a starred fromimport.
def has_import_star(self, ): pass
[ "def IsImport(self) -> bool:", "def is_import(node):\r\n return node.type in (syms.import_name, syms.import_from)", "def is_imported(product):\n return True if 'import' in product else False", "def hasImport(self, text, module_name):\n if self.hasPattern(text, \"import %s\" % module_name):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a tuple containing names of locals in this function.
def get_locals(self, ): pass
[ "def getvars(f):\n return f.__code__.co_varnames", "def pdb_locals(self):\r\n if self.pdb_frame:\r\n return self.pdb_obj.curframe_locals\r\n else:\r\n return {}", "def get_variables(self):\n variable_names = []\n for block in self.variable_block():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ``True`` if the symbol is a parameter.
def is_parameter(self, ): pass
[ "def is_parameter(name):\n return name.startswith('par-')", "def is_param(obj):\n return isParameter(obj)", "def has_parameter(self, a_name):\n return a_name in self.parameters", "def hasParameter(self, p) :\n return p in self.parameters", "def accepts_parameter(func, param):\n signat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ``True`` if the symbol is global.
def is_global(self, ): pass
[ "def is_declared_global(self, ):\n\t\tpass", "def global_exists(self, global_name):\n return self.evaluate('!(typeof %s === \"undefined\");' %\n global_name)", "def is_global_name(name):\n return not name.startswith('_')", "def is_global_prefix_set() -> bool:\n return os.path.isfil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ``True`` if the symbol is declared global with a global statement.
def is_declared_global(self, ): pass
[ "def is_global(self, ):\n\t\tpass", "def global_exists(self, global_name):\n return self.evaluate('!(typeof %s === \"undefined\");' %\n global_name)", "def is_global_name(name):\n return not name.startswith('_')", "def is_global_variable_exists(a_variable_name):\n return is_prefixed_va...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ``True`` if the symbol is local to its block.
def is_local(self, ): pass
[ "def has_local_as(self, local_as):\n for as_path_seg in self._path_seg_list:\n for as_num in as_path_seg:\n if as_num == local_as:\n return True\n return False", "def is_relative_local(self) -> bool:\n\n return self.__is_relative_local", "def is_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return ``True`` if name binding introduces new namespace. If the name is used as the target of a function or class statement, this will be true.
def is_namespace(self, ): pass
[ "def is_named(self) -> bool:\n return bool(self.name)", "def correct_namespace(name, api_name, env_name) -> bool:\n regex = f\"^{api_name}(-[a-z]+)*-{env_name}(-[a-z]+)*$\"\n return bool(re.match(regex, name))", "def match_namespace(self, qname: str) -> bool:\n\n if self.namespace_matches is...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs Losetup on a target block device or image file.
def PreprocessLosetup(source_path, partition_offset=None, partition_size=None):
    losetup_device = None
    if not os.path.exists(source_path):
        raise TurbiniaException(
            ('Cannot create loopback device for non-existing source_path '
             '{0!s}').format(source_path))
    # TODO(aarontp): Remove hard-coded ...
[ "def unified_module_setup(self):\n # Assign breakdown photo / report paths as well as final report path\n self.unified_module.cropped_subcomponent_dir_filepath = self.image_breakdown_dir\n self.unified_module.breakdown_reports_dir = self.breakdown_report_dir\n self.unified_module.final_r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Uses lsblk to detect the filesystem of a partition block device.
def GetFilesystem(path):
    cmd = ['lsblk', path, '-f', '-o', 'FSTYPE', '-n']
    log.info('Running {0!s}'.format(cmd))
    fstype = subprocess.check_output(cmd).split()
    if not fstype:
        # Lets wait a bit for any previous blockdevice operation to settle
        time.sleep(2)
        fstype = subprocess.check_output(cmd).split()...
[ "def imager_list_usb(self, partition=1):\r\n disk = []\r\n if platform.system() == \"Linux\":\r\n output = subprocess.check_output(\"lsblk -i\", shell=True)\r\n if not partition == 1:\r\n for line in output.splitlines():\r\n line = line.split()\r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets account information for a particular coin
def get_account_information(self, coin):
    accounts = self.auth_client.get_accounts()
    for account in accounts:
        if coin in account['currency']:
            return float(account['available'])
    return None
[ "def get_coin_info():\n query = iroha.query('GetAssetInfo', asset_id='coin#domain')\n IrohaCrypto.sign_query(query, admin_private_key)\n\n response = net.send_query(query)\n data = response.asset_response.asset\n print('Asset id = {}, precision = {}'.format(data.asset_id, data.precision))", "def ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies Conv2D based on the number of anchors and classification classes, then reshapes the Tensor.
def conv_classes_anchors(inputs, num_anchors_stage, num_classes):
    x = tf.keras.layers.Conv2D(
        filters=num_anchors_stage * (num_classes + 5),
        kernel_size=1,
        strides=1,
        padding="same",
        use_bias=True,
    )(inputs)
    x = tf.keras.layers.Reshape(
        (x.shape[1], x.shape[2]...
[ "def _postprocess_conv2d_output(x, data_format):\n\n if data_format == 'channels_first':\n x = tf.transpose(x, (0, 3, 1, 2))\n\n if K.floatx() == 'float64':\n x = tf.cast(x, 'float64')\n return x", "def _postprocess_conv2d_output(x, data_format):\n\tif data_format == 'channels_first':\n\t\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies the yolov4 box regression algorithm on the output of a stage.
def yolov3_boxes_regression(feats_per_stage, anchors_per_stage):
    grid_size_x, grid_size_y = feats_per_stage.shape[1], feats_per_stage.shape[2]
    num_classes = feats_per_stage.shape[-1] - 5  # feats.shape[-1] = 4 + 1 + num_classes
    box_xy, box_wh, objectness, class_probs = tf.split(feats_per_...
[ "def ml_loop(side: str):\n\n # === Here is the execution order of the loop === #\n # 1. Put the initialization code here\n ball_served = False\n who_serve = 2\n filename = path.join(path.dirname(__file__),\"save\\SVMRegression_1.pickle\")\n with open(filename, 'rb') as file:\n clf = pickle....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use pandas .to_numpy() to get an ndarray from a Pandas object if you don't need to preserve the dtypes. Do not use .values; .to_numpy() is available in Python 2 and Python 3.
def dataframe_to_ndarray():
    df = pd.DataFrame(operations.get_mixed_matrix())
    print(type(df))  # <class 'pandas.core.frame.DataFrame'>
    print(df)
    ary = df.to_numpy()
    print(type(ary))  # <class 'numpy.ndarray'>
    print(ary)
    print(ary.shape)  # (10, 10)
[ "def to_ndarray(df: DataFrameType) -> NDArray:\n if isinstance(df, (cudf.DataFrame, pd.DataFrame)):\n return df.values\n elif isinstance(df, (dask_cudf.DataFrame, dd.DataFrame)):\n return df.compute().values\n else:\n raise NotImplementedError(f'Conversion of type {type(df)} is not sup...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes sure that the node is the biggest one out of its children. PSEUDO CODE (A is an array with index 0..n; i is the index of the node to MaxHeapify; heapSize is the size of the heap to MaxHeapify):
MaxHeapify(A, i, heapSize)
  l = Left(i)
  r = Right(i)
  if l < heapSize and A[l] > A[i]
    largest = l
  else
    largest = i
  if r < heapSize and A[r] > A[largest]
    largest = r
  if...
def max_heapify(A: list, i: int, heapSize: int):
    l = left(i)
    r = right(i)
    if l < heapSize and A[l] > A[i]:
        largest = l
    else:
        largest = i
    if r < heapSize and A[r] > A[largest]:
        largest = r
    if largest != i:
        temp = A[largest]
        A[largest] = A[i]
        A[i] = temp
        ...
[ "def max_heapify(arr):\n parent = ((len(arr) - 1) - 1 ) // 2\n while parent >= 0:\n shift_down(arr, parent)\n parent -= 1\n return", "def repair_heap(array, start_index, heap_size):\n\n # Check given given parameter data type.\n if not type(array) == list:\n raise TypeError('ar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shuffles same-length arrays `a` and `b` in unison
def shuffle_unison(a, b):
    c = np.c_[a.reshape(len(a), -1), b.reshape(len(b), -1)]
    np.random.shuffle(c)
    return c[:, :a.size//len(a)].reshape(a.shape), c[:, a.size//len(a):].reshape(b.shape)
[ "def unison_shuffled_copies(a, b):\r\n assert len(a) == len(b)\r\n p = np.random.permutation(len(a))\r\n return a[p], b[p]", "def shuffle_in_unison_inplace(a, b):\n assert len(a) == len(b)\n p = np.random.permutation(len(a))\n return a[p], b[p]", "def unison_shuffled_copies(arrays):\n\tlength = len(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
answer an appropriate salutation
def _salutation_for_member(self, mdata):
    key = mdata.get('salutation', '')
    return self.salutations.get(key, '') % mdata
[ "def guess_rate(salary):\n if salary < 50:\n return \"per hour\"\n elif salary < 10000:\n return \"per day\"\n else:\n return \"per annum\"", "def test_salutation():\n assert salutation(\"Kinga\") == \"Hello, Kinga!\"", "def Salary(self, h):\n\t\treturn _hi.hi_Person_Salary(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
a comment has been edited. Notify the creator of the comment.
def comment_edited(self, comment):
    # do not notify the creator if she has edited the comment herself
    mtool = getToolByName(comment, 'portal_membership')
    member = mtool.getAuthenticatedMember()
    creator = mtool.getMemberById(comment.Creator())
    if (member == creator) or creator is N...
[ "def subscription_comment_edited(self, comment):\n if not self.subscription_comment_edited_text or not self.subscription_comment_edited_text.strip():\n return\n thread = comment.getConversation()\n di = self._thread_info(thread)\n di['commenturl'] = comment.absolute_url()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
a thread has been moved to a new board. Notify all contributors.
def thread_moved(self, thread):
    if not self.thread_moved_text or not self.thread_moved_text.strip():
        return
    di = self._thread_info(thread)
    memberids = set([comment.Creator() for comment in thread.getComments()])
    for memberid in memberids:
        md = self._memberdata_for...
[ "def update_trello_board(self):\n # Don't update the card if it is in any of these lists\n skip_list_ids = [self.trello.get_list_id(self.trello_board,COMPLETED),\n self.trello.get_list_id(self.trello_board,ABORTED)]\n updated = False\n runs = self.list_runs()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
a comment has been deleted. Notify its creator.
def comment_deleted(self, comment):
    if not self.comment_deleted_text or not self.comment_deleted_text.strip():
        return
    thread = comment.getConversation()
    di = self._thread_info(thread)
    di['commenturl'] = comment.absolute_url()
    md = self._memberdata_for_content(comment)...
[ "def delete_comment(self):\n self.content = \"Comment was deleted\"\n self.status = \"deleted\"\n self.save()", "def test_post_comment_notification_is_deleted_when_deleting_comment(self):\n user = make_user()\n\n commenter = make_user()\n\n post = user.create_public_post(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
a comment has been edited. Notify thread subscribers.
def subscription_comment_edited(self, comment):
    if not self.subscription_comment_edited_text or not self.subscription_comment_edited_text.strip():
        return
    thread = comment.getConversation()
    di = self._thread_info(thread)
    di['commenturl'] = comment.absolute_url()
    subscr...
[ "def comment_edited(self, comment):\n # do not notify the creator if she has edited the comment herself\n mtool = getToolByName(comment, 'portal_membership')\n member = mtool.getAuthenticatedMember()\n creator = mtool.getMemberById(comment.Creator())\n if (member == creator) or cr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
a comment has been added to a thread. Notify thread subscribers.
def subscription_comment_added(self, comment):
    if not self.subscription_comment_added_text or not self.subscription_comment_added_text.strip():
        return
    thread = comment.getConversation()
    di = self._thread_info(thread)
    di['commenturl'] = comment.absolute_url()
    subscript...
[ "def on_comments_changed(self, old, new):", "def comment(thread_uid):\n thread = storage.get_thread(thread_uid)\n if not thread:\n abort(404)\n\n text = request.form.get('text') or ''\n if not text:\n return error('comment:text')\n\n storage.add_comment(thread_uid, g.username, text)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get all versions that the user has access, sorted.
def _get_active_versions_sorted(self):
    project = self._get_project()
    versions = project.ordered_active_versions(
        user=self.request.user,
        include_hidden=False,
    )
    return versions
[ "def get_versions(cls, name, user_id=None):\n if user_id:\n return cls._get(sql.Card.get_versions_with_collection_information(), user_id=user_id, name=name)\n else:\n return cls._get(sql.Card.get_versions(), name=name)", "def get_model_versions(self, user, name):\n\n mod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the models.Base used as the declarative base for models.
def get_base() -> typing.Any:
    # pylint: disable=no-member
    return open_alchemy.models.Base  # type: ignore
[ "def get_base(name='default'):\n if name not in __AVAILABLE_MANAGERS__:\n __AVAILABLE_MANAGERS__[name] = declarative_base()\n return __AVAILABLE_MANAGERS__[name]", "def base(self):\n ret = self._get_attr(\"base\")\n return IMedium(ret)", "def hack_declarative_base():\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set an association table on the models.
def set_association(*, table: sqlalchemy.Table, name: str) -> None: setattr(open_alchemy.models, name, table)
[ "def create_table_for(self, model):", "def setTable(self, tableName):\n # TODO: Need to check if tableName really exists in the selected database before setting it.\n # if does not exist throw exception.\n # right now any string is selected as database - need to FIX this.\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set model by name on models.
def set_model(*, name: str, model: TUtilityBase) -> None: setattr(open_alchemy.models, name, model)
[ "def model_name(self, name):\n\n self._model_name = name", "def setModel(engine,model):\n engine.model = model", "def register_model(name: str) -> None:\n # Add the model to the list of valid models.\n VALID_MODELS.append(name)", "def _set_model_name_if_needed(self):\n if self._base_name:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yields edges between each node and `halfk` neighbors.
def adjacent_edges(nodes, halfk):
    n = len(nodes)
    for i, u in enumerate(nodes):
        for j in range(i+1, i+halfk+1):
            v = nodes[j % n]
            yield u, v
[ "def k_edge_subgraphs(self, k):\n if k < 1:\n raise ValueError('k cannot be less than 1')\n H = self.H\n A = self.A\n # \"traverse the auxiliary graph A and delete all edges with weights less\n # than k\"\n aux_weights = nx.get_edge_attributes(A, 'weight')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes a ring lattice with `n` nodes and degree `k`.
def make_ring_lattice(n, k):
    G = nx.Graph()
    nodes = range(n)
    G.add_nodes_from(nodes)
    G.add_edges_from(adjacent_edges(nodes, k//2))
    return G
[ "def generate_k_star_system(n, k):\n dag = np.zeros((n,n))\n r = int(math.ceil(n/k))\n host = 0 \n nodes = np.arange(n)\n np.random.shuffle(nodes)\n\n for i in range(n):\n if i%r == 0:\n host = i\n else:\n #start undirected\n dag[nodes[host], nodes[i]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select a random subset of nodes without repeating.
def _random_subset(repeated_nodes, k):
    targets = set()
    while len(targets) < k:
        x = random.choice(repeated_nodes)
        targets.add(x)
    return targets
[ "def random_subset(self, perc=0.5):", "def pick_nodes(self):\n if self.nodes == []:\n return []\n return self.nodes\n # return sample(self.nodes,1)", "def sample_random_node(self):\n #Naive Approach \n return self.tree[int(self.rng.random()*len(self.tree))] # OUT OF BOU...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assigns marksmanships to each player in the graph.
def assign_marksmanships(self):
    list_of_marksmanships = [0.5, 0.8, 1]
    for node in self.G.nodes():
        self.G.nodes[node]["marksmanship"] = random.choice(list_of_marksmanships)
[ "def assignSpies(self):\n # Number of spies should be number of players / 3 rounded up\n numSpies = -(-len(self.players) // 3)\n \n # create local instance of players for shuffling\n players = self.players\n shuffle(players)\n # grab from the front of the shuffled p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets a duel between two players with a random shooting order.
def random_duel(self, players):
    if self.marksmanships[players[0]] == self.marksmanships[players[1]]:
        return players
    self.duel_count += 1
    while len(players) == 2:
        random.shuffle(players)
        starting_player = players[0]
        if flip(self.marksmanships[starting_player]):...
[ "def pickMontyDoor(door1,door2,door3,doors,PlayerDoor):\r\n MontyDoor = randint(1,3)\r\n while MontyDoor == PlayerDoor:\r\n MontyDoor = randint(1,3)\r\n ## With the Monty's door selected, we must now make sure he didnt pick the same\r\n ## door as the Player as well as with the car behind it.\r\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make a random player in G compete in either a duel or truel depending on how many neighbors it has.
def step(self):
    # Get random player
    players = [random.choice(list(self.G.nodes()))]
    # if this player has at least two neighbors, go into a truel; otherwise, go into a duel
    if len(self.G[players[0]]) > 1:
        players.extend(_random_subset(list(self.G[players[0]]), 2))
        #print("Truel: ", players)
    ...
[ "def random_strategy(player, board):\n return random.choice(othello.legal_moves(player, board))", "def random_move (self, player):\n choices = []\n for cell in self.cells.keys():\n if (self.cells[cell] == self.empty):\n choices.append(cell)\n n_choices = len(choic...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs the simulation k times for a random seed.
def monte_carlo(n, k, seed, runs):
    res = np.array([0.0, 0.0, 0.0])
    for run in range(runs):
        print("run:" + str(run))
        simulation = HungerGames(n, k, seed)
        winners = np.array(simulation.loop())
        res += winners
    return res/k
[ "def sample(population, k, seed=42):\n if k is None or k > len(population):\n return population\n random.seed(len(population) * k * seed)\n return random.sample(population, k)", "def run_k_trials(self, controller, k):\n avg_lifetime = 0.\n for i in range(k):\n avg_lifetime...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a correlation key. Used for correlation of process instances that wait for incoming messages. Only global process instance variables are considered.
def add_correlation_key(self, name: str, value: typing.Any, type_: str = None) -> None: self.correlation_keys[name] = {'value': value, 'type': type_}
[ "def add_local_correlation_key(self, name: str, value: typing.Any, type_: str = None) -> None:\n self.local_correlation_keys[name] = {'value': value, 'type': type_}", "def add_zabbix_key(self):\n\n self.zagg_sender.add_zabbix_keys({self.args.key : self.args.value})", "def api_addkey(keydata):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a correlation key. Used for correlation of process instances that wait for incoming messages. Only variables in the execution scope are considered.
def add_local_correlation_key(self, name: str, value: typing.Any, type_: str = None) -> None: self.local_correlation_keys[name] = {'value': value, 'type': type_}
[ "def add_correlation_key(self, name: str, value: typing.Any, type_: str = None) -> None:\n self.correlation_keys[name] = {'value': value, 'type': type_}", "def add_zabbix_key(self):\n\n self.zagg_sender.add_zabbix_keys({self.args.key : self.args.value})", "def api_addkey(keydata):\n config = Gi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add local variables to the process after correlating the message.
def add_local_process_variable(
        self, name: str, value: typing.Any, type_: str = None,
        value_info: typing.Mapping = None) -> None:
    self.process_variables_local[name] = {
        'value': value, 'type': type_, 'valueInfo': value_info
    }
[ "def add_to_local_frame(self, variable):\n\n if self.local_frame is None:\n sys.stderr.write(\"ERROR: LF is not initialized!\\n\")\n exit(55)\n\n if not self.get_from_local_frame(variable.name, False):\n self.local_frame.append(variable)\n self.vars_stats_calcul...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merges all mask metadata and returns test metadata. This function works well only when you define the mask in YAML. Exports a YAML file with only the components that have a measurement label. This is the automatic version of write_labels combined with merge_test_metadata.
def merge_metadata(
    gdspath: Path,
    labels_prefix: str = "opt",
    layer_label: Tuple[int, int] = gf.LAYER.TEXT,
) -> DictConfig:
    mdpath = gdspath.with_suffix(".md")
    yaml_path = gdspath.with_suffix(".yml")
    test_metadata_path = gdspath.with_suffix(".tp.yml")
    build_directory = gdspath.parent.pare...
[ "def data_dict_prep(data_folder: str, mask_label: str) -> Dict[str, List[str]]:\n imagedict = {}\n maskdict = {}\n combineddict = {}\n # assign nifti images to imagedict and masks to maskdict\n for dirpath, subdirs, files in os.walk(data_folder):\n for file in files:\n # making sure...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert a type name to a type.
def _str_to_type(type_name):
    type_ = _PRIMITIVE_TYPES.get(type_name)
    if type_ is not None:
        return type_
    return getattr(sys.modules[__name__], type_name)
[ "def _translate_type(type_name):\n if not isinstance(type_name, str):\n raise Exception('Type name must be a string')\n type_name = _sanitize_identifier(type_name)\n\n return _ASN1_BUILTIN_TYPES.get(type_name, type_name)", "def parse_type(type_name):\n for name, type_object in _type_definitions...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for create_using_post1
def test_create_using_post1(self): pass
[ "def test_create_document_using_post(self):\n pass", "def test_create_node_using_post(self):\n pass", "def test_create_account_using_post(self):\n pass", "def test_update_using_post1(self):\n pass", "def test_create_node_relationship_using_post(self):\n pass", "def test_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for delete_using_delete1
def test_delete_using_delete1(self): pass
[ "def test_delete_document_using_delete(self):\n pass", "def test_delete_by_id(self, _id):", "def test_delete_query(self):\n pass", "def test_delete_checker_result(self):\n pass", "def test_delete_escalation(self):\n pass", "def test_delete_node_using_delete(self):\n pass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for get_using_get2
def test_get_using_get2(self): pass
[ "def test_hirststonge_using_get2(self):\n pass", "def test_hirststonge_using_get(self):\n pass", "def test_hirststonge_using_get1(self):\n pass", "def test_list_using_get1(self):\n pass", "def test_get_node_using_get(self):\n pass", "def test_hirststonge_using_get3(self)...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for list_pipeline_template_dependents_using_get1
def test_list_pipeline_template_dependents_using_get1(self): pass
[ "def test_get_dependents_for_task(self):\n pass", "def template_deps(self, template_name):\n raise NotImplementedError()", "def test_post_get_prepper_template_list(self):\n pass", "def test_parameter_references(spec):\n params = tp.get_parameter_references(spec)\n assert params == s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for list_using_get1
def test_list_using_get1(self): pass
[ "def test_me_get_list(self):\n pass", "def test_get_note_from_list(self):\n notes = [\"a\" , \"b\" , \"c\"]\n id = 1\n expected_output = \"b\"\n self.assertEqual(expected_output, get(notes,id))", "def test_list_pipeline_template_dependents_using_get1(self):\n pass", "...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for list_versions_using_get
def test_list_versions_using_get(self): pass
[ "def test_version_list_ok(self):\n rv, output = self.execute('version list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)", "def test_list_versions(self):\n self.metadata.create_or_update(data=self.create)\n\n # Find by name\n res_name = self.meta...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for plan_using_post
def test_plan_using_post(self): pass
[ "def test_post_job(self):\n pass", "def test_create_using_post1(self):\n pass", "def test_create_decision_tree_using_post(self):\n pass", "def test_create_decision_tree_result_using_post(self):\n pass", "def run_post_test(self):\n pass", "def test_post_muveto_pmts(self):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for update_using_post1
def test_update_using_post1(self): pass
[ "def test_update_post(self):\n\n # Create new post to update so doesn't interfere with old test that specify a specific pid\n # Incase we run the test multiple times\n\n request_body = {\n \"username\": \"Test_Post_Username_2\",\n \"anonymous\": False,\n \"topic...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that notify subprocess exceptions are handled correctly.
async def test_subprocess_exceptions(
    caplog: pytest.LogCaptureFixture, hass: HomeAssistant, load_yaml_integration: None
) -> None:
    with patch(
        "homeassistant.components.command_line.notify.subprocess.Popen"
    ) as check_output:
        check_output.return_value.__enter__ = check_output
        check...
[ "def test_syscall_with_error(self):\n with self.assertRaises(Exception):\n utils.syscall(\"notacommandunlessyoumadeitone\")", "def test_notify_run_status(self):\n pass", "def test_rsync_agent_propagate_error(self):\n with mock.patch.object(\n transfer.subprocess, 'chec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List all the members of the group. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response.
def list_group_members(self, group, **kwargs):
    all_params = ['group', 'resolve_names', 'limit', 'zone', 'provider']
    all_params.append('callback')
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                ...
[ "def ls_members(self, group, **kwargs):\n members = []\n status, data = self.run_gerrit_command('ls-members', group, **kwargs)\n if status == 0:\n member_list = data.split('\\n')[1:]\n for member_str in member_list:\n user_list = member_str.split('\\t')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Input options from the analysis filtering HTML form are processed here, and custom Django database filtering procedures are automatically specified. Labels for the plot are dynamically created based on the parameters included when calling the plotting method. The column name in the relevant database table is specified through a ...
def GeneratePlot(request):
    form = PlotCustomizationOptions(request.POST or None)
    if request.method == 'POST':
        if form.is_valid():
            FilterThreshold = form.cleaned_data.get('FilterThreshold', 0)
            if (form.cleaned_data.get('ArithmaticOperator', 0) == 'GreaterThan'):
                ...
[ "def graph_controls(chart_type, df, dropdown_options, template):\n length_of_options = len(dropdown_options)\n length_of_options -= 1\n\n plot = px.scatter()\n\n if chart_type == 'Scatter plots':\n st.sidebar.subheader(\"Scatterplot Settings\")\n\n try:\n x_values = st.sidebar.s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of letters that can go at the specified index of the word
def get_allowed_letters(self, word, index):
    words = self.search(word)
    return set([w[0][index] for w in words])
[ "def find_all_indexes(word, letter):\n return [index for index, character in enumerate(word) if character == letter]", "def index_words_typical(text):\n result = []\n if text:\n result.append(0)\n for index, letter in enumerate(text):\n if letter == ' ':\n result.append(index+...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the Mycroft notification service.
def get_service(
    hass: HomeAssistant,
    config: ConfigType,
    discovery_info: DiscoveryInfoType | None = None,
) -> MycroftNotificationService:
    return MycroftNotificationService(hass.data["mycroft"])
[ "def get_service(hass, config, discovery_info=None):\n return ClickatellNotificationService(config)", "async def async_get_service(hass, config, discovery_info=None):\n notification_devices = []\n for device in hass.data[DATA_KNX].xknx.devices:\n if isinstance(device, XknxNotification):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a message for mycroft to speak on the instance.
def send_message(self, message="", **kwargs):
    text = message
    mycroft = MycroftAPI(self.mycroft_ip)
    if mycroft is not None:
        mycroft.speak_text(text)
    else:
        _LOGGER.log("Could not reach this instance of mycroft")
[ "def handle_speak(event):\n bus.emit(Message('speak', event))", "def speak(msg):\n if(pu.io.myIP()=='192.168.5.115'):\n os.system(\"say -v Veena \"+str(msg))", "def say(self, text):\n if self.speaker:\n print \"Speech:\", text\n else:\n print \"Speech:\", text", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if a message matches a regex, and if so, calls the callback. Called every loop of the main loop.
def call(self, message):
    matches = message.match(self.regex)
    if matches:
        logging.debug("%s matches %s", message.text, self.regex.pattern)
        self.callback(Response(self.robot, message, matches))
[ "def match(self, message):", "async def message_check(self, msg):\n if self.debug is True : await self.console_message(msg)\n if self.logging is True : await self.log_message(msg)\n if self.blacklist is True : await self.word_filter(msg)", "def dispatch(self, callback, txt):\n\n try:...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
To be called from any component as soon as its internal state has changed in a way that requires remapping the mappings that are processed directly by the Live engine. Don't assume that the request will immediately result in a call to your build_midi_map function. For performance reasons this is only called once per G...
def request_rebuild_midi_map(self): pass
[ "def request_rebuild_midi_map(self):\n MackieControlComponent.request_rebuild_midi_map(self)\n for ex in (self._ChannelStripController__left_extensions + self._ChannelStripController__right_extensions):\n ex.request_rebuild_midi_map()", "def before_map(self, map):", "def _update_keyboar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use this function to send MIDI events through Live to the _real_ MIDI devices that this script is assigned to.
def send_midi(self, midi_event_bytes): pass
[ "def OnMidiMsg(event):\n\n \"\"\" Initialize states \"\"\"\n print(\"\\n-----------------------------\\n\")\n\n mpd_device.TARGET = None\n mpd_device.TARGET_TYPE = None\n mpd_device.TARGET_VALUE = 0\n mpd_device.TARGET_PRESSURE = 0\n\n print(f'event.handled: {event.handled}')\n print(f'event...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the user count and last scale time. Resets the user count to 0 so that the counter can increment it as new users arrive. Also updates the last scale time.
def reset_scale_counters(self):
    self.user_count_since_last_scale = 0
    self.last_scale_time = self.sim.now()
[ "def update_users_count_data(self):\n users = self.users_db.find({\"status\":\"active\"})\n total_current_users = users.count()\n #insert present data# sort the dict and remove the last one\n admin_data = self.admin_db.find_one({\"entitled\":\"all\"})\n user_trend_count = admin_da...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the simulation time since the last reset_scale_counters() call. The result is expected to be >= 0 and a RuntimeError is raised if it is negative.
def get_time_since_last_scale_event(self):
    time_since_last_scale_point = self.sim.now() - self.last_scale_time
    if time_since_last_scale_point < 0:
        raise RuntimeError('The time since the last scale event is negative')
    return time_since_last_scale_point
[ "def get_time_until_next_scale_event(self):\n time_until_next_scale_event = self.sim.scaler.scale_rate - self.get_time_since_last_scale_event()\n if time_until_next_scale_event < 0:\n raise RuntimeError('The time until the next scale event is negative')\n return time_until_next_scale...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the simulation time until the next reset_scale_counters() call. The result is expected to be >= 0 and a RuntimeError is raised if it is negative.
def get_time_until_next_scale_event(self):
    time_until_next_scale_event = self.sim.scaler.scale_rate - self.get_time_since_last_scale_event()
    if time_until_next_scale_event < 0:
        raise RuntimeError('The time until the next scale event is negative')
    return time_until_next_scale_event
[ "def get_time_since_last_scale_event(self):\n time_since_last_scale_point = self.sim.now() - self.last_scale_time\n if time_since_last_scale_point < 0:\n raise RuntimeError('The time since the last scale event is negative')\n return time_since_last_scale_point", "def warmup(self):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates the Poisson user generator.
sim: the sim users should arrive at for service
lamda: the parameter to a Poisson distribution which defines the arrival process
mu: the parameter to a Poisson distribution which defines the service time process
def __init__(self, sim, lamda, mu):
    self.lamda = lamda
    self.mu = mu
    super(PoissonGenerator, self).__init__(sim=sim)
[ "def data_generator_simulation1():\n # Target : 1 nuage de point\n nt = 1000\n mu_t = np.array([50, 50])\n cov_t = np.array([[60, 40], \n [40, 60]])\n xt = ot.datasets.make_2D_samples_gauss(nt, mu_t, cov_t)\n\n # Source : 3 nuages de points\n ns1 = 700\n mu_s = np.array([25, 60])\n cov_s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if the difference between the two functions is above the tolerance. Used to check whether the difference has fallen below the tolerance.
def inTolerance(newFunction, oldFunction, tolerance): return np.sum(newFunction-oldFunction) > tolerance
[ "def _within_tolerance_condition(self, val, expected, tolerance):\n try:\n val = float(val)\n except (ValueError, TypeError):\n return False\n return abs(val - expected) <= tolerance", "def within_tolerance(tolerance: Any, value: Any,\n secrets: Secre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
CREATE A TABLE, CREATE RESOURCES FOR OPERATIONS, ENSURE CREATOR HAS CONTROL OVER TABLE
def create_table_resource(self, table_name, owner):
    new_resources = wrap([
        {"table": table_name, "operation": op, "owner": 1}
        for op in TABLE_OPERATIONS
    ])
    self._insert(RESOURCE_TABLE, new_resources)
    with self.db.transaction() ...
[ "def _create_table(self, table_name):\n raise NotImplementedError()", "def tables(self):\n yield self.sql_create_table", "def __create_tables(self):\r\n self.__create_lines_table()\r\n self.__create_stops_table()\r\n self.__create_buses_table()", "def CreateTable(self, param...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return d[key_name] (as opposed to the normal behavior, d['key_name'])
def bykey(d, key_name):
    try:
        return d[key_name]
    except KeyError:
        return ''
[ "def keyvalue(dictionary, key):\n\n return dictionary[key]", "def key(dict_, key_):\n try:\n return dict_[key_]\n except KeyError as e:\n log.info(e)", "def __getitem__(self, key):\n query = select([self.store.c.value]).where(self.store.c.key == key)\n result = self.conn.exe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return s with underscores turned into spaces
def uscore2space(s): return s.replace('_', ' ')
[ "def _normalize_spaces(key: str) -> str:\n # Convert spaces to underscores.\n key = key.replace(\"_\", \" \")\n # Replace strings of underscores with a single underscore.\n key = MULTIPLE_SPACES.sub(\" \", key)\n # Remove all underscores from the start and end.\n return key.strip(\"_\")", "def c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return s with dashes turned into spaces
def dash2space(s): return s.replace('-', ' ')
[ "def uscore2space(s):\n return s.replace('_', ' ')", "def stringFormat(s):\n s = s.replace(\" \", \"\")\n s = s.replace(\"-\", \"\")\n s = s.lower()\n return s", "def format_word(word):\n\tdashes = r\"[- ]*\"\n\tmodified_word = re.sub(dashes, \"\", word.lower())\n\treturn modified_word", "def r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list and a size, return a rescaled/sampled list. For example, if we want a list of size 5 and we have a list of size 25, return a new list of size five which is every 5th element of the original list.
def rescale_list(input_list, size):
    assert len(input_list) >= size
    # Get the number to skip between iterations.
    skip = len(input_list) // size
    # Build our new output.
    output = [input_list[i] for i in range(0, len(input_list), skip)]
    # Cut off the last one if needed.
    ...
[ "def resize(a_list, new_length):\n new_list = [None] * new_length\n if len(a_list) == new_length:\n new_list = list(a_list)\n elif new_length < len(a_list):\n if len(a_list) % new_length == 0:\n # exact integer division\n # print \"doing exact integer division!\"\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a set of frames (filenames), build our sequence.
def build_image_sequence(self, frames):
    def process_image(image, target_shape):
        """Given an image, process it and return the array."""
        # Load the image.
        h, w = target_shape
        image = load_img(image, target_size=(h, w))
        # Turn it into numpy, normalize an...
[ "def process_seqs():\n\n for dir_name in glob.glob('data/set*'):\n parent_dir = os.path.split(dir_name)[-1]\n if not os.path.exists('target/{}'.format(parent_dir)):\n os.mkdir('target/{}'.format(parent_dir))\n for seq_path in glob.glob('{}/*.seq'.format(dir_name)):\n vi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns command to generate thumbnails as list
def get_thumb_cmd(ifile, ofile):
    cmd = [THUMB_BIN] + THUMB_ARGS
    cmd += ['-i', ifile, '-o', ofile]
    return cmd
[ "def make_image_list(image_dir):", "def create_thumbs(self):\n for m in Movie.query.filter(Movie.thumb == False).all():\n tname = m.hash_id + THUMB_EXT\n tname_full = os.path.join(THUMB_DIR, tname)\n p = subprocess.Popen(get_thumb_cmd(m.location, tname_full), \\\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
finds all movies recursively in path with extensions listed in exts
def movies_in_path(self, path=None, exts=None):
    if path is None:
        path = self.root
    if exts is None:
        exts = MOVIE_EXTS
    for dirpath, _, files in os.walk(path):
        for f in files:
            lower_f = f.lower()
            for ext in exts:
                ...
[ "def scanfolder(root):\n\tmovies = []\n\tfor path, dirs, files in os.walk(root):\n\t\tfor f in files:\n\t\t\tif f.endswith('.mkv') or f.endswith('.m2ts') or f.endswith('.avi'):\n\t\t\t #print os.path.join(path, f)\n\t\t\t movies.append(os.path.join(path,f))\n\t#print movies\t \n\treturn movies", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Selects rows from database where no thumbnail is available and creates one in the thumbs directory
def create_thumbs(self):
    for m in Movie.query.filter(Movie.thumb == False).all():
        tname = m.hash_id + THUMB_EXT
        tname_full = os.path.join(THUMB_DIR, tname)
        p = subprocess.Popen(get_thumb_cmd(m.location, tname_full), \
            stdout=subprocess.PIPE).c...
[ "def update_thumbnail_images(self):\n top_of_row = (self.selected_stage -\n (self.selected_stage % NUM_OF_THUMBS))\n for thumb_index in range(0, NUM_OF_THUMBS):\n if top_of_row + thumb_index <= self.num_of_stages() - 1:\n stage_index = top_of_row + thumb_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a unicode string to an ascii string. If the argument is not a unicode string, returns the argument.
def utf_to_ascii(s):
    # http://stackoverflow.com/questions/4299675/python-script-to-convert-from-utf-8-to-ascii
    if isinstance(s, unicode):
        return s.encode('ascii', 'replace')
    return s
[ "def coerce_to_ascii(s) :\r\n # We dont need this anymore\r\n return s", "def ensure_native_ascii_str(value):\n if isinstance(value, str):\n return value\n elif isinstance(value, unicode): # noqa\n return value.encode(\"ascii\", \"replace\")\n else:\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True iff dt is equivalent to numpy.datetime64('NaT'). Does casting so it's the correct "NOT A TIME"
def is_not_a_time(dt): return dt == NOT_A_TIME.astype(dt.dtype)
[ "def isnat(obj):\n if obj.dtype.kind not in ('m', 'M'):\n raise ValueError(\"%s is not a numpy datetime or timedelta\")\n return obj.view(int64_dtype) == iNaT", "def is_np_datetime_like(dtype: DTypeLike) -> bool:\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True iff dtype is nonstructured or every sub dtype is the same
def np_dtype_is_homogeneous(A):
    # http://stackoverflow.com/questions/3787908/python-determine-if-all-items-of-a-list-are-the-same-item
    if not is_sa(A):
        return True
    dtype = A.dtype
    first_dtype = dtype[0]
    return all(dtype[i] == first_dtype for i in xrange(len(dtype)))
[ "def is_subarray_type(dtype: np.dtype) -> bool:\n return (hasattr(dtype, 'shape')\n and isinstance(dtype.shape, tuple)\n and len(dtype.shape) != 0)", "def _dsUniformLen(self):\n\n # if the first entry in `ds` is non-scalar\n if numerix.shape(self.ds[0]) != ():\n l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True iff M is a numpy.ndarray
def is_nd(M): return isinstance(M, np.ndarray)
[ "def is_numpy(self):\n return isinstance(self.data, np.ndarray)", "def is_numpy_array(x):\n return _is_numpy(x)", "def _is_numpy_array(obj: object) -> bool:\n return _as_numpy_array(obj) is not None", "def _is_atleast_1d_numpy_array(data):\n return NUMPY and isinstance(data, numpy.ndarray) and...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests whether distance between two points is less than a threshold
def dist_less_than(lat_1, lon_1, lat_2, lon_2, threshold):
    return (distance(lat_1, lon_1, lat_2, lon_2) < threshold)
[ "def test_filter_by_distance(self):\n\n threshold = 1\n points = random.uniform(-1,1,size=(100,6))\n points = mathtools.filter_by_distance(points, threshold)\n \n for point in points:\n dif = points[:,0:3]-point[0:3]\n euclidean_distance = sum(dif*dif,1)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a structured array containing all the rows in its arguments. Each argument must be a structured array with the same column names and column types. Similar to SQL UNION.
def stack_rows(args):
    if len(args) > 0:
        M0 = check_sa(args[0], argument_name='args[0]')
        dtype0 = M0.dtype
        checked_args = [M0]
        for idx, M in enumerate(args[1:]):
            M = check_sa(M)
            if dtype0 != M.dtype:
                raise ValueError('args[{}] does not have the ...
[ "def select_into_arr(cursor, sql, args=[]):\n \n if args == []:\n cursor.execute(sql)\n else:\n cursor.execute(sql, tuple(args))\n try:\n rows = cursor.fetchall()\n if len(rows) == 0:\n rows = np.array([], dtype='i4')\n else:\n columnNameLst = zip...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Does a SQL-style join between two numpy tables. Supports equality joins on an arbitrary number of columns. Approximates Pandas DataFrame.merge
def join(left, right, how, left_on, right_on, suffixes=('_x', '_y')):
    left, left_on = check_consistent(
        left, col_names=left_on, M_argument_name='left',
        col_names_argument_name='left_on')
    right, right_on = check_consistent(
        right, col_na...
[ "def join(left, right, keys=None, join_type='inner',\n uniq_col_name='{col_name}_{table_name}',\n table_names=['1', '2'],\n col_name_map=None):\n # Store user-provided col_name_map until the end\n _col_name_map = col_name_map\n\n if join_type not in ('inner', 'outer', 'left', 'right...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies that M is a structured array; otherwise, throws an error. If M is not a structured array but can be converted to one, this function will return the converted structured array without throwing an error.
def check_sa(M, argument_name='M', n_rows=None, n_cols=None,
             col_names_if_converted=None):
    try:
        M = convert_to_sa(M, col_names_if_converted)
    except ValueError:
        raise ValueError("Structured array or similar object required for "
                         "variable '{}'. Got {} instea...
[ "def test_to_numpy_valid(array_like, expected):\n output = to_numpy(array_like)\n np.testing.assert_array_equal(output, expected)\n assert type(output) == np.ndarray\n assert output.dtype == expected.dtype", "def matrix2array(M):\n if isspmatrix(M):\n M = M.todense()\n return np.squeeze(n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes sure that input is valid and self-consistent. 1. Makes sure that M is a valid structured array. 2. If col is provided, makes sure it's a valid column. 3. If col is provided, makes sure that M and col have the same number of rows. 4. If col_names is provided, makes sure that col_names is a list of str. 5. If col_names...
def check_consistent(M, col=None, col_names=None, M_argument_name='M',
                     col_argument_name='col',
                     col_names_argument_name='col_names',
                     n_rows=None, n_cols=None,
                     col_names_if_M_converted=None):
    ...
[ "def _validate_inputs(self,col_list):\n if not set(col_list).difference(self.raw_data.columns):\n print 'Columns is ok,Begin to Run....'\n else:\n raise ValueError('''The columns not in data's columns ''')", "def check_sa(M, argument_name='M', n_rows=None, n_col...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True iff the host doesn't appear to have a display to plot to
def on_headless_server():
    return not os.environ.has_key('DISPLAY')
[ "def is_graphic(self):\n return contains_only_zeros(self.process[-1])", "def isVisualizationSoftwareRunning(self):\n\t\treturn (self.visualizer and not self.visualizer.isClosed())", "def __nonzero__(self): # pragma: no cover\n return self._panels is not None", "def _is_empty_axes(ax):\n retu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Type annotations for the step decorator in the case of no arguments.
def step(_func: F) -> Type[BaseStep]:
    ...
[ "def step():\n def decorator(func):\n name = func.__name__\n\n global STEPS\n assert name not in STEPS\n\n STEPS[name] = func\n\n return func\n\n return decorator", "def step1(self): # real signature unknown; restored from __doc__\n pass", "def step(\n _func: O...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Outer decorator function for the creation of a ZenML step. In order to be able to work with parameters such as `name`, it features a nested decorator structure.
def step(
    _func: Optional[F] = None,
    *,
    name: Optional[str] = None,
    enable_cache: bool = True
) -> Union[Type[BaseStep], Callable[[F], Type[BaseStep]]]:
    def inner_decorator(func: F) -> Type[BaseStep]:
        """Inner decorator function for the creation of a ZenML Step

        Args:
            func...
[ "def step():\n def decorator(func):\n name = func.__name__\n\n global STEPS\n assert name not in STEPS\n\n STEPS[name] = func\n\n return func\n\n return decorator", "def templatemethod(name_):\n\n def template_decorator(func):\n \"\"\"Inner decorator function\"\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate an MS SQL Server connection string using a YAML config file and credentials provided by the user and/or stored in the system registry (Windows) or keyring (Mac, Unix). You may be asked for your _computer_ password to authorize retrieval of the stored database password.
def get_mssql_connection_string(yamlfile, reset=False, urlencode=False,
                                check_winreg=True, driver=None, **kwargs):
    kwargs = {kk.lower(): vv for kk, vv in kwargs.items() if vv is not None}
    # set a default driver
    if driver is None:
        #import pyodbc
        #drivers = pyod...
[ "def create_pyodbc_connection_string(self, server: str, database: str, user: str, pwid: str) -> str:\r\n if not server or not database:\r\n raise ValueError(\"server and database parameters must be provided\")\r\n\r\n conn_string_parts = {\r\n \"DRIVER\": r\"{ODBC Driver 17 for S...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validate `country_code` with `phone_number` using the `phonenumber_field` validation methods.
def validate_phone_number(self, value):
    # Format country_code with + sign.
    country_code = self.initial_data['country_code']
    if not country_code.startswith('+'):
        country_code = f'+{country_code}'
    phone_number = self.initial_data['phone_number']
    phone_number = f'{count...
[ "def validate_country_phone_code(ctry_code_phone):\n try:\n Nation.objects.get(phone_code=ctry_code_phone)\n except Nation.DoesNotExist:\n return False\n\n return True", "def validate_country_code_number(self, field):\n if Store.query.filter_by(\n country_code=field.co...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }