query
stringlengths
9
9.05k
document
stringlengths
10
222k
negatives
listlengths
19
20
metadata
dict
Returns the index corresponding to the given class label.
def lookup_class_idx(self,label): return self.class_labels[label]
[ "def get_class_index(label):\n if isinstance(label,str) is False:\n basic.outputlogMessage('input label must be a string')\n assert(False)\n length = len(class_label)\n for i in range(0,length):\n if label.lower()==class_label[i]:\n return i\n #if not found\n basic.out...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies a function mapping to each element in the feature data.
def apply_fn(self,fn): self.check_Data() for split,data_ in self.processed_data.items(): x = data_['x'] x = np.array([fn(xi) for xi in x]) data_['x'] = x
[ "def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)", "def map(self, function):\n pass", "def map(self, function):\n return FunctionalWrapper(map(function, self.data))", "def appl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a new MLP using the nn.Sequential class. Returns
def generate(self): components = [] components.append(nn.Linear(self.n_features,self.hidden_sizes[0])) self._activation(components,self.activation) self._dropout(components,self.dropout) for i in range(1,len(self.hidden_sizes)): components.append...
[ "def make_mlp_model():\n return snt.Sequential([\n snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),\n snt.LayerNorm()\n ])", "def mlpModel(input1_shape, layers=[4]):\n model = Sequential()\n last_idx = len(layers) - 1\n for (idx, num_units) in enumerate(layers):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new activation function and adds it to the list of components.
def _activation(self,components,activation): if activation == "ReLU": components.append(nn.ReLU()) elif activation == "Sigmoid": components.append(nn.Sigmoid()) else: raise Exception("Invalid activation fn: "+activation)
[ "def construct_activation_function(self):\n # Add the activation function\n if not self.activation_function is None:\n # Check if it is a string\n if isinstance(self.activation_function, str):\n activation_function = get_activation_function_by_name(\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a dropout object to the list of components
def _dropout(self,components,dropout=None): if dropout is not None: components.append(nn.Dropout(dropout))
[ "def add(self, component) -> None:\n pass", "def addComponent(self,component):\r\n self.append(component)", "def add(self, component):\n self.components.add(component)", "def add_depot(self, depot):\n self.destination_list.append(depot)", "def add_component(self, componentInstanc...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Splits a DataFrame into 3 distinct DataFrames based on the given percentages and returns a dict of the data.
def split_data(text_df,splits=None,rand_perm=True): if splits is None: splits = {'train':0.6,'val':0.1,'test':0.3} if np.round(np.sum(list(splits.values())),4) != 1: raise Exception("Split percentages do not sum to 1") size = len(text_df) if rand_perm: pe...
[ "def split_data(df_data, clusters):\n\n if clusters is None:\n\n return {0: df_data}\n\n return {\n k: df_data.loc[clusters.index[clusters == k]]\n for k in clusters.unique()\n }", "def split_train_dev_set(df, percent=0.2):\n train = []\n dev = []\n for k, g in df.groupby(\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a English > French text file and filters the lines based on the given filter_fn. If filter_fn is None, the default filter will be
def filter_nmt_file(filename,filter_fn=None): if filter_fn is None: filter_fn = lambda en : en.lower().startswith('i am') or \ en.lower().startswith('he is') or \ en.lower().startswith('she is') or \ en.lower().startswith('they are') or \ en.lower(...
[ "def on_filter_process(self, filter_terms=None):\n if not filter_terms:\n filter_terms = self.ui_filterLine.text()\n # break up the string term based common separator characters\n terms = lists.fragment(terms=filter_terms, splits=list(' ,'),\n clean=True...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of lines of English/French text, creates a DataFrame with train/val/test split labels.
def create_nmt_data(text,train_pct=0.7,val_pct=0.15): if train_pct + val_pct >= 1: raise Exception("train_pct + val_pct must be < 1.0") source = [] target = [] for line in text: text = line.split('\t') source.append(text[0]) target.append(text[1]) ...
[ "def ucf_read_train_test_split(self, path):\n # get the test train split txt file\n train = []\n test = []\n for (dirpath, dirnames, filenames) in os.walk(path):\n train += [os.path.join(path, file) for file in filenames if file.startswith('trainlist')]\n test += [o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads a glove word embedding text file and generates a DataFrame with the embeddings.
def process_glove_data(filename): word_list = [] embed_list = [] with open(filename,encoding="utf8") as file: lines = file.readlines() for line in lines: toks = line.split(' ') word_list.append(toks[0]) vec = [float(tok) for tok in toks[1:]] ...
[ "def read_glove_source(self):\n embeddings = []\n word2vec = {}\n idx2word = []\n with open(self.source) as file:\n lines = file.readlines()\n for line in lines:\n data = line.split()\n word = data[0]\n vector = np.asarra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of dividends whose ex_dates are all the next trading day, calculate and store the cash and/or stock payments to be paid on each dividend's pay date.
def earn_dividends(self, cash_dividends, stock_dividends): for cash_dividend in cash_dividends: self._dirty_stats = True # only mark dirty if we pay a dividend # Store the earned dividends so that they can be paid on the # dividends' pay_dates. div_owed = self.p...
[ "def _calculate_next_dividend(self, symbols):\n yahoo_financials = YahooFinancials(symbols)\n logging.debug(\"[_calculate_next_dividend] Fetching get_exdividend_date\")\n data = yahoo_financials.get_exdividend_date()\n logging.debug(\"[_calculate_next_dividend] Finished fetching get_exdividend_date\")\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a cash payment based on the dividends that should be paid out according to the accumulated bookkeeping of earned, unpaid, and stock dividends.
def pay_dividends(self, next_trading_day): net_cash_payment = 0.0 try: payments = self._unpaid_dividends[next_trading_day] # Mark these dividends as paid by dropping them from our unpaid del self._unpaid_dividends[next_trading_day] except KeyError: ...
[ "def cash_flow(self):\n _cash_flow = self.after_tax_profit() + self.depreciation()\n return _cash_flow", "def get_cash(self):\n\n\t\tpass", "def test_discounted_payment_below_debit(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(20), A(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Force a computation of the current portfolio state.
def update_portfolio(self): if not self._dirty_portfolio: return portfolio = self._portfolio pt = self.position_tracker portfolio.positions = pt.get_positions() position_stats = pt.stats portfolio.positions_value = position_value = ( position_st...
[ "def _initalize_portfolio_with_cash(self):\n self.cash = copy.copy(self.starting_cash)\n\n if self.starting_cash > 0.0:\n self.history.append(\n PortfolioEvent.create_subscription(\n self.current_dt, self.starting_cash, self.starting_cash\n )...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the current portfolio. Notes This is cached, repeated access will not recompute the portfolio until the portfolio may have changed.
def portfolio(self): self.update_portfolio() return self._immutable_portfolio
[ "def update_portfolio(self):\n if not self._dirty_portfolio:\n return\n\n portfolio = self._portfolio\n pt = self.position_tracker\n\n portfolio.positions = pt.get_positions()\n position_stats = pt.stats\n\n portfolio.positions_value = position_value = (\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Override fields on ``self.account``.
def override_account_fields(self, settled_cash=not_overridden, accrued_interest=not_overridden, buying_power=not_overridden, equity_with_loan=not_overridden, to...
[ "def fix_account(self, account):\n pass", "def account(self, account):\n\n self._account = account", "def onAccountUpdate(self, data):\n pass", "def change_account(self, account):\r\n check_account = Account(account, steem_instance=self.steem)\r\n self.account = check_accoun...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Called when the partition's reference count reaches zero. If the partition contains a temporary file which is not referenced by any other partition then the temporary file is removed from disk. If the partition contains a nontemporary file which is not referenced by any other partition then the file is closed.
def __del__(self): # subarray = getattr(self, '_subarray', None) subarray = self._subarray # If the subarray is unique it will have 2 references to # it plus 1 within this method, making 3. If it has more # than 3 references to it then it is not unique. if getrefc...
[ "def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))", "def __del__(self):\n if self.has_temp_file:\n logging.warning('Tem...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add i to the count of subarrays referencing the file of this partition's subarray. Only do this if self._subarray is an instance of FileArray, but not a temporary FileArray.
def _add_to_file_counter(self, i): # subarray = getattr(self, '_subarray', None) subarray = self._subarray if subarray is None: return try: if isinstance(subarray, FileArray) and not isinstance( subarray, CachedArray ): ...
[ "def append_subint_array(self,table):\n fits_to_append = F.FITS(table)", "def iterappend(self, arrayiterable):\n if self._accessmode != 'r+':\n raise OSError(f\"Accesmode should be 'r+' \"\n f\"(now is '{self._accessmode}')\")\n if not hasattr(arrayiterable...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add 1 to the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _increment_file_counter(self): self._add_to_file_counter(1)
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Subtract 1 from the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _decrement_file_counter(self): self._add_to_file_counter(-1)
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the auxiliary mask to the config dictionary. Assumes that ``self.config`` already exists.
def _configure_auxiliary_mask(self, auxiliary_mask): indices = self.indices new = [ mask[ tuple( [ (slice(None) if n == 1 else index) for n, index in zip(mask.shape, indices) ] ...
[ "def hybrid_dict_mask(self, test=False, a='6', msg=msgs.m_hydi_atk):\n self.argv = self.build_args()\n mask = self.masks_file or self.mask\n if not mask:\n return\n try:\n self.argv.insert(0, mask)\n self.common_attack_pattern(test, a, msg)\n excep...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if and only if the partition's subarray is in memory as opposed to on disk.
def in_memory(self): return hasattr(self._subarray, "__array_interface__")
[ "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if and only if the partition's subarray is on disk as opposed to in memory.
def on_disk(self): return isinstance(self._subarray, FileArray)
[ "def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if not self.in_memory:\n # ------------------------------------------...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The partition's subarray of data.
def subarray(self): return self._subarray
[ "def partition(self, sep):\n return asarray(partition(self, sep))", "def GetPartitioningArray(self):\n return _hypre.HypreParVector_GetPartitioningArray(self)", "def get_slice(self, array_slice, shape, dtype, s3_bucket, s3_key): # pylint: disable=too-many-locals\n # convert array_slice int...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change the axis names. The axis names are arbitrary, so mapping them to another arbitrary collection does not change the data array values, units, nor axis order.
def change_axis_names(self, axis_map): axes = self.axes # Partition axes self.axes = [axis_map[axis] for axis in axes] # Flipped axes flip = self.flip if flip: self.flip = [axis_map[axis] for axis in flip]
[ "def setAxisName(name, axes='XYZ'):\n dislin.name(name, axes)", "def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Close the partition after it has been conformed. The partition should usually be closed after its `array` method has been called to prevent memory leaks. Closing the partition does one of the following, depending on the values of the partition's `!_original` attribute and on the
def close(self, **kwargs): config = getattr(self, "config", None) if config is None: return if kwargs: config.update(kwargs) original = getattr(self, "_original", None) logger.partitioning("Partition.close: original = {}".format(original)) if n...
[ "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def close_all(self):\n self.partition_map.close_all()", "def close(self):\n self.drill = None", "def close(self):\n self.dataset.close()", "def exit(self):\n for acc in self.to_close:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
True if the subarray contains datetime objects.
def isdt(self): return self.Units.isreftime and self._subarray.dtype == _dtype_object
[ "def _contains_cftime_datetimes(array) -> bool:\n # Copied / adapted from xarray.core.common\n from xarray.core.pycompat import is_duck_dask_array\n\n if cftime is None:\n return False\n else:\n if array.dtype == np.dtype(\"O\") and array.size > 0:\n sample = array.ravel()[0]\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Close the file containing the subarray, if there is one.
def file_close(self): if self.on_disk: self._subarray.close()
[ "def closeFile():\r\n global datafile\r\n if datafile is not None:\r\n datafile.close()", "def close(self):\n if self.closed:\n return\n\n self.closed = True\n try:\n if self.mode in (\"a\", \"w\", \"x\"):\n self.fileobj.write(NUL * (BLOCKSIZE...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return an iterator over indices of the master array which are spanned by the data array.
def master_ndindex(self): # itermaster_indices(self): return itertools_product( *[range(*r) for r in self.location] ) # TODO check
[ "def indicesIter(self):\n \n pass", "def indices(self):", "def getArrayIndices(self):\n \n pass", "def __iter__(self):\n start = 0\n for i, dist in enumerate(self.dists):\n count = self.ndims[i]\n if count == 1:\n idx = start\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Update the `!part` attribute inplace for new indices of the master array.
def new_part(self, indices, master_axis_to_position, master_flip): shape = self.shape if indices == [slice(0, stop, 1) for stop in shape]: return # ------------------------------------------------------------ # If a dimension runs in the wrong direction then change its ...
[ "def _update_assessment_parts_map(self, part_list):\n for part in part_list:\n # perhaps look for a \"level offset\"?\n level = part._level_in_section # plus or minus \"level offset\"?\n if str(part.get_id()) not in self._part_ids():\n self._insert_part_map(ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The extra memory required to access the array.
def extra_memory(self): if not self.in_memory: # -------------------------------------------------------- # The subarray is on disk so getting the partition's data # array will require extra memory # -------------------------------------------------------- ...
[ "def allocated_memory(self):\n return self._allocated_memory", "def memory(self):\n return self._memory", "def get_array_size(self):\r\n return conf.lib.clang_getArraySize(self)", "def arraysize(self):\n return self._arraysize", "def MAXMEM(self):", "def memory_size(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move the partition's subarray to a temporary file on disk.
def to_disk(self, reopen=True): # try: tfa = CachedArray(self.array) # except Exception: # return False fd, _lock_file = mkstemp( prefix=tfa._partition_file + "_", dir=tfa._partition_dir ) close(fd) self.subarray = tf...
[ "def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(ori...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register a temporary file on this rank that has been created on another rank.
def _register_temporary_file(self): _partition_file = self._subarray._partition_file _partition_dir = self._subarray._partition_dir if _partition_file not in _temporary_files: fd, _lock_file = mkstemp( prefix=_partition_file + "_", dir=_partition_dir ) ...
[ "def register_tmp_file(self, tmp_file: str):\n self.temp_files.add(pathlib.Path(tmp_file))", "def set_temp_file(self):\n\n index = self.filename.rfind('/') + 1\n self.temp_filename = self.filename[:index] + \"tmp_\" + self.filename[index:]", "def _upload_temp(cls, path, token, rtype):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the lock files listed in lock_files to the list of lock files managed by other ranks.
def _update_lock_files(self, lock_files): _, _lock_file, _other_lock_files = _temporary_files[ self._subarray._partition_file ] _other_lock_files.update(set(lock_files)) if _lock_file in _other_lock_files: # If the lock file managed by this rank is in the list of ...
[ "def LockFiles(self, entries):\n self._model.lock(entries)", "def addFiles(self, file_list):\n \n # Add the files to the queue\n for file_name in file_list:\n self.file_queue.put(file_name)\n \n # Write the queue to disk\n self.saveQueue()\n \...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Samples a 2d function f over specified intervals and returns two arrays (X, Y) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot2d.py. f is a function of one variable, such as x2. x_args is an interval given in the form (var, min, max, n)
def sample2d(f, x_args): try: f = sympify(f) except SympifyError: raise ValueError("f could not be interpreted as a SymPy function") try: x, x_min, x_max, x_n = x_args except (TypeError, IndexError): raise ValueError("x_args must be a tuple of the form (var, min, max, n)"...
[ "def PlotF(f,start,stop,x_label='x axis',y_label='y axis',lab='f(x)',\\\r\n my_title='Graph',arguments=()):\r\n \r\n #initiate figure object, plot object\r\n my_fig, my_plot = pyplot.subplots();\r\n\r\n #if no additional arguments, args is an empty tuple, so just plot\r\n\r\n if arguments==...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Samples a 3d function f over specified intervals and returns three 2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot3d.py. f is a function of two variables, such as x2 + y2. x_args and y_args are intervals given in the form (var, min, max, n)
def sample3d(f, x_args, y_args): x, x_min, x_max, x_n = None, None, None, None y, y_min, y_max, y_n = None, None, None, None try: f = sympify(f) except SympifyError: raise ValueError("f could not be interpreted as a SymPy function") try: x, x_min, x_max, x_n = x_args ...
[ "def frontiere_3d(f, data, step=20):\n ax = plt.gca(projection='3d')\n xmin, xmax = data[:, 0].min() - 1., data[:, 0].max() + 1.\n ymin, ymax = data[:, 1].min() - 1., data[:, 1].max() + 1.\n xx, yy = np.meshgrid(np.arange(xmin, xmax, (xmax - xmin) * 1. / step),\n np.arange(ymin, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Samples a 2d or 3d function over specified intervals and returns a dataset suitable for plotting with matlab (matplotlib) syntax. Wrapper for sample2d and sample3d. f is a function of one or two variables, such as x2. var_args are intervals for each variable given in the form (var, min, max, n)
def sample(f, *var_args): if len(var_args) == 1: return sample2d(f, var_args[0]) elif len(var_args) == 2: return sample3d(f, var_args[0], var_args[1]) else: raise ValueError("Only 2d and 3d sampling are supported at this time.")
[ "def sample3d(f, x_args, y_args):\n x, x_min, x_max, x_n = None, None, None, None\n y, y_min, y_max, y_n = None, None, None, None\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
iterate through each restaurant name from restaurant names and aggregate to results
def results_aggregator(self, names): for name in names: result = self.main(name) self.results.append(result) print("'%s' has been written to the file." % result[0]) """result is formatted name, number, rating, review count"""
[ "def resolveResult(self, restaurants):\n restaurant_list = []\n for restaurant in restaurants:\n restaurant_list.append({'Name': restaurant['restaurant']['name'], \"cuisines\": [x.strip() for x in restaurant['restaurant']['cuisines'].split(',')],\n \"lat\": restaurant['restaurant...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Récupère la date de publication d'un CVE si celleci est disponible
def parser_cve_date_publi(self,cve): try: pageCVE = opener.open(cve.get('href')) except(ssl.CertificateError) as e: return None soupCVE = BeautifulSoup(pageCVE, 'html.parser') res = soupCVE.find('strong', text=re.compile("Last Modified")) if res != None: ...
[ "def receive_date(self):\n if 'v112' in self.data['article']:\n return tools.get_publication_date(self.data['article']['v112'][0]['_'])\n return None", "def acceptance_date(self):\n if 'v114' in self.data['article']:\n return tools.get_publication_date(self.data['article...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Récupère les CVE, leurs liens vers NVD, dates de création, et les texte associées chaque vulnérabilité Remplie les listes initialisées au préalable (voir fonction __init__ cidessus)
def cve_parser(self): cves = self.soup.findAll('a', text=re.compile("CVE-")) for cve in cves: id = cve.getText() self.cve+=[id] cve_date = re.sub("CVE-","",id) cve_date = re.sub("-[0-9\s]*","",cve_date) cve_date += "-01-01" self.cve...
[ "def _initCvsVersion(self):\n\n output = _exec('cvs -vf')\n m = re.match(\n r'Concurrent Versions System \\(CVS\\) '\n r'(?P<numericpart>(\\d+\\.)+\\d+)(?P<rest>\\S*)'\n r' \\(client\\/server\\)',\n output[1]\n )\n if m:\n v = [i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Récupère les vecteur CVSS, les split dans des listes ; récupère aussi les scores CVSS Remplie les listes initialisées au préalable (voir fonction __init__ cidessus)
def cvss_parser(self): cvsss = self.soup.findAll('a', text=re.compile("AV:")) for cvss in cvsss: id = cvss.getText() id = re.sub("[()]*","",id) id = id.split('/') self.cvss += [id] score = cvss.parent.getText() score = re.sub("[A-Za...
[ "def __init__(self, n):\r\n self.lcs = [LearningCurve(name=f\"cv_{i}\") for i in range(n)]", "def __init__(self):\n self.svclassifier = SVC(kernel='linear')", "def test_split_data_cv():\n from parrot import process_input_data as pid\n\n data_file = os.path.abspath(\"../data/seq_class_dataset...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Récupère les références CWE, leurs lien, abstraction et structures répertoriées et leurs arborescences SFP1 et SFP2 Remplie les listes initialisées au préalable (voir fonction __init__ cidessus)
def cwe_parser(self): cwes = self.soup.findAll('a', text=re.compile("CWE-")) for cwe in cwes: id = cwe.getText() id = re.sub("[A-Za-z0-9;,:\s\-\(\)\"\']* CWE-","CWE-",id) # Lien possiblement cassé try: pageCWE = opener.open(cwe.get('href'))...
[ "def __init__(self):\n self._declarations = self.get_declarations()", "def __init__(self):\n # just the list of class/construct types\n self.lut = {}\n self.lut[\"struct\"] = structure\n self.lut[\"typedef\"] = typedef\n self.lut[\"define\"] = def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Default reducer for distinctions. Expects all distinctions to follow
def __reduce__(self): return instanceReducer(self)
[ "def evaluation_reducer(self) -> Union[Reducer, Dict[str, Reducer]]:\n return Reducer.AVG", "def _reduce(self, action):\n assert len(self.stack) >= 2, \"ERROR: Cannot reduce with stack length less than 2\"\n \n # STUDENT\n # hint: use list.pop()\n # END STUDENT\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For conjugate distinctions this should be overridden and return the base distinctions used. For none conjugate it will automatically return an empty list.
def getBaseDistinctions(self): return []
[ "def get_conjugate_bases_of(chebi_ent):\n if hasattr(chebi_ent, 'OntologyParents'):\n return [ent.chebiId for ent in chebi_ent.OntologyParents if\n (ent.type == \"is conjugate base of\")]\n else:\n return []", "def conjugate(self):\n pass", "def conjugate(self):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a random distinction of this type than is valid for the schema config.schema and for the given graphs. This function for must take graphs as its first argument, and if its a conjugate distinction it must then take, as separate args, not a tuple,
def getRandomDistinction(config, graphs, *base_distinctions): raise AbstractMethodException(Distinction)
[ "def generate_regular_graph(variable_names, dist_func, num_neigh=10, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n num_neigh = min(num_neigh, num_vars-1)\n graphs = nx.random_graphs.random_regular_graph(num_neigh, num_vars)\n edges = np.array(graphs.edges())\n edges.sort(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an estimate of the number of different subtypes for this distinction. This is used to estimate a PDF for randomly sampling the distinction space. Examine the code of other distinctions to get a feel for how things are estimated.
def getNumberOfSubtypes(config, low_estimate=True): raise AbstractMethodException(Distinction)
[ "def test_type_distribution(self):\n np.random.seed(SEED)\n total = 100000\n tolerance = 0.02\n astro_generator = Generator(1e9, source='astrophysical')\n counts = Counter(astro_generator.get_particle_type()\n for _ in range(total))\n assert (counts[...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a schema return True if this type of distinction is valid for the schema. Default is True. Should be overridden if there are any schemas a distinction is not valid for.
def isValidForSchema(schema): return True
[ "def is_a_dde_schema(self, schema):\n return schema in self.registered_dde_schemas", "def compatibleSchema(self,\n schema: schemaconverter.TDXSchema,\n raise_error: bool = True\n ) -> bool:\n db_tdx_schema = self.tdx_schema\n # see https://stackoverflow.com/a/41579450/1014916...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Matrix multiplication of chains of square matrices
def chain_matmul_square(As): As_matmul = As while As_matmul.shape[0] > 1: if As_matmul.shape[0] % 2: A_last = As_matmul[-1:] else: A_last = None As_matmul = torch.matmul(As_matmul[0:-1:2], As_matmul[1::2]) if A_last is not None: As_ma...
[ "def matrix_chain_multiply(A: List[np.ndarray], s: List[List[int]], i: int, j: int) -> np.ndarray:\n if i == j:\n return A[i]\n if i + 1 == j:\n return np.dot(A[i], A[j])\n Ak = matrix_chain_multiply(A, s, i, s[i][j])\n Ak1 = matrix_chain_multiply(A, s, s[i][j]+1, j)\n prod = np.dot(Ak,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print Bento details by providing the bento_tag. \b
def get(bento_tag: str, output: str) -> None: # type: ignore (not accessed) bento = bento_store.get(bento_tag) if output == "path": console.print(bento.path) elif output == "json": info = json.dumps(bento.info.to_dict(), indent=2, default=str) console.print_...
[ "def print_entity(entity):\n print 'entity.original_text:', entity.original_text\n print 'entity.display_text:', entity.display_text\n print 'entity.display_html:', entity.display_html\n print 'entity.start_index:', entity.start_index\n print 'entity.end_index:', entity.end_index", "def print_tags(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
List Bentos in local store \b show all bentos saved $ bentoml list \b show all verions of bento with the name FraudDetector $ bentoml list FraudDetector
def list_bentos(bento_name: str, output: str) -> None: # type: ignore (not accessed) bentos = bento_store.list(bento_name) res = [ { "tag": str(bento.tag), "path": display_path_under_home(bento.path), "size": human_readable_size(calc_dir_size(...
[ "def view_command():\n list1.delete(0,END)\n for row in AppbookstoredbBACKEND.view_data():\n list1.insert(END,row)", "async def list(self, ctx: commands.Context, name: str = None):\n if not name:\n try:\n data = self.memory[ctx.guild.id]\n except KeyError:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Export a Bento to an external file archive \b
def export(bento_tag: str, out_path: str) -> None: # type: ignore (not accessed) bento = bento_store.get(bento_tag) out_path = bento.export(out_path) logger.info("%s exported to %s.", bento, out_path)
[ "def export_obo(path_to_file, connection=None):\n db = DbManager(connection)\n db.export_obo(path_to_export_file=path_to_file)\n db.session.close()", "def archive(po_filename, bl_filename):\n\n # Store archive in same dir as this script\n root = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import a previously exported Bento archive file \b
def import_bento_(bento_path: str) -> None: # type: ignore (not accessed) bento = import_bento(bento_path) logger.info("%s imported.", bento)
[ "def import_archive(self):\n if self.archive:\n archive = IrkruTildaArchive(self.archive, material=self)\n archive.process()", "def import_into_beets(self):\n # TODO: Rework this and properly call the beets API.\n os.system(f'beet import {self.downloader.temp_path.name}'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pull Bento from a yatai server.
def pull(bento_tag: str, force: bool) -> None: # type: ignore (not accessed) yatai_client.pull_bento(bento_tag, force=force)
[ "def pull():", "def pull_from_postmaster(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Actions/...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Push Bento to a yatai server.
def push(bento_tag: str, force: bool, threads: int) -> None: # type: ignore (not accessed) bento_obj = bento_store.get(bento_tag) if not bento_obj: raise click.ClickException(f"Bento {bento_tag} not found in local store") yatai_client.push_bento(bento_obj, force=force, threads=threa...
[ "def push(context_service: ContextService):\n cli_output: CliOutput = context_service.get_cli_output()\n cli_output.info(\"Pushing changes to remote...\")", "def push(self, obj):\n pass", "def push(self):\n logger.debug('PUSHING...')\n self._rest()\n self._trigger(self.STATE.PU...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Build a new Bento from current directory.
def build(build_ctx: str, bentofile: str, version: str) -> None: # type: ignore (not accessed) if sys.path[0] != build_ctx: sys.path.insert(0, build_ctx) build_bentofile(bentofile, build_ctx=build_ctx, version=version)
[ "def newproject(self):\n \n self.path = os.path.join(self.base, self.name)\n subpath = os.path.join(self.path, self.lowname)\n check_build_path(subpath)\n \n for filename, content in self.files.items():\n self.buildfile(filename, content, self.path)\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the value. (And calls the base class) This will also check for Options to set the bools. FAULTS_ACTIVE FAULTS_CURRENT >>> BIT_FAULT_PROBE = 0 >>> BIT_FAULT_OVERTEMP = 1 >>> BIT_FAULT_PANEL_OPEN = 2 >>> BIT_FAULT_HIGH_VOLTAGE = 3 >>> BIT_FAULT_RAM_CRC = 4 >>> BIT_FAULT_EEPROM_CRC = 5 >>> BIT_FAULT_GPIO_ERROR = 6 >>>...
def set_value(self, item, value): super(t_16_Bit_Options, self).set_value(item, value) if(item == t_16_Bit_Options.FAULT_ACTIVE): self.set_bools(value, self.faults_current, t_16_Bit_Options.BIT_FAULT_MAX ) if(item == t_16_Bit_Options.FAULT_LATCHED): self.set_bools(value...
[ "def set(self, value): # interface for BlueSky plans\n if str(value).lower() not in (\"fly\", \"taxi\", \"return\"):\n msg = \"value should be either Taxi, Fly, or Return.\"\n msg + \" received \" + str(value)\n raise ValueError(msg)\n\n if self.busy.value:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the value. (And calls the base class) This will also check for Options to set the bools. BOOLEAN_CONFIG_1 >>> BIT_PROBE_TERMINATION = 0 >>> BIT_TMODE = 1 >>> BIT_EMODE = 2 >>> BIT_MUTE = 3 >>> BIT_PATTERN_TRIGGER = 4 >>> BIT_DEBUG_REALTIME = 5 >>> BIT_DEBUGPRINT = 6 >>> BIT_DEBUG_HW_OVERRIDE = 7
def set_value(self, item, value): super(t_8_Bit_Options, self).set_value(item, value) if(item == t_8_Bit_Options.BOOLEAN_CONFIG_1): self.set_bools(value, self.bools, t_8_Bit_Options.BIT_MAX)
[ "def setbool(self, strcommand, value):\n command = ct.c_wchar_p(strcommand)\n value = ct.c_bool(value)\n self.lib.AT_SetBool(self.AT_H, command, value)", "def set_bool_node_value(node_name,value):\n\n\timport Mgmt\n code, msg = Mgmt.set((node_name,'bool', value))\n return code,m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Builds a command packet
def build_command_packet(self, command): packet = bytearray() # All option fields are 0 packet.append(0) packet.append(0) packet.append(0) packet.append(command) return packet
[ "def _build_command(self, command_name, hardware_address = '', comp_var_dict = None):\n # Start command adn set name\n command = \"<Command><Name>{command_name}</Name>\".format(command_name=command_name)\n\n if hardware_address:\n command += \"<DeviceDetails><HardwareAddress>{hardwar...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This will get the current faults on the system.
def get_faults_current(self): request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_ACTIVE], BP_TOOL.REQUEST_16) return self.__get_faults_list(self.config_16.faults_current)
[ "def get_faults(self):\n status = self.get_status()\n return [k for k in status if k.endswith('_FAULT') and status[k]]", "def get_faults(self):\n try:\n faults_parent = self.rootelement.findall(\"{\"+self.xmlns+\"}Faults\")[0]\n self.faults = faults_parent.findall(\"{\"+...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This will get the latched faults on the system.
def get_faults_latched(self): request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_LATCHED], BP_TOOL.REQUEST_16) return self.__get_faults_list(self.config_16.faults_latched)
[ "def get_faults(self):\n status = self.get_status()\n return [k for k in status if k.endswith('_FAULT') and status[k]]", "def get_faults_current(self):\n request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_ACTIVE], BP_TOOL.REQUEST_16)\n return self.__get_faults_list(self.con...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the pattern wave pat_wave 101011110011 .... >>> Request >>> 0> >>> Pattern Wave [More to follow] >>> >> Request Next block >>> 0> >>> Pattern Wave [More to follow] >>> >> >>> ..... >>> >>> Request Next block >>> 0> >>> Pattern Wave [No More to follow] >>> <)
def __request_pat_wave(self, r_number): packet = bytearray() packet.append(0) # 16 bit options packet.append(0) # 8 bit options packet.append(1) # Request the 1 option # --------------------------------------------------------------------- # Request the variable length ...
[ "def waveband(self):\n return self.get(\"waveband\", default=\"\", decode=True).split(\"#\")", "def _wave(self):\n try:\n return wave.open(StringIO(self.contents))\n except wave.Error, err:\n err.message += \"\\nInvalid wave file: %s\" % self\n err.args = (err...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The resource ID of the Network Fabric l3IsolationDomain.
def l3_isolation_domain_id(self) -> pulumi.Input[str]: return pulumi.get(self, "l3_isolation_domain_id")
[ "def l3_isolation_domain_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"l3_isolation_domain_id\")", "def l3_id(self):\n return self._l3_id", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def cluster_resource_id(self) -> str:\n return pulu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The default interface name for this L3 network in the virtual machine. This name can be overridden by the name supplied in the network attachment configuration of that virtual machine.
def interface_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "interface_name")
[ "def get_default_iface_name():\n return netifaces.gateways()['default'][netifaces.AF_INET][1]", "def interface_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"interface_name\")", "def l3_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The name of the L3 network.
def l3_network_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "l3_network_name")
[ "def name(self) -> str:\n return self.__configuration['network']['name']", "def computer_network_name(self) -> str:\n return self._computer_network_name", "def network_instance_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"network_instance_name\")", "def name(self...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an existing L3Network resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'L3Network': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = L3NetworkArgs.__new__(L3NetworkArgs) __props__.__dict__["associated_res...
[ "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Layer':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = LayerArgs.__new__(LayerArgs)\n\n __props__.__dict__[\"attributes\"]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The list of resource IDs for the other Microsoft.NetworkCloud resources that have attached this network.
def associated_resource_ids(self) -> pulumi.Output[Sequence[str]]: return pulumi.get(self, "associated_resource_ids")
[ "def resource_ids(self):\n return self._resource_ids", "def network_ids(self):\n return self._network_ids", "def get_resource_identifiers(self):\n return self.__resourceIdentifiers", "def resource_share_ids(self):\n return self._resource_share_ids", "def resource_list(self):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The more detailed status of the L3 network.
def detailed_status(self) -> pulumi.Output[str]: return pulumi.get(self, "detailed_status")
[ "def status(ctx):\n return show_network_status()", "def status(self):\n res = \"\"\n for tlight in self.trafficLights:\n res += \"Traffic light {} status: {}\\n\".format(self.trafficLights[tlight].id,self.trafficLights[tlight].getState())\n return res", "def status(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The extended location of the cluster associated with the resource.
def extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']: return pulumi.get(self, "extended_location")
[ "def cluster_extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"cluster_extended_location\")", "def cluster_location(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cluster_location\")", "def cluster_location(self) -> Option...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The default interface name for this L3 network in the virtual machine. This name can be overridden by the name supplied in the network attachment configuration of that virtual machine.
def interface_name(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "interface_name")
[ "def get_default_iface_name():\n return netifaces.gateways()['default'][netifaces.AF_INET][1]", "def interface_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interface_name\")", "def l3_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"l3...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The type of the IP address allocation, defaulted to "DualStack".
def ip_allocation_type(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "ip_allocation_type")
[ "def ip_address_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address_type\")", "def _get_address_type(self):\n return self.__address_type", "def IpType(self):\n\t\treturn self._get_attribute('ipType')", "def address_type(self):\n return self._address_type", "def...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The resource ID of the Network Fabric l3IsolationDomain.
def l3_isolation_domain_id(self) -> pulumi.Output[str]: return pulumi.get(self, "l3_isolation_domain_id")
[ "def l3_isolation_domain_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"l3_isolation_domain_id\")", "def l3_id(self):\n return self._l3_id", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def cluster_resource_id(self) -> str:\n return pulum...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
join the input string
def my_join(iters, string): out = "" for i in range(iters): out += "," + string return out
[ "def join(self, iterable): # real signature unknown; restored from __doc__\n return \"\"", "def join(self, iterable) -> String:\n pass", "def join_strings(words):\n joined_string = ''\n for word in words:\n joined_string += word\n\n return joined_string", "def my_join(iters, string):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds the keys 'logits' and 'probs' to the end points dictionary of ResNet50v2.
def _get_updated_endpoints(original_end_points, name): end_points = dict(original_end_points) end_points['logits'] = tf.squeeze(end_points[name], [1, 2]) end_points['probs'] = tf.nn.softmax(end_points['logits']) return end_points
[ "def extend_network_dict(self, session, base_model, result):\n self._call_on_dict_driver(\"extend_network_dict\", session, base_model,\n result)", "def augment(self):\n n1 = { 'edges': [ self.next_insert['pred'], self.next_insert ], 'pred': self.next_insert['pred'] }...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load weights from a checkpoint file into the tensorflow graph.
def load_weights(self, checkpoint_path, sess=None): if sess is None: sess = tf.get_default_session() assert sess is not None saver = tf.train.Saver(self.variables_to_restore) saver.restore(sess, checkpoint_path)
[ "def load(self):\n latest = tf.train.latest_checkpoint(self.checkpoint_dir)\n self.model.load_weights(latest)", "def load_weights(model, checkpoint_path):\n # Your code here\n \n model.load_state_dict(torch.load(checkpoint_path))\n model.eval()", "def load_weights(self, path=None):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load weights from a checkpoint file into the tensorflow graph.
def load_weights(self, checkpoint_path, sess=None): if sess is None: sess = tf.get_default_session() assert sess is not None saver = tf.train.Saver(self.variables_to_restore) saver.restore(sess, checkpoint_path)
[ "def load(self):\n latest = tf.train.latest_checkpoint(self.checkpoint_dir)\n self.model.load_weights(latest)", "def load_weights(model, checkpoint_path):\n # Your code here\n \n model.load_state_dict(torch.load(checkpoint_path))\n model.eval()", "def load_weights(self, path=None):\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Warn about unused static variables.
def _find_unused_static_warnings(filename, lines, ast_list): static_declarations = { node.name: node for node in ast_list if (isinstance(node, ast.VariableDeclaration) and 'static' in node.type.modifiers) } def find_variables_use(body): for child in body: ...
[ "def unusedVars(self):\n fullcode = self.code_cfg\n variables = set([x[1:] for x in codeconfig_getvars(fullcode)])\n exceptions = set(['complexity', 'code_cfg'])\n clsvars = set(vars(self).keys())\n nones = set(filter(lambda x: self.__dict__[x] is None, clsvars))\n nones = ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the parsed contents of the config file.
def get_config(): return json.loads(CONFIG_FILE.read_text())
[ "def read(self):\n if self.default_file:\n self.read_default_config()\n return self.read_config_files(self.all_config_files())", "def read_config():\n\n\tfilename = \"config.json\"\n\n\tfile_object = open(filename, \"r\")\n\n\treturn json.loads(file_object.read())", "def _get_config(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
seed users. by defualt set to 5 users
def seed_User(number=5, overwrite=False): if overwrite: print('Overwriting all users') User.objects.all().delete() count = 0 for i in range(number): username = fake.first_name() User.objects.create_user( email=username + "@blogmail.com", password="vns...
[ "def populate(self, nbUsers):\n users = []\n f = faker.Faker()\n\n for i in range(nbUsers):\n user, addr = self.create_user(f.name(), f.address())\n users.append(user)\n\n self.session.add_all(users)\n self.session.commit()", "def generate_users(count=10):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
set_score increments the score by change can be negative
def set_score(self, change): self._score = self._score + change
[ "def set_score(self, score: float):\n self.score = score", "def set_score(self, score):\n self._score = score", "def update_score():\n pass", "def increase_score(self, score):\r\n self.score += score", "def set_score(self, score):\n self.score_function = score", "def sco...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
move_ray this is the primary function which is responsible for recursively moving a ray. Although it primarily look after the action of the Ray.Ray class it lives in the Game instance itself. THIS IS HOW WE DETERMINE THE EXIT POINT OF ALL RAYS HORIZONTAL, VERTICAL, OR WITH DETOURS
def move_ray(self, ray): # look to the next spot in the ray's trajectory next_coordinates = ray.get_next_location() next_location = self._board.get_board_square(next_coordinates) # check for a collisition - return if it occurs if ray.check_for_collision(next_location): ...
[ "def shoot_ray(self, row, column):\n # check if row/column is an allowed entry point\n if (row, column) not in self._allowed_entry_points:\n return False\n\n # add entry to entry/exit point list and deduct point if entry hasn't already been used\n if (row, column) not in self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
shoot_ray shoots a ray from a given row and column if possible
def shoot_ray(self, origin_row, origin_column): # get the the square object at row x column origin = self._board.get_board_square((origin_row, origin_column)) # check that it is a valid "edge" to send a ray from origin_check = origin.is_edge() # if it's not then return false ...
[ "def shoot_ray(self, row, column):\n\n # check if ray is being shot from corner square\n if (row == 0 or row == 9) and (column == 0 or column == 9):\n return False\n\n # check if ray is being shot from non-border square\n if row in range(1, 9) and column in range(1, 9):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
atoms_left returns the number of unguessed atoms still left
def atoms_left(self): return len(self._atoms)
[ "def atoms_left(self):\n return self._atoms_remaining", "def _get_mark_count_left(self):\n return self._number_of_bombs - sum([sum([1 for c in row if c.is_marked]) for row in self._cells])", "def num_pieces_left(self):\n return self.num_white_pieces + self.num_black_pieces", "def get_num_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test GRU gnmt encoder. time_major=True
def runGRUEncoder(self, encoder, num_layers): inputs_ph = tf.placeholder( dtype=tf.float32, shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH)) inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None)) outputs, states = encoder.encode( mode=tf.estimator.ModeKeys.TRAIN, ...
[ "def test_agilent_2d_rnmrtk():\n # prepare agilent converter\n vdic, vdata = ng.varian.read(os.path.join(DATA_DIR, \"agilent_2d\"))\n uvdic = ng.varian.guess_udic(vdic, vdata)\n vC = ng.convert.converter()\n vC.from_varian(vdic, vdata, uvdic)\n\n # prepare rnmrtk converter\n rdic, rdata = ng.rn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a CourseGraph, fetching unitary weights and edge weights from database, creating CourseNodes for each course, and
def __init__(self, database, session, max_suggestions=5, max_courses=30, cache_mult=4): self._nodes = dict() # dict with courseid keys, CourseNode vals self._max_suggestions = max_suggestions self._max_courses = max_courses self._cache_mult = cache_mult db = database ...
[ "def build_computational_graph():\n pass", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the crosslistings of the top edges from a course
def getTopEdgesFrom(self, session, courseid): node = self.getNode(courseid) # get CourseNode if not node: return [] edges = node.getEdges() # get its Edge dict return sorted(edges.keys(), key=lambda k: edges[k], reverse=True)[:5]
[ "def assembly_courses(wall):\n courses = []\n vertices = set(wall.nodes())\n base = set(wall.nodes_where({'is_support': True}))\n\n if base:\n courses.append(list(base))\n\n seen = set()\n seen.update(base)\n\n vertices -= base\n\n while vertices:\n nbrs = s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes turtle instance for turtle game.
def initialize(turtle_shape, bg_color, turtle_color, turtle_speed): turtle_instance = turtle.Turtle() turtle_instance.shape(turtle_shape) turtle.bgcolor(bg_color) turtle_instance.color(turtle_color) turtle_instance.speed(turtle_speed) return turtle_instance
[ "def init_turtle():\n turtle.up()\n turtle.home()", "def __init__(self):\r\n turtle.setup()\r\n turtle.screensize(100000, 100000)\r\n self.__risi_pot = turtle.Turtle()\r\n self.__risi_prijatelje = turtle.Turtle()\r\n self.__risi_pot.color('red')\r\n self.__risi_pot....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Defines the turtle movement for the initialized turtle instance and executes that movement.
def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed): turtle_name = initialize(turtle_shape, bg_color, turtle_color, turtle_speed) for i in range(36): for i in range(4): turtle_name.forward(200) turtle_name.right(90) turtle...
[ "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def init_turtle():\n turtle.up()\n turtle.home()", "def movement(self):", "def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Saves summary statistics as a csv file in the current directory and returns the output filename.
def save_summary_statistics_csv( experiment_name, roi_summary_data, save_directory_path: str = "" ): # Create directories on the path if they don't already exist Path(save_directory_path).mkdir(parents=True, exist_ok=True) csv_filename = f"{experiment_name} - summary statistics (generated {iso_datetime...
[ "def save_csv(dir_out, no_of_files, result):\n try:\n np.savetxt(f\"{dir_out}_results_from_{no_of_files}-files.csv\",\n result.T, delimiter=\",\", header='Time(h), Avrg_int, SD, SE, Sum_int, Max_int')\n except:\n print(\"Existing csv file is not accessible!\")\n exit()",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
stack pandas DataFrames logically into a bigger DataFrame, resets the index of the resulting DataFrame to avoid duplicates in the index
def _stack_dataframes(dataframes: List[pd.DataFrame]) -> pd.DataFrame: return pd.concat(dataframes).reset_index(drop=True)
[ "def split_and_stack(df,new_names):\n\n half = int(len(df.columns)/2)\n left = df.iloc[:, :half]\n right = df.iloc[:,half:]\n\n return pd.DataFrame(data = np.vstacks([left.values, right.values], columns = new_names))", "def make_sub_df(src_df, index_col, cols):\n cols.append(index_col)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
stack pandas Series logically into a DataFrame
def _stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame: return pd.concat(serieses, axis="columns").T
[ "def stack(*series):\n _timeseriescompat_multiple(*series)\n return time_series(MA.column_stack(series), series[0]._dates,\n **_attrib_dict(series[0]))", "def series_to_frame(series: pd.Series, new_col_names: Dict[Any, Any]) -> pd.DataFrame:\n return series.to_frame().reset_index()....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load instruments from configpath
def _load(self) -> list[Instrument]: logger.info("Loading config...") self._config = yml.load(self.configpath) instruments, modespec = self._config["instruments"], self._config["modes"] logger.success(f"Found {len(instruments)} instruments, {len(modespec)} modes")
[ "def test_load_configs_simulation(self):\n global locator, config_paths\n locator.load_config(config_paths[1])\n\n self.assertEqual(locator.config['routines'], ['simulate'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'SimDriver',\n 'kwargs': {...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Expose unique instrument classes found in config
def _expose(self) -> None: classes = {instrument.__class__ for instrument in self._config["instruments"]} for class_ in classes: pyro.expose(class_) logger.success(f"Exposed {len(classes)} instrument class(es): {classes}")
[ "def config(self) -> InstrumentConfig:\n ...", "def configure_instrumented_models(self):\n # Expose Pyramid configuration to classes\n from websauna.system.model.meta import Base\n Base.metadata.pyramid_config = self.config", "def instrument_configs(self) -> list:\n from .rss ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Register instrument instances and self with daemon and storing uris
def _serve(self) -> None: for instrument in self._config["instruments"]: uri = self._daemon.register(instrument, objectId=str(instrument)) self._services[instrument.id] = str(uri) logger.success(f"Registered {instrument} at {uri}") self.uri = self._daemon.register(sel...
[ "def register():\n signals.initialized.connect(initialize)\n signals.article_generator_context.connect(add_libravatar)", "def _Register(self):\r\n self._persistor.AddHandler(self)", "def __init__(self, instrument):\n endpoint = self.ENDPOINT.format(instrument=instrument)\n super(Instrumen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Disconnect instruments and shutdown daemon
def shutdown(self) -> None: logger.info("Disconnecting instruments...") for instrument in self._config["instruments"]: instrument.disconnect() logger.info(f"Shutting down {self}...") self._daemon.shutdown()
[ "def shutdown(self):\n os.remove('/tmp/mimic_daemon')\n for address, p in self._connections.iteritems():\n if not p.returncode:\n p.terminate()\n self.daemon.shutdown()", "def stopAndDisconnectWalabot():\n wlbt.Stop()\n wlbt.Disconnect()\n print ('Terminatio...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
python ~/code/xdoctest/testing/test_linenos.py test_lineno_failcase_called_code python ~/code/xdoctest/testing/test_linenos.py
def test_lineno_failcase_called_code(): text = _run_case(utils.codeblock( r''' def func(a): """ Example: >>> func(0) >>> # this doesnt do anything >>> print('this passes') this passes >>> # call t...
[ "def test_expected_failures(modpath, expected_failure):\n code = os.path.dirname(expected_failure)\n retcode, out = flake8(join(modpath, expected_failure))\n assert retcode, \"expected failure (%s), got success\" % code\n needle = \": %s \" % code\n assert needle in out\n\n with open(os.path.join(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add to the list of describing adjectives.
def add_adjectives(self, adjective): self.adjectives += [adjective]
[ "def add_adjectives(self, *sAdjs):\n self.adjectives += list(sAdjs)", "def add_adnotation(self, adnotation):\n adnotations = list([self.decoding_dict[v] for v in self.get_item_list()])\n adnotations.append(adnotation)\n adnotations.sort(key = lambda item: item.pos)\n self.decodi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the list of describing adjectives. The list is shuffled first because generally this is used to get a random adjective.
def get_adjectives(self): random.shuffle(self.adjectives) return self.adjectives
[ "def get_adjectives(lyrics):\n doc = nlp(lyrics.lower())\n all_adjectives = [token.lemma_ for token in doc if token.pos_ == \"ADJ\"]\n return all_adjectives", "def adjectives_sorted(lyrics):\n adjectives = get_adjectives(lyrics)\n sorted_adjectives = Counter(adjectives)\n return sorted_adjective...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the noun, including all its describing adjectives, as a string.
def full_string(self): return "{}: {}".format(str(self.word), " ".join([str(adj) for adj in self.adjectives]))
[ "def sentence():\r\n return nounPhrase() + \" \" + verbPhrase()", "def getNouns(self):\n return self.nouns", "def nounPhrase():\r\n return random.choice(articles) + \" \" + random.choice(nouns)", "def replaceNouns(self):\n textacy.extract.named_entities\n return self.sentence", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse a noun object from a data file containing nouns and their describing adjectives.
def parse(text):
    """Parse one noun record from a data file line.

    Expected token layout (space-separated):
        <noun> <count> [<adjective> <count>]...
    Builds a Noun from the first pair, then attaches each following
    (adjective, count) pair as a Word.
    """
    tokens = text.split(' ')
    noun = Noun(tokens[0], int(tokens[1]))
    rest = tokens[2:]
    while rest:
        noun.add_adjectives(Word(rest[0], int(rest[1])))
        rest = rest[2:]
    return noun
[ "def extractNouns(filepath, debug=False):\n try:\n text = open(filepath).read()\n except:\n print(\"No such file found. Aborting...\")\n exit()\n \n is_noun = lambda pos: pos[:2] == 'NN'\n # do the nlp stuff\n tokenized = nltk.word_tokenize(text)\n nouns = [word for (word, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the self.guessed_by and self.metaphors_used data as a readable string.
def get_str_metadata(self):
    """Format self.guessed_by and self.metaphors_used as a readable two-line string."""
    lines = [
        f"Guessed by {self.guessed_by}",
        f"{self.metaphors_used} metaphors used",
    ]
    return "\n".join(lines)
[ "def to_strings(self):\n str1 = \"Matches: {0}\".format(self.matches)\n str2 = \"Inliers: {0}\".format(self.inliers)\n str3 = \"Inlier ratio: {0:.2f}\".format(self.ratio)\n str4 = \"Keypoints: {0}\".format(self.keypoints)\n str5 = \"FPS: {0:.2f}\".format(self.fps)\n return ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Quick plot of a `tick.base.TimeFunction`
def plot_timefunction(time_function, labels=None, n_points=300, show=True, ax=None): if ax is None: fig, ax = plt.subplots(1, 1, figsize=(4, 4)) else: show = False if time_function.is_constant: if labels is None: labels = ['value = %.3g' % time_func...
[ "def test_plot_time_data():\n fig, ax = GlobalData.plot_time_data(timeStart=-1e-3, timeEnd=1e-3, units='ms', show_fig=False)\n return fig", "def cistime_py():\n timing.plot_scalings(compare='python')", "def plotTime(data,rate):\n t = np.arange(len(data))*1.0/rate\n \n #Plot time domain\n pl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a mapping from water measurement column names to indices of the given header.
def get_water_index_map(archive, header): column_re = { 'surface': { 'flow': 'pretok', 'level': 'vodostaj' }, 'ground': { 'altitude': 'nivo', 'level': 'vodostaj' } } column_map = {key: -1 for key in column_re[archive].keys()} ...
[ "def indices(header):\n return dict((n,i) for i,n in enumerate(header))", "def _create_field_header_index_dictionary(header):\n field_header_index_dict = {}\n for name in header:\n if name in call_data_field_dict.keys():\n field_header_index_dict[name] = header.index(name)\n\n return...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }