Columns: query (string, length 9 to 9.05k), document (string, length 10 to 222k), negatives (list, length 19 to 20), metadata (dict)
Load a saved DQN instance. In order to load a saved model, initialize the `DQN` class as usual (since some properties like model structure need to be defined through `__init__`), and then call this method, i.e. ``` dqn = DQN(...) dqn.load_model(save_path, custom_objects={}) ``` After this the weights of Q networks (`q_...
def load( self, save_path: str ): # load Q models and optimizer manager = tf.train.CheckpointManager( checkpoint=self.checkpoint, directory=save_path, max_to_keep=3 ) self.checkpoint.restore(manager.latest_checkpoint) # load hyperparameters ...
[ "def load_trained_DQN(self, path):\r\n\r\n trained_file = pickle.load(open(path, 'rb'))\r\n model = trained_file['model']\r\n print \"Trained DQN Parameters:\", json.dumps(trained_file['params'], indent=2)\r\n return model", "def load_trained_DQN(self, path):\r\n\r\n trained_fil...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save the weights of Q networks (`q_model` and `q_model_target`), the state of the optimizer, hyperparameters of the DQN instance and record of loss values. Note that `self.epsilon` will be stored as a single value, i.e. `self.epsilon(self.train_steps)`.
def save(self, save_path='training_savings'): if not os.path.exists(save_path): os.makedirs(save_path, exist_ok=True) # save weights of Q networks and the optimizer manager = tf.train.CheckpointManager( checkpoint=self.checkpoint, directory=save_path, max_to_keep=3 ...
[ "def save_model(self):\n with open(self.model_dir + \".pkl\", \"wb\") as f: # Save Q-network as pickle\n pickle.dump(self.agent._target_q_network, f)", "def save(self) -> None:\n # Make directory if it doesn't exist yet\n pathlib.Path(\"saved_models/\").mkdir(parents=True, exist_o...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
retrieve frozen node data
def get_frozen_node(user, data, pname): r = requests.get('%s/files/byProjectPathname/%s' % (ida_api_url, pname), json=data, auth=(user, ida_api_pass), verify=False) return r.status_code, r.json()
[ "def get_frozen(ctx):\n # TODO: Use newest helper from pynyzo\n url = \"{}/frozenEdge\".format(ctx.obj['client'])\n if VERBOSE:\n app_log.info(f\"Calling {url}\")\n res = get(url)\n if VERBOSE:\n app_log.info(res)\n if path.isdir(\"tmp\"):\n # Store for debug purposes\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
retrieve frozen node data associated with action
def get_frozen_node_action(user, pid): r = requests.get('%s/files/action/%s' % (ida_api_url, pid), auth=(user, ida_api_pass), verify=False) return r.status_code, r.json()
[ "def get_frozen_node(user, data, pname):\n r = requests.get('%s/files/byProjectPathname/%s' % (ida_api_url, pname), json=data, auth=(user, ida_api_pass), verify=False)\n return r.status_code, r.json()", "def getActionInfo(action_chain, check_visibility=0, check_condition=0):", "def ex_node_action(self, no...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
SET and RESUME buttons enter controls-allowed on their falling edge.
def test_set_resume_buttons(self): for btn in range(8): self.safety.set_controls_allowed(0) for _ in range(10): self._rx(self._button_msg(btn)) self.assertFalse(self.safety.get_controls_allowed()) # should enter controls allowed on falling edge if btn in (Buttons.RES_ACCEL, ...
[ "def valveControl():\n cageSet = AHF_CageSet()\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(cageSet.rewardPin, GPIO.OUT, initial=GPIO.LOW)\n runLoop(cageSet)", "def control(self, car):\n\n keys = pygame.key.get_pressed()\n if keys[K_ESCAPE]:\n return True\n\n #...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solve min_x ||x||_1 s.t. Ax = y, and compute err = ||x - true_x||_2.
def l1_min_err(A, y, true_x): emb_dim, input_dim = A.shape model = Model() model.params.outputflag = 0 # disable solver output x = [] for i in xrange(input_dim): x.append(model.addVar(lb=-GRB.INFINITY, ub=GRB.INFINITY, obj=0)) for i in xrange(input_dim): x.append(model.addVar(l...
[ "def min_error(self):\n \n res = self.y[0] - self.y_line[0]\n if res < 0:\n res = res * -1\n\n min_error_var = res\n\n for j in range(self.y.size-1):\n res = self.y[j] - self.y_line[j]\n if res < 0:\n res = res * -1\n\n if...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run l1_min for each sample, and compute the RMSE. true_X is a 2D csr_matrix with shape=(num_sample, input_dim).
def l1_min_avg_err(A, Y, true_X, use_pos=False, eps=1e-10): num_sample = Y.shape[0] num_exact = 0 # number of samples that are exactly recovered num_solved = num_sample # number of samples that successfully runs l1_min err = 0 for i in xrange(num_sample): y = Y[i, :].reshape(-1,) x...
[ "def l1_min_err(A, y, true_x):\n emb_dim, input_dim = A.shape\n model = Model()\n model.params.outputflag = 0 # disable solver output\n x = []\n for i in xrange(input_dim):\n x.append(model.addVar(lb=-GRB.INFINITY, ub=GRB.INFINITY, obj=0))\n for i in xrange(input_dim):\n x.append(m...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform CoSaMP with a block-sparsity model. Block-sparse signals are defined in Section VI of the paper "Model-based compressive sensing".
def CoSaMP_block_sparsity(A, y, true_x, block_dim, sparsity_level, eps=1e-10, use_pos=False): emb_dim, input_dim = A.shape x = np.zeros(input_dim) residual = y - np.dot(A, x) err = np.linalg.norm(x-true_x) current_supp_idx = None # block indices of current estimation's sup...
[ "def assemble_ss(self, wake_prop_settings=None):\n\n cout.cout_wrap('\\tBlock form state-space realisation of UVLM equations started...', 1)\n t0 = time.time()\n MS = self.MS\n K, K_star = self.K, self.K_star\n Kzeta = self.Kzeta\n\n # --------------------------------------...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run CoSaMP_block_sparsity for each sample, and compute the RMSE. true_X is a 2D csr_matrix with shape=(num_sample, input_dim).
def CoSaMP_block_avg_err(A, Y, true_X, block_dim, sparsity_level, eps=1e-10, use_pos=False): num_sample = Y.shape[0] num_exact = 0 # number of samples that are exactly recovered num_solved = num_sample # number of samples that successfully runs CoSaMP ...
[ "def CoSaMP_block_sparsity(A, y, true_x, block_dim, sparsity_level,\n eps=1e-10, use_pos=False):\n emb_dim, input_dim = A.shape\n x = np.zeros(input_dim)\n residual = y - np.dot(A, x)\n err = np.linalg.norm(x-true_x)\n current_supp_idx = None # block indices of current estim...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Perform CoSaMP with a one-hot-sparsity model. Here 'one-hot sparsity' refers to the sparsity structure of one-hot encoded data. The one-hot-sparsity model is specified by 'feature_indices', a 1D array storing the index ranges of each categorical feature.
def CoSaMP_onehot_sparsity(A, y, true_x, feature_indices, eps=1e-10, use_pos=False): emb_dim, input_dim = A.shape x = np.zeros(input_dim) residual = y - np.dot(A, x) err = np.linalg.norm(x-true_x) current_supp_idx = None # feature indices of current estimation's support ...
[ "def sparse_one_hot(indices, dense_shape, dtype=dtypes.float32):\n flat_indices = to_tensor_cast(indices, dtypes.int64)\n indices = batch_to_matrix_indices(flat_indices, dtype=dtypes.int64)\n\n return sparse_ones(indices, dense_shape, dtype)", "def indexes_to_one_hot(self, indexes):\n indexes = in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Run CoSaMP_onehot_sparsity for each sample, and compute the RMSE. true_X is a 2D csr_matrix with shape=(num_sample, input_dim).
def CoSaMP_onehot_avg_err(A, Y, true_X, feature_indices, eps=1e-10, use_pos=False): num_sample = Y.shape[0] num_exact = 0 # number of samples that are exactly recovered num_solved = num_sample # number of samples that successfully runs CoSaMP err = 0 for i in xrange(num_s...
[ "def CoSaMP_onehot_sparsity(A, y, true_x, feature_indices,\n eps=1e-10, use_pos=False):\n emb_dim, input_dim = A.shape\n x = np.zeros(input_dim)\n residual = y - np.dot(A, x)\n err = np.linalg.norm(x-true_x)\n current_supp_idx = None # feature indices of current estimation'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Replaces dimensions in a list with a dictionary of overrides. Overrides should be indexed by the dimension name, with values that are either a Dimension object, a string name, or a dictionary specifying the dimension parameters to override.
def replace_dimensions(dimensions, overrides): replaced = [] for d in dimensions: if d.name in overrides: override = overrides[d.name] else: override = None if override is None: replaced.append(d) elif isinstance(override, basestring): ...
[ "def _update_dimensions(self, dimensions):\n if self._dimensions:\n new_dimensions = self._dimensions.copy()\n else:\n new_dimensions = {}\n if dimensions:\n new_dimensions.update(dimensions)\n\n return new_dimensions", "def _replace_dim(coords, dims, a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes the Dimension object with the given name.
def __init__(self, name, **params): if isinstance(name, Dimension): existing_params = dict(name.get_param_values()) elif (name, params.get('unit', None)) in self.presets.keys(): preset = self.presets[(str(name), str(params['unit']))] existing_params = dict(preset.get_...
[ "def dimension(self, name):\n dim_classes = {\n \"crimes\": Crimes,\n \"regions\": Regions,\n \"periods\": Periods\n }\n if name not in dim_classes.keys():\n raise Exception(\"{} is not a valid dimension. Options are {}.\"\\\n .form...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Pretty prints the dimension name and value using the global title_format variable, including the unit string (if set). Numeric types are printed to the stated rounding level.
def pprint_value_string(self, value): unit = '' if self.unit is None else ' ' + self.unit value = self.pprint_value(value) return title_format.format(name=self.name, val=value, unit=unit)
[ "def _fmt_value(x):\n if precision is not None and isinstance(x, Number):\n return str(round(x, precision))\n else:\n return str(x)", "def _tab_print_ ( t , title = '' , prefix = '' , alignment = 'll' , xfmt = '%+.5g' , yfmt = '%+-.5g' ) :\n rows = [ ('Abscis...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
All LabelledData subclasses must supply data to the constructor, which will be held on the .data attribute. This class also has an id instance attribute, which may be set to associate some custom options with the object.
def __init__(self, data, id=None, **params): self.data = data self.id = id if isinstance(params.get('label',None), tuple): (alias, long_name) = params['label'] label_sanitizer.add_aliases(**{alias:long_name}) params['label'] = long_name if isinstance(...
[ "def __init__(self,\n data,\n labels,\n fake_data=False,\n one_hot=False,\n dtype=dtypes.float32,\n seed=None):\n seed1, seed2 = random_seed.get_seed(seed)\n # If op level seed is not set, use whatever grap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a clone of the object with matching parameter values containing the specified args and kwargs. If shared_data is set to True and no data explicitly supplied, the clone will share data with the original. May also supply a new_type, which will inherit all shared parameters.
def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides): params = dict(self.get_param_values()) if new_type is None: clone_type = self.__class__ else: clone_type = new_type new_params = new_type.params() params = {k: v for k...
[ "def clone(self, **kwargs):\n\n # Sanity check\n clonekeys = set(kwargs.keys())\n objkeys = set(self.meta.keys())\n diffkeys = clonekeys - objkeys\n diffkeys.discard(\"sign\")\n\n if diffkeys:\n raise ValueError(\"Unknown field names: {}\".format(diffkeys))\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assign a new label and/or group to an existing LabelledData object, creating a clone of the object with the new settings.
def relabel(self, label=None, group=None, depth=0): keywords = [('label',label), ('group',group)] obj = self.clone(self.data, **{k:v for k,v in keywords if v is not None}) if (depth > 0) and getattr(obj, '_deep_indexable', False): for k, v in obj.items(): ...
[ "def new_labels(self, labels):\n updated_labels = copy(self.labels)\n updated_labels.update(labels)\n return self.__class__(\n key=self.metric_id,\n measurement_unit=self.measurement_unit,\n labels=updated_labels,\n *self.init_args,\n **sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A specification may be a class, a tuple or a string. Equivalent to isinstance if a class is supplied, otherwise matching occurs on type, group and label. These may be supplied as a tuple of strings or as a single string of the form "{type}.{group}.{label}". Matching may be done on {type} alone, {type}.{group}, or {type...
def matches(self, spec): if callable(spec) and not isinstance(spec, type): return spec(self) elif isinstance(spec, type): return isinstance(self, spec) specification = (self.__class__.__name__, self.group, self.label) split_spec = tuple(spec.split('.')) if not isinstance(spec, tuple) els...
[ "def match(cls, kind: 'Any') -> bool:\n return isinstance(kind, cls)", "def _is_spec(spec):\n if isinstance(spec, MakefileSpec):\n return True\n elif isinstance(spec, types.TypeType) and issubclass(spec, MakefileSpec):\n return True\n return False", "def matches(self, *args):\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Traverses any nested LabelledData object (i.e. LabelledData objects containing LabelledData objects), applying the supplied function to each constituent element if it matches the supplied specifications. The outputs of these function calls are collected and returned in the accumulator list. If specs is None, all constituent element...
def traverse(self, fn, specs=None, full_breadth=True): accumulator = [] matches = specs is None if not matches: for spec in specs: matches = self.matches(spec) if matches: break if matches: accumulator.append(fn(self)) # As...
[ "def get_health_checks_from_spec(self, spec):\n\n def name(check):\n assert check['Name'], (\n f'Service check for service \"{service[\"Name\"]}\" should have a name'\n )\n return check['Name']\n\n for group in spec['TaskGroups'] or []:\n for ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
When unpickled, restore the saved style and plotting options to ViewableElement.options.
def __setstate__(self, d): d = param_aliases(d) try: load_options = Store.load_counter_offset is not None if load_options: matches = [k for k in d if k.startswith('_custom_option')] for match in matches: custom_id = int(match.sp...
[ "def resetPlotOptions(self):\n self.options.plotOriginalData = True\n self.options.plotFilteredData = True\n self.options.plotSmoothedData = True\n self.options.plotPredictedData = True\n self.options.showDataPoint = False\n self.options.showFittedPoint = False\n sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Validates key dimension input. Returns kdims if no dimensions are specified.
def _valid_dimensions(self, dimensions): if dimensions is None: dimensions = self.kdims elif not isinstance(dimensions, list): dimensions = [dimensions] valid_dimensions = [] for dim in dimensions: if isinstance(dim, Dimension): dim = dim.name ...
[ "def _check_size_of_dimensions(self, input_shape):\n dim = input_shape[self.axis]\n if dim < self.groups:\n raise ValueError('Number of groups {} cannot be more than the number of '\n 'channels {}.'.format(self.groups, dim))\n\n if dim % self.groups != 0:\n raise ValueError(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Access a Dimension object by name or index. Returns the default value if the dimension is not found and strict is False. If strict is True, a KeyError is raised instead.
def get_dimension(self, dimension, default=None, strict=False): all_dims = self.dimensions() if isinstance(dimension, Dimension): dimension = dimension.name if isinstance(dimension, int): if 0 <= dimension < len(all_dims): return all_dims[dimension] ...
[ "def dimensionobject(self, dname, vname=None):\n if vname is None:\n try:\n return self.axes[dname]\n except KeyError:\n raise CDMSError(\"No axis named \" + dname + \" in file \" +\n self.id + \".\")\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the index of the requested dimension.
def get_dimension_index(self, dim): if isinstance(dim, Dimension): dim = dim.name if isinstance(dim, int): if (dim < (self.ndims + len(self.vdims)) or dim < len(self.dimensions())): return dim else: raise IndexError('Dimension inde...
[ "def get_dimension_index(self, dimension, value):\n # Get an arbitrary dataset to look into the dimensions\n dataset = self.first_winter['u']\n\n # Get available levels for the dimension of interest\n available_levels = list(dataset[dimension][:].data)\n\n # Get the index where it...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the specified Dimension type if one is declared; otherwise, if the dimension_values types are consistent, returns that type; otherwise None is returned.
def get_dimension_type(self, dim): dim_obj = self.get_dimension(dim) if dim_obj and dim_obj.type is not None: return dim_obj.type dim_vals = [type(v) for v in self.dimension_values(dim)] if len(set(dim_vals)) == 1: return dim_vals[0] else: retu...
[ "def guess_dim_type(dimension):\n\n dimclasses = {'T':_possiblet,\n 'Z':_possiblez,\n 'Y':_possibley,\n 'X':_possiblex}\n\n for dcname, dcvals in dimclasses.iteritems():\n if dimension in dcvals:\n return dcname\n\n return None", "def g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allows slicing or indexing into the Dimensioned object by supplying the dimension and index/slice as key value pairs. Select descends recursively through the data structure applying the key dimension selection. The 'value' keyword allows selecting the value dimensions on objects which have any declared. The selection m...
def select(self, selection_specs=None, **kwargs): # Apply all indexes applying on this object vdims = self.vdims+['value'] if self.vdims else [] kdims = self.kdims local_kwargs = {k: v for k, v in kwargs.items() if k in kdims+vdims} # Check selection_spe...
[ "def _seldict(ds, region_val, vdim_name, vdim_ind):\n\n seldict = {}\n if region_val is not None:\n seldict[\"region\"] = region_val\n if vdim_name is not None:\n if vdim_ind is None:\n vdim_ind_loc = -1 if vdim_name == \"lev\" else 0\n else:\n vdim_ind_loc = vdim...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the values along the specified dimension. This method must be implemented for all Dimensioned types.
def dimension_values(self, dimension, expanded=True, flat=True): val = self._cached_constants.get(dimension, None) if val: return np.array([val]) else: raise Exception("Dimension %s not found in %s." % (dimension, self.__class__.__name__))
[ "def list_dimension_values(self, **kwargs):\n return self._list('/dimensions/names/values', **kwargs)", "def dimensionarray(self, dname, vname=None):\n return self.dimensionobject(dname, vname).getValue()", "def dim_coords(self, dim):\n return self.spec.dim_coords(dim)", "def GetDimension...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the range of values along the specified dimension. If data_range is True, the data may be used to try and infer the appropriate range. Otherwise, (None,None) is returned to indicate that no range is defined.
def range(self, dimension, data_range=True): dimension = self.get_dimension(dimension) if dimension is None: return (None, None) if dimension.range != (None, None): return dimension.range elif not data_range: return (None, None) soft_range = [r...
[ "def get_range(dataset):\n min_max = []\n for col in dataset.columns:\n min_max.append([min(dataset[col]), max(dataset[col])])\n return min_max", "def resolve_range(relayoutData, axis='x'):\n log.debug(relayoutData)\n assert axis in 'xy'\n ax = axis\n if not relayoutData or ax+'axis.au...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Simple foo function that returns a string with an exclamation point
def foo(param): param = param + '!' return param
[ "def foo(n):\n if n >= 1:\n return \"foo \" * (n-1) + \"foo.\"\n else:\n return \"\"", "def negate(condition):\n if condition.startswith('!'):\n return condition[1:]\n return \"!\" + condition", "def str_if_not_none(value):\n ...", "def dup_string(self): # real signature un...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
public function to translate a lemma
def translateLemma(lemma, fromLang=None, toLang=None): dbName = __useDB(fromLang, toLang) if lemma in trans_dicts[dbName]: return trans_dicts[dbName][lemma] else: v = __translateLemma(lemma, dbName) trans_dicts[dbName][lemma] = v return v
[ "def translate(input_str, lang_source, lang_target):\n pass", "def lemmatize_word(word):\n return lemmatizer.lemmatize(word,'v')", "def luanerize_word(word):\n\n import nltk # pylint: disable=import-outside-toplevel\n\n wnl = nltk.stem.WordNetLemmatizer()\n lemma = wnl.lemmatize(word.strip(strin...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
query: get the user's last post
def get_last_post(db: _orm.Session, user_id: int): return (db .query(_models.Post.text, _models.Post.date_last_updated) .filter(_models.Post.user_id == user_id) .order_by(_models.Post.date_last_updated.desc()).first() )
[ "def get_latest_posts_from_user(self):\n post = Post.query.filter(Post.user_id == self.id).order_by(Post.id.desc()).first()\n latest = post.id + 1\n if latest < 11:\n oldest = 1\n else:\n oldest = latest - 10\n return self.get_latest_posts_from(latest, oldest...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
query: get a user's post by date
def get_post_by_date(db: _orm.Session, user_id: int, date: str, admin: bool): if admin: return (db .query(_models.Post) .filter(_models.Post.user_id == user_id) .filter(_models.Post.date_last_updated == date) .first() ) return (db ...
[ "def user_post(user_id):\n user_posts = Post.query.filter(Post.user_id == user_id).order_by(\n Post.created_date.desc()).all()\n return render_template('showAllPost.html', user_posts=user_posts)", "def get_user_posts(db: _orm.Session, user_id:int):\n\n return db.query(_models.Post).filter(_models....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
query: get all posts from a user
def get_user_posts(db: _orm.Session, user_id:int): return db.query(_models.Post).filter(_models.Post.user_id == user_id).all()
[ "def list_posts_by_user():\n limit = request.args.get('length', default=25)\n token = request.args.get('token')\n\n user = User.query.filter_by(token=token).first()\n if user is None:\n return jsonify({\"error\": \"Access Denied!\"})\n\n posts = Post.query.filter_by(author_id=user.id).order_by...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
query: get a post by id
def get_post(db: _orm.Session, post_id: int): return db.query(_models.Post).filter(_models.Post.post_id == post_id).first()
[ "def get(self, id): \n post = get(id)\n return post", "def get_post(id):\n db = get_db()\n post = db.execute(\n 'SELECT * FROM post WHERE id=?',\n (id,)\n ).fetchone()\n return post", "def get(cls, id):\n post = Session.query(Post).filter_by(id=id).first()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the count of valid passwords given in a list
def count_valid_passwords(passwords: list, validity_check: Callable[[str], bool]) -> int: count = 0 for password in passwords: if validity_check(password): count += 1 return count
[ "def part_1(passwords: list) -> int:\n\n def correct_count(password: tuple) -> bool:\n cmin, cmax, pchr, pword = password\n return int(cmin) <= pword.count(pchr) <= int(cmax)\n\n return sum(map(correct_count, passwords))", "def solve(passwords: List[str], is_valid: Callable[[str], bool]) -> in...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a fasta sequence (SeqRecord) and removes all bases after a perfect match with the adapter_sequence.
def remove_adapter(fasta_sequence, adapter_sequence): raise NotImplementedError
[ "def trim_seqs(args, barcode_adapter_map):\n with open(args.fasta) as seqs_fna:\n with open(args.output_file, 'w') as seqs_trimmed:\n num_unmatched = 0\n line = seqs_fna.readline()\n lib_name = \"\"\n while line:\n # Random threshold of 1000 - cou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a pysam.AlignmentFile and returns a dictionary with point mutation types as keys and the number of found point mutation events as values. E.g. {
def count_point_mutations(pysam_alignment_file): raise NotImplementedError
[ "def get_taxon_number_dict(alignment):\n taxon_number_dict = {}\n with open(alignment) as infh:\n started = False\n taxon_num = 0\n for i in infh:\n if i.startswith('matrix') or i.startswith('\\tMATRIX'):\n started = True\n if i.startswith(';'):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to calculate the range of a given list.
def range_of_list(l: list): return max(l) - min(l)
[ "def data_range(my_list):\n datoMenor = min_value(my_list)\n datoMayor = max_value(my_list)\n return datoMayor - datoMenor", "def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)", "def find_range(input_list,input_number):\n first_occurrence = input_list.index(input_number)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to total all given list values.
def list_total(l: list): return sum(l)
[ "def sum_list(self, list_values):\n total_sum = 0\n for value in list_values:\n total_sum += value\n\n return total_sum", "def list_sum(l):\n # replace the line below with your code\n return float(sum(l))", "def Total(listlikeArg):\n return np.sum(np.array(listlikeArg),a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a dictionary to a string of ``key=\"value\"`` pairs. If ``None`` is provided as the dictionary, an empty string is returned, i.e. no HTML attributes are generated.
def _dict_to_html_attributes(d): if d is None: return "" return "".join(" {}=\"{}\"".format(key, value) for key, value in iter(d.items()))
[ "def attrs_to_str(attrs):\n dic = {}\n for k, v in attrs:\n dic.setdefault(k, []).append(str(v))\n\n lis = []\n for k, v in attrs:\n l = dic.pop(k, None)\n if not l:\n continue\n lis += ['%s=\"%s\"' % (k, ' '.join(l))]\n\n if not lis:\n return ''\n\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
calculate the Paeth predictor on the 2x2 neighbourhood (c b / a x), where x is the point you are looking at.
def paeth_predictor(a, b, c): p = a + b - c pa = abs(p - a) pb = abs(p - b) pc = abs(p - c) # return nearest of a,b,c, # breaking ties in order a,b,c. if (pa <= pb) and (pa <= pc): return a elif (pb <= pc): return b else: return c
[ "def predict(x,m,c):\n return x*m+c", "def get_paeth_value(a, b, c, raw_val):\n return (raw_val - paeth_predictor(a, b, c))", "def prediction(theta, x):\n\n return theta[0] + theta[1]*x", "def predict(self, x):\n res = 0\n for arbre in self.arbres:\n res += arbre.predict(x)\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
get value. To encode: Paeth(i,j) = Raw(i,j) - PaethPredictor(Raw(i-1,j), Raw(i,j-1), Raw(i-1,j-1)). To decode the Paeth filter: Raw(i,j) = Paeth(i,j) + PaethPredictor(Raw(i-1,j), Raw(i,j-1), Raw(i-1,j-1))
def get_paeth_value(a, b, c, raw_val): return (raw_val - paeth_predictor(a, b, c))
[ "def encoder(self, x):\n x = x.view(-1, *self.img_size)\n pi = self.enc_y(x)\n z_params = self.enc_z(x)\n return z_params, pi", "def getPrediction(self):\r\n \treturn self.prediction", "def getPrediction(nnOutput):\n\treturn [nnOutput, 1.0]", "def decode(self, y):\n asser...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
print the time and string
def tprint(s): print("[" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + "] " + s)
[ "def time_display(time):\n return timeFormat(time)", "def timeprint(*args, **kwargs: Any) -> None:\n print(datetime.now().isoformat(), *args, **kwargs)", "def print_with_timestamp(text):\n now = time.time()\n now_string = datetime.datetime.fromtimestamp(now).strftime(\n '%Y%m%d-%H:%M:%S')\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test if a DVH can be calculated with a max dose limit.
def test_dvh_calculation_with_dose_limit(self): # Set the dose limit to 100 cGy limitdvh = self.calc_dvh(5, limit=500) # Volume self.assertAlmostEqual(limitdvh.volume, 440.212499999) # Min dose bin self.assertAlmostEqual(limitdvh.bins[0], 0) # Max dose bin ...
[ "def is_valid_max_limit(self) -> bool:\n if (self._end_dt is not None) and (self._start_dt is None):\n return True", "def hasMax(*args, **kwargs):\n \n pass", "def _get_hasMaximumValue(self) -> \"bool\" :\n return _core.DistanceValueCommandInput__get_hasMaximumValue(self)", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get data dictionary by the provided name.
def get_by_name(cls, name: str) -> "DataDictionary": cls.logger.debug("Get CDS data dictionary with %s name", name) return DataDictionary( data_dictionary_json=cls.send_message_json( "GET", f"Get {name} CDS data dictionary", f"{cls._url}/api/v1...
[ "def getfdict(dataset, name):\n import simplejson as json\n from urllib import urlopen\n from pprint import pprint\n if dataset not in DATASETS:\n raise web.notfound('No dataset with dataset %s. Choices are: %s' % (dataset, ', '.join(DATASETS)))\n fname = 'dataset_%s.txt' % (dataset)\n if d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload data dictionary using CDS API.
def upload(self) -> None: self.logger.debug("Upload %s data dictionary", self.name) self.send_message( "POST", "Publish CDS data dictionary", f"{self.url}", auth=self.auth, data=json.dumps(self.data_dictionary_json) )
[ "def upload(self) -> None:\n self.logger.debug(\"Upload data dictionary\")\n for data_dictionary in self.dd_set: # type DataDictionary\n data_dictionary.upload() # raise a relevant exception", "def upload_dictionary(self, bucket_name, file_name, dictionary):\n s3_object = self.s3...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fix data dictionary schema. A "raw" data dictionary can be passed during initialization, but this kind of data dictionary can't be uploaded to blueprintprocessor. This method tries to fix it. It can be done only if the "raw" data dictionary
def fix_schema(self) -> None: try: self.data_dictionary_json = { "name": self.data_dictionary_json["name"], "tags": self.data_dictionary_json["tags"], "data_type": self.data_dictionary_json["property"]["type"], "description": self.data_...
[ "def _rebuild_invalid_data(self, data):\n new_data = {}\n for block_name, block_dict in data.items():\n new_data[block_name] = {}\n # rebuild block_dict\n for k, v in block_dict.items():\n if k == 'data':\n new_data[block_name]['data']...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Upload all data dictionaries using CDS API.
def upload(self) -> None: self.logger.debug("Upload data dictionary") for data_dictionary in self.dd_set: # type DataDictionary data_dictionary.upload() # raise a relevant exception
[ "def upload(self) -> None:\n self.logger.debug(\"Upload %s data dictionary\", self.name)\n self.send_message(\n \"POST\",\n \"Publish CDS data dictionary\",\n f\"{self.url}\",\n auth=self.auth,\n data=json.dumps(self.data_dictionary_json)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create data dictionary set from file. The file has to contain valid JSON with a list of data dictionaries.
def load_from_file(cls, dd_file_path: str, fix_schema: bool = True) -> "DataDictionarySet": dd_set: DataDictionarySet = DataDictionarySet() try: with open(dd_file_path, "r") as dd_file: # type file dd_json: dict = json.loads(dd_file.read()) for data_dictiona...
[ "def load_data(filepath):\n with open(filepath, \"r\") as input_file:\n json_data = json.load(input_file)\n return json_data", "def load_data_from_file(file_name, redis_instance):\n with open(file_name, \"r\") as file_obj:\n for line in file_obj:\n json_line = json.lo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete the temporary root before or after running the tests, depending on the env variable.
def delete_project_temp_root(): if ENV_TEST_DIR: # If the environment variable is configured, delete its contents before the tests. if TEMP_ROOT_PATH.exists(): shutil.rmtree(str(TEMP_ROOT_PATH)) TEMP_ROOT_PATH.mkdir() yield if not ENV_TEST_DIR: # If the environm...
[ "def _cleanup ( self ):\n super ( TemporaryDistroot, self )._cleanup()\n shutil.rmtree ( self._root )", "def cleanup_tempdir():\n devnull = open('/dev/null', 'w')\n # ignore non-zero return codes\n for disk in BOOT_DISK, SYSTEM_DISK, CACHE_DISK, DATA_DISK, \\\n SDCARD_DISK:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
returns the queryset needed to select Statistic rows for a weekly, monthly, or lifetime rollup, which may or may not be author-specific.
def _queryset_for_interval_rollup(cls, repo=None, author=None, interval=None, start_day=None, end_date=None): if author is None: if interval != LIFETIME: return Statistic.objects.filter( author__isnull=True, interval=DAY, r...
[ "def get_queryset(self):\n queryset = (\n super()\n .get_queryset()\n .filter(items__branch__course__in=self.request.user.courses)\n .filter(items__branch__course=self.kwargs[\"course_id\"])\n .annotate(name=F(\"items__name\"))\n .annotate(gra...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the authors involved in the repo
def get_authors_for_repo(cls, repo): return Author.authors(repo)
[ "def fetch_authors_other_work(self):\r\n sumGPS = gitProfileSet(\"inverse_\"+self.name)\r\n repoList = []\r\n \r\n for author in tqdm(self.authors.values()):\r\n repoList.extend([repo.clone_url for repo in author.getRepos()])\r\n\r\n return repoList", "def get_fic_aut...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the day, week, month, and lifetime stats for a repo, but on a team basis, not a per-author basis.
def rollup_team_stats(cls, repo): commits = Commit.objects.filter(repo=repo) commit_days = commits.datetimes('commit_date', 'day', order='ASC') total_instances = [] for start_day in commit_days: if repo.last_scanned and start_day < repo.last_scanned: break...
[ "def rollup_author_stats(cls, repo):\n\n # FIXME: very long function, refactor/simplify.\n\n total_instances = []\n\n authors = cls.get_authors_for_repo(repo)\n author_count = 0\n author_total = len(authors)\n\n for author in authors:\n\n print(\"author: %s/%s: %...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the day, week, month, and lifetime stats for a repo, for all authors in that repo. Contrast with rollup_team_stats.
def rollup_author_stats(cls, repo): # FIXME: very long function, refactor/simplify. total_instances = [] authors = cls.get_authors_for_repo(repo) author_count = 0 author_total = len(authors) for author in authors: print("author: %s/%s: %s" % (author_count...
[ "def rollup_team_stats(cls, repo):\n\n commits = Commit.objects.filter(repo=repo)\n\n commit_days = commits.datetimes('commit_date', 'day', order='ASC')\n\n total_instances = []\n for start_day in commit_days:\n\n if repo.last_scanned and start_day < repo.last_scanned:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute rollups for the specified repo passed in by the daemon
def rollup_repo(cls, repo): assert repo is not None commits = Commit.objects.filter(repo=repo) if commits.count() == 0: cls.finalize_scan(repo) return cls.rollup_team_stats(repo) cls.rollup_author_stats(repo) cls.finalize_scan(repo)
[ "def rollup_author_stats(cls, repo):\n\n # FIXME: very long function, refactor/simplify.\n\n total_instances = []\n\n authors = cls.get_authors_for_repo(repo)\n author_count = 0\n author_total = len(authors)\n\n for author in authors:\n\n print(\"author: %s/%s: %...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
model of surplus after time t, considering investment income, premiums, and liabilities
def U(t,kappa,P,N,U_0,initial,t_M,t_mu,c_hat,p1,p2,p3): """input: p is the percentage of money left as emergency/faciliatory use""" """ P is the monthly premium cost""" """ N is the number of insurance holders""" """ U_0 is the initial amount invested by others as seed money for insura...
[ "def strategy_v1_sl_tp_cap_cumul(data,fastperiod, slowperiod, stopLoss, takeProfit):\n\n ## EMA \n data['fastperiod'] = data['Close'].rolling(window=fastperiod).mean()\n data['slowperiod'] = data['Close'].rolling(window=slowperiod).mean()\n \n \n sigPriceBuy=[]\n sigPriceSell=[]\n long_state...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to return a PDF file of all the contacts
def getPDF(): try: contacts = collection.find( { "todo": "todo" }, { "_id": 0, "name": 1, "contact_number": 1, "date_time": 1 } ) text = "ALL CONTACTS ARE BELOW:\n...
[ "def pdf_builder():\n pdfs = []\n dir = '/home/pi/HoppoRoo/HoppoRoo/static/res/'\n # dir = 'C:\\\\Users\\\\Jake\\\\git3\\\\HoppoRoo\\\\static\\\\res\\\\'\n for pdf in os.listdir(dir):\n if 'pdf' in pdf:\n pdf = {'name': pdf,\n 'dir': dir+pdf}\n pdfs.append(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the conn_name of this DataBindingDir.
def conn_name(self) -> str: return self._conn_name
[ "def get_db_name(self):\n return self.dbname", "def get_connection_database_name(self):\n return self._database._engine.url.database", "def get_db_name(self):\n return self.config.get(\"db\", \"name\")", "def _getDBName(self):\n return getConfiguration().getDatabaseFactory(self._pa...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the conn_name of this DataBindingDir.
def conn_name(self, conn_name: str): self._conn_name = conn_name
[ "def set_dataset_name(self, dataset_name):\n self.name = dataset_name", "def conn_name(self) -> str:\n return self._conn_name", "def datacenter_name(self, datacenter_name):\n\n self._datacenter_name = datacenter_name", "def setDatabase(self, dbname):\n # TODO: Need to check if dbn...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the local_path of this DataBindingDir.
def local_path(self) -> str: return self._local_path
[ "def local_path(self) -> str:\n\n return self.__local_path", "def get_local_directory(self):\n \n # Gives Local Direcory path equivalent to URL Path in server\n rval = os.path.join(self.rootdir, self.domain)\n\n for diry in self.dirpath:\n if not diry: continue\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the local_path of this DataBindingDir.
def local_path(self, local_path: str): self._local_path = local_path
[ "def local_path(self) -> str:\n\n return self.__local_path", "def add_path_to_local_dataset_str(path: str) -> Path:\n local_dataset = get_project_root() / \"LocalDataset\"\n full_path = local_dataset / path\n return full_path", "def set_source_path(self, source_path):\n\n self.source_path...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the remote_path of this DataBindingDir.
def remote_path(self) -> str: return self._remote_path
[ "def local_path(self) -> str:\n\n return self.__local_path", "def remote(self):\n ret = self._get_attr(\"remote\")\n return ret", "def import_path(self):\n return os.path.join(self.remote_root, self.pkg) if self.pkg else self.remote_root", "def config_path(self):\n return self.g...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the remote_path of this DataBindingDir.
def remote_path(self, remote_path: str): self._remote_path = remote_path
[ "def folders_url(self, value):\n self.logger.warn(\n \"Setting values on folders_url will NOT update the remote Canvas instance.\"\n )\n self._folders_url = value", "def files_url(self, value):\n self.logger.warn(\n \"Setting values on files_url will NOT update th...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Take over suite_ids from the wrapped database but replace test_ids by suite_ids if there are parameters available for them.
def __init__(self, db, suite, suite_ids = None): Suite.__init__(self, {}, qmtest_id = suite.GetId(), qmtest_database = db) self.__suite = suite self.__suite_ids = suite_ids or []
[ "def build_suites_list(lang, include, exclude, application):\n defined_suites = {\n 'app_identity': app_identity_tests.suite(lang, application),\n 'blobstore' : blobstore_tests.suite(lang, application),\n 'channel': channel_tests.suite(lang, application),\n 'datastore' : datastore_tests.suite(lang, ap...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Construct a ParameterSuite. 'database': the database this suite refers to. 'suite': the original suite this suite parametrizes. 'parameter': the value of the parameter to apply to the suite.
def __init__(self, database, suite, parameter): Suite.__init__(self, {}, qmtest_id = database.JoinLabels(suite.GetId(), parameter), qmtest_database = database) self.__suite = suite ...
[ "def __init__(self, db, suite, suite_ids = None):\n\n Suite.__init__(self, {},\n qmtest_id = suite.GetId(),\n qmtest_database = db)\n self.__suite = suite\n self.__suite_ids = suite_ids or []", "def __init__(self, value, suite):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of parameters that can be applied to the test 'test_id'.
def _GetParametersForTest(self, test_id): return []
[ "def _get_test_profile_params(self):\n return self.__test_profile_params", "def _GetArgumentsForParameter(self, test_id, parameter):\n\n return {}", "def getTestRunParams(config, testName):\n params = {}\n options = config.options(testName)\n for option in options:\n value = config.get...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the set of arguments for this parameter. 'test_id': the test id to which the parameter belongs. 'parameter': the parameter for which the arguments are queried. Returns: a dictionary containing the arguments as name/value pairs.
def _GetArgumentsForParameter(self, test_id, parameter): return {}
[ "def get_arguments(self) -> dict:\n pass", "def _GetParametersForTest(self, test_id):\n \n return []", "def get_parameters(\n session: orm.Session,\n stgpr_version_id: int\n) -> Dict[str, Any]:\n # Make a skeleton of the parameters to return. This skeleton contains\n # d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
dump all record sets in all hosted zones.
def dump(self): LOG.info("[*] starts to dump all domains details") self.dump_hosted_zones() for hosted_zone_id in self.hosted_zones_ids: for resource_record_set in self.dump_record_sets(hosted_zone_id): resource_record_set['HostedZoneId'] = hosted_zone_id # Fix NS bu...
[ "def all_backupsets(self):\r\n return self._backupsets", "def pull_zones(*args, **kwargs):\n zones = api_call('/zones').json()\n for zone in zones:\n print(bcolors.HEADER + \"== Zone {zone[name]} [{zone[uuid]}] ==\".format(zone=zone) + bcolors.ENDC)\n\n # Retrieve the records for this z...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
save resource_record_set into the MongoDB database.
def save_record_set(self, resource_record_set): LOG.info("[+] save_record_set: %s" % str(resource_record_set)) recode_type = resource_record_set.get('Type') filter_opt = { 'HostedZoneId': resource_record_set.get('HostedZoneId'), # uniq Hosted-Zone 'Name': resource_reco...
[ "def record_to_mongo(record):\n # \n # # First check if the item info is already in our own database\n # item_obj = db.items.find_one({'uniqueid':id_str},{'call-number':True,'oclcnumber':True,'lcc':True})\n # \n # # Store item object in database for future use\n # db.items.save(item_obj)\n # \n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Settings to configure messaging and mentions in the team.
def messaging_settings(self): return self.properties.get('messagingSettings', TeamMessagingSettings())
[ "async def configure(self, ctx):\n embed = discord.Embed(color=0x5643fd, title='🛠️ Configuration Options 🛠️', timestamp=ctx.message.created_at)\n embed.add_field(name='``n.configure server-mute``', value='Adds a #muted channel and a muted role to your '\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The visibility of the group and team. Defaults to Public.
def visibility(self): return self.properties.get('visibility', None)
[ "def visibility(self) -> ObservationsSummaryVisibility:\n return ObservationsSummaryVisibility(self.summary[\"visibility\"])", "def test_is_accessible_by_with_public_and_hidden(self):\n user = self.create_user()\n group = self.create_review_group(visible=False)\n\n self.assertTrue(grou...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The collection of channels & messages associated with the team.
def channels(self): return self.get_property('channels', ChannelCollection(self.context, ResourcePath("channels", self.resource_path)))
[ "def fetch_all_channels(self):\n try:\n response = self.client.conversations_list()\n channels = response['channels']\n except SlackApiError as error:\n self.logger.warning(\n f\"slack {self.fetch_all_channels.__name__} request failed and raised error: {...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The schedule of shifts for this team.
def schedule(self): return self.get_property('shifts', Schedule(self.context, ResourcePath("shifts", self.resource_path)))
[ "def chem_shifts_list(self):\n return [dim.chem_shift for dim in self]", "def chemshifts_list(self):\n return [dim.chemshift for dim in self]", "def shifts(self):\r\n self.schedule = self.schedule.loc[\r\n (self.schedule['Cyclotron_Offline'] != 'Shutdown') & (self.schedule['Cyclo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The apps installed in this team.
def installed_apps(self): return self.get_property('installedApps', EntityCollection(self.context, TeamsAppInstallation, ResourcePath("installedApps", self.resource_path)))
[ "def apps() -> List[str]:\n with Configuration() as config:\n return config.get_apps()", "def GetListOfAvailableApplications():\n kratos_path = GetKratosMultiphysicsPath()\n import os, re\n\n apps = [\n f.split('.')[0] for f in os.listdir(kratos_path) if re.match(r'.*Application*', f)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The async operations that ran or are running on this team.
def operations(self): return self.get_property('operations', EntityCollection(self.context, TeamsAsyncOperation, ResourcePath("operations", self.resource_path)))
[ "def tasks(self):\n return self._tasks", "def running(self) -> set[TaskInstanceKey]:\n return self.celery_executor.running.union(self.kubernetes_executor.running)", "def is_async(self):\n return all(map(lambda x: x.ASYNC, self.inputs))", "def eval_tasks(self):\n return self._eval_tasks", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The template this team was created from
def template(self): return self.get_property('template', TeamsTemplate(self.context, ResourcePath("template", self.resource_path)))
[ "def template_name(self) -> str:\n return self._template_name", "def get_template(self, template_name):\r\n return self.info.get_template(template_name)", "def template(self):\n type_name = self.type.replace('-','_')\n \n template = select_template([\n 'notification...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send an activity feed notification in the scope of a team. For more details about sending notifications and the requirements for doing so, see the Microsoft Graph documentation on sending Teams activity notifications.
def send_activity_notification(self, topic, activity_type, chain_id, preview_text, template_parameters, recipient): payload = { "topic": topic, "activityType": activity_type, "chainId": chain_id, "previewText": preview_text, "templateParameters": templ...
[ "def send_approval_notification(self):\n if self.channel:\n link = \"\".join([\"http://\", Site.objects.get_current().domain, self.approval_link()])\n message = render_to_string('email/approval_notification.txt', {\n 'video': self,\n 'link': link\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The document will be signed via an embedded signing ceremony.
def embedded_signing_ceremony(): # # Step 1. The envelope definition is created. # One signHere tab is added. # The document path supplied is relative to the working directory # env_def = EnvelopeDefinition() env_def.email_subject = 'PLEASE GOD HELP ME, I NEED THIS WORKING!...
[ "def sign(self, doc, private_key):\n\n\t\tif self.node.parent:\n\t\t\t# If voter is a subvoter, also send key to parent.\n\t\t\tself.node.vote(doc, key=data_to_key(private_key,self.n))\n\n\t\tif self.pubKey:\n\t\t\t# Sign the document. \n\t\t\tkey = DSA.construct((self.pubKey.y, self.pubKey.g, self.pubKey.p, self.p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the SQLAlchemy row for the item called ``item_name``. When dealing with a group source, the item is a user. And when dealing with a permission source, the item is a group.
def _get_item_as_row(self, item_name): # "field" usually equals to {tg_package}.model.User.user_name # or {tg_package}.model.Group.group_name field = getattr(self.children_class, self.translations['item_name']) query = self.dbsession.query(self.children_class).options(eagerload(self.translations['sectio...
[ "def getSpecificItem(itemName):\r\n return session.query(Item).filter_by(name=itemName).one()", "def get_item_with_role(item_name, user, item_id):\n if item_name == 'group':\n obj = TaskGroup\n elif item_name == 'task':\n obj = Task.objects.select_related('task_group')\n else:\n r...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute AUC of the aggregated predictions on a dataset using current weights.
def compute_AUC(self, dataset=None, labels=None, binary_preds=False): if dataset is None: dataset = self.validation_set labels = self.validation_labels return sklearn.metrics.roc_auc_score(labels, self.predict(dataset, binary_preds=binary_preds))
[ "def accuracy(X, Y, weights, bias):\n\tsum_correct = 0.0\n\n\tfor row_X, row_Y in zip(X,Y):\n\t\tif row_Y == predict(row_X, weights, bias): sum_correct +=1.0\n\n\treturn sum_correct/float(len(Y))", "def calculate_AUROC(y_true, y_pred):\n return roc_auc_score(y_true, y_pred)", "def compute_auprc(pred, label):...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate hallucinated labels for dataset using given scores, which default to current unlabeled scores. Treats borderline labels as clipped, to avoid problems of zero gradient upon initialization. We also set labels on hedged examples to zero instead of random fair binary coin flips, to reduce variance and improve per...
def _hallucinate_labels(self, scores=None): # TODO(Akshay): Implement different labels for different losses. if scores is None: scores = self._scoresunl ghlabels = np.sign(scores) ghlabels[np.where(np.abs(scores) < 1)] = 0 # if self.logloss: ghlabels = 2.0*scipy.spec...
[ "def get_onehot_label_threshold(scores, threshold=0.5):\n scores = np.array(scores)\n predicted_onehot_labels = np.zeros(scores.shape)\n predicted_onehot_labels[np.array(scores) >= threshold] = 1\n scores_max = np.argmax(scores, axis=-1)\n predicted_onehot_labels[np.array(list(range(len(scores)))), s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a line search direction from the current weight vector, returns a function (closure) that projects the slack function in that direction.
def _proj_slack_func(self, search_dir): def _toret(alpha): scores = self._scoresunl + self.unlabeled_set.dot(alpha * search_dir) return -np.dot(self.b_vector, self.weights + alpha * search_dir) + np.mean(np.maximum(np.abs(scores), 1.0)) return _toret
[ "def lincross(x0, y0, x1, y1, x, y, d):\n\tout = []\n\n\t# first determine the nearest actual coordinates with rounding\n\tx0 = findkey(x0, x)\n\ty0 = findkey(y0, y)\n\tx1 = findkey(x1, x)\n\ty1 = findkey(y1, y)\n\t# now calculate the distance traveled in each direction, to get both the equation for the slope\n\t# ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Contract CIDER descriptors into rotation-invariant quantities.
def contract_exchange_descriptors(desc): # desc[0:6] = rho_data # desc[6:7] = g0 # desc[7:10] = g1 # desc[10:15] = g2 # desc[15] = g0-r^2 # g1 order: x, y, z # g2 order: xy, yz, z^2, xz, x^2-y^2 N = desc.shape[1] res = np.zeros((12,N)) rho_data = desc[:6] rho, s, alpha, tau...
[ "def _dependent_bec_construction(self):\n # Calculate Z-parameter for each of N polarized channels.\n erasure_prob = self._channel.get_erasure_prob()\n bhatt_z_array = np.array([PolarCode.bhatt_z(i, self._N, erasure_prob) for i in range(self._N)])\n\n # Sort all the channels in the ascen...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Yield (field_name, field_type, materialized_expr, default_expr) tuples
def iter_fields(self): yield "date", "Date", "", "" yield "ts", "DateTime", "", "" yield "metric_type", "String", "", "" for f in self.key_fields: yield f.field_name, f.field_type, "", "" yield "labels", "Array(LowCardinality(String))", "", "" if self.enable_...
[ "def field(f):\n yield f", "def field_repeat():\n yield None, 'all fields are placed in the same logical register.'\n yield 1, 'each field gets its own logical register.'\n yield (2, None), 'the given amount of fields are placed in each logical register.'", "def get_fields(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the CREATE TABLE statement for the Distributed engine
def get_create_distributed_sql(self): return ( "CREATE TABLE IF NOT EXISTS %s " "AS %s " "ENGINE = Distributed(%s, %s, %s)" % ( self._get_distributed_db_table(), self._get_raw_db_table(), config.clickhouse.cluster, ...
[ "def _table_creation_command(cls):\n return sql.Card.create_table()", "def _table_creation_command(cls) -> str:\n return sql.Metacard.create_table()", "def _create_table(self, table_name):\n raise NotImplementedError()", "def create_basic_table_in_dev(self):\n dev_table_sql = \"cre...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates relevance probabilities and labels for question-answer pairs. Parameter 'top_k' specifies the number of query-answer pairs labeled as relevant based on predicted probability. Returns list(float) List of relevance probabilities corresponding to question-answer pairs. list(float) List of binary relevances corre...
def predict_relevances(qids, questions, answers, top_k = 3): qids = np.array(qids) questions = np.array(questions) answers = np.array(answers) unique_qids = np.unique(qids) probs_full = np.zeros(answers.shape[0]) rel_full = np.zeros(answers.shape[0]) for qid in unique_qids: ...
[ "def get_top_k_accuracy(raw_predictions, synset, k, label_lst):\n # type: (numpy.ndarray, str, str, int) -> float\n\n predictions = get_predictions(raw_predictions, synset, k, label_lst)\n\n top_k = 0\n for example in predictions:\n for pred_idx, _, _ in example['predictions']:\n if pr...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the periods for the configured date range.
def search_periods(self): # Find the number of months between those two dates period_qty = relativedelta(datetime.strptime(self.date_to + ' 00:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime(self.date_from, '%Y-%m-%d')) # First day of the first month is the one that was set ...
[ "def pedidosPorPeriodo(self,fechaIni,fechaFin):", "def rangoFechas():\n anio = int(strftime(\"%Y\", gmtime()))\n mes = int(strftime(\"%m\", gmtime()))\n l = []\n for x in [0]:\n \n diff = mes - x\n if diff <= 0:\n l.append([anio - 1, 12+ diff])\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the report items according to the opportunities and the contracts.
def _get_report_items(self, period, col): res = [] sale_lines = self.search_sale_lines(period) contracts = self.search_contracts(period) for line in sale_lines: rate = self.calculate_amount(line, period) if line.admission_date \ and period[1]...
[ "def generarReporteItem(self, item):\n story = []\n #\n parrafo = self.titulo()\n story.append(parrafo) \n # \n parrafo2 = self.encabezado('Historial del Item ' + item.nombre)\n story.append(parrafo2)\n story.append(Spacer(0, 20))\n #\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the correct category for each word.
def __ajout_categories__(self): for n in self.name: self.type_mot[n] = 1 for n in self.description: self.type_mot[n] = 2 for n in self.caption: self.type_mot[n] = 3
[ "def categories(self, word):\n ...", "def categorize(self, noun, level=1):\n try: \n noun_syns = wn.synsets(noun, 'n')[0]\n category = noun_syns.hypernym_paths()[0][level]\n return category.name()\n except IndexError:\n return False", "def _categorize(sel...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute the average 'good' value for each word.
def calcul_moyenne_good(self): for mot in self.mots: somme = 0.0 tot = 0.0 for pred in graph.getInNodes(mot): somme += self.good[pred] tot += 1.0 self.good[mot] = somme/tot
[ "def calculate_moles_masses(mass, met, aer_particles, inc_soot=False):\n\n # molecular mass of each molecule\n mol_mass_amm_sulp = 132\n mol_mass_amm_nit = 80\n mol_mass_nh4 = 18\n mol_mass_n03 = 62\n mol_mass_s04 = 96\n mol_mass_Cl = 35.45\n\n # Convert into moles\n # calculate number of...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Color the nodes according to their degree. Albums related to these words are added.
def reduction_degre_mot_album(self): inter = [] for n in self.graph.getNodes(): if self.degree[n] > self.seuil_degree: if not self.est_album[n]: inter.append(n) intere = [] for n in inter: if self.good[n] < self.ratio or self.good[n] > (1 - self.ratio): intere.append(n) ...
[ "def noircir_albums(self):\n\t\t\n\t\tnoir = tlp.Color(0, 0, 0)\n\t\tblanc.setA(255)\n\t\tfor n in self.albums:\n\t\t\tself.viewColor[n] = noir", "def estomper_albums(self):\n\t\t\n\t\tblanc = tlp.Color(255, 255, 255)\n\t\tblanc.setA(0)\n\t\tfor n in self.albums:\n\t\t\t#a = self.viewColor[n]\n\t\t\t#a.setA(0)\n\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Call tulip's "Betweenness Centrality" algorithm.
def bet_cen(self): dataSet = tlp.getDefaultPluginParameters("Betweenness Centrality", self.graph) tlp.applyAlgorithm(self.graph, dataSet, "Betweenness Centrality")
[ "def betweenness_centrality(self, node):\n np=0\n # node_pairs have all pairs of nodes ( except node )\n node_pairs=[]\n while np<len(self.vertices):\n if self.vertices[np] != node:\n np1=np+1\n while np1<len(self.vertices):\n i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make the albums less visible.
def estomper_albums(self): blanc = tlp.Color(255, 255, 255) blanc.setA(0) for n in self.albums: self.viewColor[n] = blanc
[ "def noircir_albums(self):\n\t\t\n\t\tnoir = tlp.Color(0, 0, 0)\n\t\tblanc.setA(255)\n\t\tfor n in self.albums:\n\t\t\tself.viewColor[n] = noir", "def reduction_degre_mot_album(self):\t\t\t\t\n\t\t\t\t\n\t\tinter = []\t\t\t\t\t\n\t\tfor n in self.graph.getNodes():\n\t\t\tif self.degree[n] > self.seuil_degree:\n\t...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make the albums less visible.
def noircir_albums(self): noir = tlp.Color(0, 0, 0) noir.setA(255) for n in self.albums: self.viewColor[n] = noir
[ "def estomper_albums(self):\n\t\t\n\t\tblanc = tlp.Color(255, 255, 255)\n\t\tblanc.setA(0)\n\t\tfor n in self.albums:\n\t\t\t#a = self.viewColor[n]\n\t\t\t#a.setA(0)\n\t\t\tself.viewColor[n] = blanc", "def reduction_degre_mot_album(self):\t\t\t\t\n\t\t\t\t\n\t\tinter = []\t\t\t\t\t\n\t\tfor n in self.graph.getNod...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clean the outputs dir.
def clean_up_output(): yield if os.path.isdir('output'): rmtree('output')
[ "def CleanBuildOutputDirectory(self):\n PrintStatus('Removing '+ self.BuildOutputRootDir())\n if os.path.isdir(self.BuildOutputRootDir()):\n _SmartDeleteDirectory(self.BuildOutputRootDir())", "def clear_output_folder(self):\n fh.delete_directory_tree(self.analysis_root)", "def clean():\n fo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
count each kind of nucleotide, return the result as a dict
def nucleotide_counts(dna): counts = {n: 0 for n in NUCLEOTIDES} for n in dna: counts[n] += 1 return counts
[ "def countInstanceByType(verbose,dinstances):\n instancesByType = {}\n for instanceId, details in dinstances.items():\n try:\n instancesByType[details['flavor']] += 1\n except:\n instancesByType[details['flavor']] = 1\n return instancesByType", "def count_gene_occ(d):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test graceful shutdown function.
async def test_graceful_shutdown(self): await graceful_shutdown(self.mock_application) self.mock_db_conn.close.assert_awaited_once()
[ "def test_deploy_shutdown(self) -> None:\n super().test_deploy_shutdown()", "def shutdown():\n if platform.system() == \"Windows\":\n os.system(\"shutdown -s -t 0\")\n else:\n os.system(\"shutdown -h now\")", "def shut_down():\n sudo(\"shutdown -P 0\")", "def shutdown() -> None:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse header options based on their kind. Default to SizedOption.
def parse_options(data: bytearray) -> Generator[BaseOption, None, None]: while data: kind = data[0] opt = _PARSE_KIND_TBL.get(kind, SizedOption).from_bytes(data) yield opt if opt is end_of_options: return
[ "def parse_options(self, options):\n\n self.count_lines = options.get('lines', False) or options.get('l', False)\n self.count_words = options.get('words', False) or options.get('w', False)\n self.count_bytes = options.get('bytes', False) or options.get('c', False)\n\n # by default, count...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a new column with the truth array of the charge conservation cut.
def charge_conservation_cut(df_analysis): nsigma = 2 sig0 = df_analysis['std_energy_nodecor_ion_conservation'].unique()[0] sig10 = df_analysis['std_calib_energy_nodecor_ion_conservation'].unique()[0] sigma = sigma_function( df_analysis.energy_heat, sig0, sig10 )...
[ "def cnot() -> np.ndarray:\n return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])", "def threshold_binary(heatmap: np.ndarray, threshold: float) -> np.ndarray:\n arr = np.zeros_like(heatmap, dtype=np.bool)\n arr[np.where(heatmap >= threshold)] = True\n return arr", "def add_chur...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Apply the so-called FID cuts, which are a way to discriminate events happening in the bulk (or in the guard) region from the others. Create new columns with the truth array for the bulk and guard events.
def fid_cuts(df): nsigma = 2 sigma_dict = dict() for chan in 'ABCD': sig0 = df['std_energy_ion{}'.format(chan)].unique()[0] sig10 = df['std_calib_energy_ion{}'.format(chan)].unique()[0] sigma_dict[chan] = sigma_function( df.energy_heat, sig0, ...
[ "def apply_cuts(self, data, cuts):\n cut_mask = np.array([True], dtype=np.bool)\n for cut_key, [cut_low, cut_high] in cuts.items():\n if \"{reco}\" in cut_key:\n cut_key = cut_key.replace(\"{reco}\", self.reco)\n\n if cut_low is not None:\n cut_mask ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extra cut to select a specific range in energy. Quite handy to drop events with negative energy (not physical) and events with high energy (nonlinearity becomes problematic).
def energy_cut(df, energy_bounds=[0.025, 50]): inf, sup = energy_bounds df['energy_cut'] = ( ( df['energy_heat'] > inf ) & ( df['energy_heat'] < sup ) ) # add a condition on the ionization energy return None
[ "def energy_range(self):\n energy = self._energy_axis.edges\n e_min, e_max = energy[:-1], energy[1:]\n\n if self.mask_safe is not None:\n if self.mask_safe.any():\n e_min = e_min[self.mask_safe]\n e_max = e_max[self.mask_safe]\n else:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }