query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: list (length 19 to 20)
metadata: dict
Variable at birth and elongation rate mean over npoint
def at_birth(df, variable, npoint):
    # Per cell: mean of the variable and the growth rate over the first npoint frames,
    # with the variable column renamed to '<variable>_at_birth'
    return (df.groupby('cell')[[variable, 'pred_growth_rate']]
              .apply(lambda x: x.head(npoint).mean())
              .rename(columns={variable: '{}_at_birth'.format(variable)}))
[ "def _get_mean_var_estimates(self):", "def mean(self):\r\n return self.tGuess + num.sum(self.pdf*self.x)/num.sum(self.pdf)", "def mean_age(self):\n return np.mean([p.age for p in self.people])", "def get_average_life_expectancy(country, year):\n return (get_life_expectancy(\"female\", country...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Connect cells between genealogies and return dataframe with super_cell id and variable
def connect_cells(dfte, vari):
    # Create the variable cell for mother, grandmother and great-grandmother
    if 'g_parent_cell' not in dfte.columns:
        dfte = rl.genalogy(dfte, 'parent_cell')  # Create genealogy
    if 'g_g_parent_cell' not in dfte.columns:
        dfte = rl.genalogy(dfte, 'g_parent_cell')
    if ...
[ "def findCellsByGeneCoex(adata, gene1, gene2=None, g1thresh=0.6, g2thresh=0.6, gene1up=True, gene2up=True, use_raw=False):\n if type(adata.X)==scipy.sparse.csr.csr_matrix:\n if use_raw:\n mtx = pd.DataFrame(adata.raw.X.toarray())\n mtx.columns=adata.raw.var_names\n mtx.ind...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the autocorrelation, even across genealogies, from t=0 to t=maxt in steps of step
def autocorrelation(df, maxt, step, vari, acquisiton_time, division_time):
    maxt = int(maxt / acquisiton_time)
    step = int(step / acquisiton_time)
    df = connect_cells(df, vari)
    return np.vstack([correlation(df, Dt, vari) for Dt in np.arange(0, maxt, step)]),\
           np.arange(0, maxt, step) * acquisi...
[ "def step_autocorrelation(self):\n\n max_hops = max([len(x) for x in self.steps])\n\n self.acf = np.zeros([len(self.steps), max_hops])\n\n keep = [] # list to hold indices of trajectories with a non-zero amount of hops\n for i in range(len(self.steps)):\n hops = self.steps[i]...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Q-Q plot against a normal distribution
def qq_plot(obs, var, fname):
    plt.figure()
    z = (obs - np.mean(obs)) / np.std(obs)
    stats.probplot(z, dist="norm", plot=plt)
    plt.plot(np.arange(-3, 3), np.arange(-3, 3))
    plt.xlim([-3, 3])
    plt.ylim([-3, 3])
    plt.title("Normal Q-Q plot {} in {}".format(var, fname))
    plt.savefig("qq_{}".format(var))
[ "def plot_qq(self):\n \n fig, ax = _plt.subplots(figsize=(8, 6))\n \n # data\n y = _np.sort(self.data)\n N = len(y)\n x = _np.arange(1, N + 1) / (N + 1)\n x = self.distr.ppf(x)\n \n # plot\n ax = self._plot(ax, 'Q-Q Plot', 'model', 'empiri...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instantiate a `Surrogate` from a pre-instantiated BoTorch `Model`.
def from_botorch(
    cls,
    model: Model,
    mll_class: Type[MarginalLogLikelihood] = ExactMarginalLogLikelihood,
) -> Surrogate:
    surrogate = cls(botorch_model_class=model.__class__, mll_class=mll_class)
    surrogate._model = model
    # Temporarily disallowing `update` for surrogat...
[ "def model_to_instance(model):\n pass", "def instantiate_model(model_params):\n \n if model_params['model_class'] == 'SeqToSeq':\n m = SeqToSeq(model_params)\n elif model_params['model_class'] == 'SeqToPoint':\n m = SeqToPoint(model_params)\n elif model_par...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates the surrogate model with new data. In the base ``Surrogate``, just calls ``fit`` after checking that this surrogate was not created via ``Surrogate.from_botorch`` (in which case the ``Model`` comes premade, constructed manually and then supplied to ``Surrogate``).
def update(
    self,
    datasets: List[SupervisedDataset],
    metric_names: List[str],
    search_space_digest: SearchSpaceDigest,
    candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
    state_dict: Optional[Dict[str, Tensor]] = None,
    refit: bool = True,
    *...
[ "def train_surrogate(self):\n # Following https://gitlab.com/florianlprt/wsao, we re-train the model\n # ---------------------------------------------------------------------------\n # cli_restart.py problem=nd3d,size=30,filename=\"data/statistics_extended_svdn\" \\\n # model=lass...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
For multiobjective optimization, retrieve Pareto frontier instead of best point.
def pareto_frontier(self) -> Tuple[Tensor, Tensor]:
    raise NotImplementedError("Pareto frontier not yet implemented.")
[ "def pareto_frontier(self) -> Tuple[Tensor, Tensor]:\n raise NotImplementedError(\n \"Pareto frontier not yet implemented.\"\n ) # pragma: no cover", "def pareto_front_cut(self):\n return self.NDA([kernel.objective_values for kernel in self.kernels \\\n if ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Serialize attributes of this surrogate, to be passed back to it as kwargs on reinstantiation.
def _serialize_attributes_as_kwargs(self) -> Dict[str, Any]:
    if self._constructed_manually:
        raise UnsupportedError(
            "Surrogates constructed manually (ie Surrogate.from_botorch) may not "
            "be serialized. If serialization is necessary please initialize from "
            ...
[ "def _serialize_attributes_as_kwargs(self) -> Dict[str, Any]:\n return {\n \"botorch_model_class\": self.botorch_model_class,\n \"mll_class\": self.mll_class,\n \"model_options\": self.model_options,\n }", "def _serialise(self):\n # TODO (M Foley)\n pas...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the maximum time, across all the velocities, required to reach the goal. By default, it only synchronizes linear velocities. If angular_synchronization is True, it also synchronizes angular velocities.
def calculate_max_time(error, velocity, angular_synchronization=False, zero=0.001):
    if angular_synchronization:
        assert len(error) == len(velocity) == 6
    else:
        assert len(error) == len(velocity) == 3

    # calculate_duration = lambda distance, speed: abs(float(distance) / speed)
    def calculate...
[ "def calculate_sync_velocity(error, velocity, max_time, angular_synchronization=False):\n if angular_synchronization:\n assert len(error) == len(velocity) == 6\n else:\n assert len(error) == len(velocity) == 3\n\n # A velocity is computed to cover a distance (dist) in a given time (max_time),...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the synchronized velocity for all velocities to reach their goal at the same time. By default, it only synchronizes linear velocities. If angular_synchronization is True, then it also synchronizes for angular velocities.
def calculate_sync_velocity(error, velocity, max_time, angular_synchronization=False):
    if angular_synchronization:
        assert len(error) == len(velocity) == 6
    else:
        assert len(error) == len(velocity) == 3

    # A velocity is computed to cover a distance (dist) in a given time (max_time),
    # wher...
[ "def update_motor_set_velocities(self):\n\n steer_angle_radians = self.robot_odom.steer_angle*math.pi/180.0\n rotation_curvature = math.tan(steer_angle_radians)/self.robot_odom.robot_length\n angular_velocity = self.robot_odom.linear_velocity_command*(rotation_curvature)\n \n mid_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Color edge based on the mean of its node colors
def get_edge_color(row):
    rgb = 0.5 * (node_color_dict[row['source']] +
                 node_color_dict[row['target']])
    return rgb2hex(rgb)
[ "def merge_mean_color(graph, src ,dst):\n\n graph.nodes[dst]['total color'] += graph.nodes[src]['total color']\n graph.nodes[dst]['pixel count'] += graph.nodes[src]['pixel count']\n graph.nodes[dst]['mean color'] = (graph.nodes[dst]['total color'] / graph.nodes[dst]['pixel count'])", "def merge_mean_colo...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Import functions from module ``mmgroup.mm_order``. We import these functions from module ``mmgroup.mm_order`` on demand. This avoids an infinite recursion of imports.
def import_mm_order_functions():
    global check_mm_order, check_mm_equal
    global check_mm_half_order, check_mm_in_g_x0
    from mmgroup.mm_order import check_mm_order as f
    check_mm_order = f
    from mmgroup.mm_order import check_mm_equal as f
    check_mm_equal = f
    from mmgroup.mm_order import check_mm_ha...
[ "def import_groups():", "def import_processors_modules(self):\n\n for module in IMPORT_PROCESSORS():\n __import__(module)", "def _import_function(cwd, function_location, function_name, setup_name):\n sys.path.append(cwd)\n try:\n imported = __import__(function_location...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
String representation of the Completeness object. When 'print' is used on the Completeness object, this method will display the values contained in the object.
def __str__(self):
    for att in self.__dict__.keys():
        print('%s: %r' % (att, getattr(self, att)))
    return 'Completeness class object attributes'
[ "def __str__(self):\n struct_repr = \", \".join([\n \"was_available_once: \" + str(self.was_available_once),\n \"is_available: \" + str(self.is_available),\n \"signal_strength_percent: \" + str(self.signal_strength_percent)\n ])\n\n return f\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates completeness values for target stars. This method is called from the TargetList __init__ method.
def target_completeness(self, TL):
    comp0 = np.array([0.2] * TL.nStars)
    return comp0
[ "def check_progress(self):\n self.max_fitness = max(self.fitness)\n index = self.fitness.index(self.max_fitness)\n self.closest_target = self.population[index]\n if self.max_fitness == 1.0:\n self.target_acquired = True\n self.target_population = self.closest_target...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates completeness value for stars previously observed
def completeness_update(self, sInd, TL, obsbegin, obsend, nexttime):
    # prototype returns the "virgin" completeness value
    return TL.comp0
[ "def target_completeness(self, TL):\r\n \r\n comp0 = np.array([0.2]*TL.nStars)\r\n \r\n return comp0", "def update_percent(self):", "def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete", "def update_score():\n pass", "def updat...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set up a default Dogstatsd instance and mock the proc filesystem.
def setUp(self):
    self.statsd = DogStatsd(telemetry_min_flush_interval=0)
    self.statsd.socket = FakeSocket()
    self.statsd._reset_telemetry()

    # Mock the proc filesystem
    route_data = load_fixtures('route')
    self._procfs_mock = patch('datadog.util.compat.builtins.open...
[ "def init_statsd():\n statsd.init_statsd({\n 'STATSD_HOST': config.secrets.server('statsd.host'),\n 'STATSD_PORT': config.secrets.server('statsd.port'),\n 'STATSD_BUCKET_PREFIX': 'linkr',\n })", "def test_initialization(self):\n options = {\n 'statsd_host': \"myhost\",...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Unmock the proc filesystem.
def tearDown(self):
    self._procfs_mock.stop()
[ "def teardown_module():\n os.chdir(retriever_root_dir)\n subprocess.call(['rm', '-r', 'raw_data'])\n subprocess.call(['rm', '-r', test_engine.format_data_dir()])", "def test_unmount_changed_path(self):\n\n # Check if old mount paths were read correctly\n self.assertFalse(len(self.service.ol...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends and then asserts that a chain of metrics arrives in the right order and with the expected telemetry values.
def send_and_assert(
    self,
    dogstatsd,
    expected_metrics,
    last_telemetry_size=0,
    buffered=False,
):
    expected_messages = []
    for metric_type, metric_name, metric_value in expected_metrics:
        # Construct the expected message data
        metric_type_...
[ "def test_wait_for_dispatched_metrics(self):\n worker_helper = WorkerHelper()\n d = worker_helper.wait_for_dispatched_metrics()\n self.assertEqual(self.successResultOf(d), [])\n\n self._add_to_dispatched_metrics(worker_helper.broker, MetricMessage())\n msg = MetricMessage()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
`initialize` overrides `statsd` default instance attributes.
def test_initialization(self):
    options = {
        'statsd_host': "myhost",
        'statsd_port': 1234
    }

    # Default values
    self.assertEqual(statsd.host, "localhost")
    self.assertEqual(statsd.port, 8125)

    # After initialization
    initialize(**options)
    ...
[ "def init_statsd():\n statsd.init_statsd({\n 'STATSD_HOST': config.secrets.server('statsd.host'),\n 'STATSD_PORT': config.secrets.server('statsd.port'),\n 'STATSD_BUCKET_PREFIX': 'linkr',\n })", "def __init__(self, collectd):\n self.collectd = collectd\n self.conf = self.d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dogstatsd can retrieve its config from env vars when not provided in constructor.
def test_dogstatsd_initialization_with_env_vars(self):
    # Setup
    with preserve_environment_variable('DD_AGENT_HOST'):
        os.environ['DD_AGENT_HOST'] = 'myenvvarhost'
        with preserve_environment_variable('DD_DOGSTATSD_PORT'):
            os.environ['DD_DOGSTATSD_PORT'] = '4321'
            ...
[ "def __init__(self):\n\n self.config = load_config()\n self.set_env_var()", "def __init__(self):\n try:\n # For local testing\n from dotenv import load_dotenv\n load_dotenv(os.path.join(os.path.dirname(__file__), ENV_FILE))\n dotenv = os.environ\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dogstatsd host can be dynamically set to the default route.
def test_default_route(self):
    self.assertEqual(
        DogStatsd(use_default_route=True).host,
        "172.17.0.1"
    )
[ "def set_host(self, host):\n self.host = host", "def set_default_hostname(self, hostname):\r\n \r\n self.default_hostname = hostname", "def host_add_handler(cls, host):\n if host.port.dpid in cls._SWITCHES:\n cls._SWITCHES[host.port.dpid].add_host(host)\n cls._HOSTS_TO_ADD....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Timed value is reported in ms when statsd.use_ms is True.
def test_timed_in_ms(self):
    # Arm statsd to use_ms
    self.statsd.use_ms = True

    # Sample a function run time
    @self.statsd.timed('timed.test')
    def func(arg1, arg2, kwarg1=1, kwarg2=1):
        """docstring"""
        time.sleep(0.5)
        return (arg1, arg2, kwarg1, kw...
[ "def time_ms(self):\n return self._time_ms", "def milliseconds(self) -> pli.Series:", "def _unit_ms(self):\n return (self.time_base / 1000.0) / 60.0", "def ms_from_timedelta(td):\n return (td.seconds * 1000) + (td.microseconds / 1000.0)", "def timing(metric_name, value, use_ms=True, *args, ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Exception bubbles out of the `timed` context manager.
def test_timed_context_exception(self):
    class ContextException(Exception):
        pass

    def func(self):
        with self.statsd.timed('timed_context.test.exception'):
            time.sleep(0.5)
            raise ContextException()

    # Ensure the exception was raised.
    wi...
[ "def raise_timeout(self, *args, **kwargs):\n\n self.log.error(\"Task timeout encountered.\")\n raise TimeoutError", "def _timeout(signum, frame):\n # Raise TimeoutException with system default timeout message\n raise TimeoutException()", "def test_timeout_elapsed_exception(self):\n de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dogstatsd should automatically use DD_ENV, DD_SERVICE, and DD_VERSION (if present) to set {env, service, version} as global tags for all metrics emitted.
def test_dogstatsd_initialization_with_dd_env_service_version(self):
    cases = [
        # Test various permutations of setting DD_* env vars, as well as other global tag configuration.
        # An empty string signifies that the env var either isn't set or that it is explicitly set to empty string.
        ...
[ "def test_globaltags_override():\n host = socket.gethostname()\n token = \"asdashdsauh_8aeraerf\"\n tags = {\"region\": \"us-east-1\"}\n registry = MetricsRegistry()\n reporter = ApptuitReporter(sanitize_mode=None, registry=registry,\n api_endpoint=\"http://localhost\",\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The Eflo VPD ID.
def vpd_id(self) -> str:
    return pulumi.get(self, "vpd_id")
[ "def vat_id(self) -> str:\n return self._vat_id", "def getVendorId(self):\r\n vendID= self.readReg(37, 3, False)\r\n return vendID", "def cve_id(self):\n return self._cve_id", "def vul_id(self):\n return self._vul_id", "def vat_id(self):\n return self._vat_id", "d...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The ID of the VPD.
def vpd_id(self) -> str:
    return pulumi.get(self, "vpd_id")
[ "def vat_id(self) -> str:\n return self._vat_id", "def vat_id(self):\n return self._vat_id", "def vul_id(self):\n return self._vul_id", "def cve_id(self):\n return self._cve_id", "def vm_id(self) -> str:\n return self._vm_id", "def vmid(self):\n return self.raw[\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The name of the VPD.
def vpd_name(self) -> str:
    return pulumi.get(self, "vpd_name")
[ "def get_name(self):\n return self.nvPublic.get_name()", "def vul_name(self):\n return self._vul_name", "def getVhdlName(self):\n return self.name.replace(TOP_NODE_NAME + '.', '').replace('.', '_')", "def vpd_id(self) -> str:\n return pulumi.get(self, \"vpd_id\")", "def getName(s...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the axis1 limits
def set_axis1_limits(self, start, end):
    if start > end:
        raise ValueError("Start point over end for this view.")
    self.axis1_limits = start, end
[ "def axes_limits_set_default(self):\n self.ax1.set_ylim(0.00, 100.0)\n self.ax1.set_xlim(0, 1)", "def axes_limits_set(self, data):\n xmax = self.calcs.iterations - 1 if self.calcs.iterations > 1 else 1\n\n if data:\n ymin, ymax = self.axes_data_get_min_max(data)\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the axis2 limits
def set_axis2_limits(self, start, end):
    if start > end:
        raise ValueError("Start point over end for this view.")
    self.axis2_limits = start, end
[ "def adjust_ylimits(self, ylim1, ylim2):\n self.axplot.set_ylim(ylim1, ylim2)\n self.fig.canvas.draw()\n return", "def axes_limits_set_default(self):\n self.ax1.set_ylim(0.00, 100.0)\n self.ax1.set_xlim(0, 1)", "def axes_limits_set(self, data):\n xmax = self.calcs.itera...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a _Head for linear regression.
def _regression_head(label_name=None,
                     weight_column_name=None,
                     label_dimension=1,
                     enable_centered_bias=False,
                     head_name=None):
    return _RegressionHead(
        label_name=label_name,
        weight_column_name=weight_column_name,
        lab...
[ "def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header", "def build_head(self, n_features, device=None):\n # By default this is a linear layer\n self.head = self.create_compatible_head(n_features, device)", "def linear_regression():\n return LinearRe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a _Head for multi-class, single-label classification. The Head uses softmax cross-entropy loss.
def _multi_class_head(n_classes,
                      label_name=None,
                      weight_column_name=None,
                      enable_centered_bias=False,
                      head_name=None,
                      thresholds=None,
                      metric_class_ids=None):
    if (n_classes is None) or ...
[ "def _multi_label_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None):\n if n_classes <...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a `_Head` for binary classification with SVMs. The head uses binary hinge loss.
def _binary_svm_head(
    label_name=None,
    weight_column_name=None,
    enable_centered_bias=False,
    head_name=None,
    thresholds=None,):
    return _BinarySvmHead(
        label_name=label_name,
        weight_column_name=weight_column_name,
        enable_centered_bias=enable_centered_bias,
        head_name=head_name...
[ "def add_classification_head(\n self,\n head_name,\n num_labels=2,\n layers=2,\n activation_function=\"tanh\",\n overwrite_ok=False,\n multilabel=False,\n id2label=None,\n ):\n\n if multilabel:\n head = MultiLabelClassificationHead(head_na...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a _Head for multi-label classification. The Head uses sigmoid cross-entropy loss.
def _multi_label_head(n_classes,
                      label_name=None,
                      weight_column_name=None,
                      enable_centered_bias=False,
                      head_name=None,
                      thresholds=None,
                      metric_class_ids=None):
    if n_classes < 2:
        rais...
[ "def add_classification_head(\n self,\n head_name,\n num_labels=2,\n layers=2,\n activation_function=\"tanh\",\n overwrite_ok=False,\n multilabel=False,\n id2label=None,\n ):\n\n if multilabel:\n head = MultiLabelClassificationHead(head_na...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
_Head to combine multiple _Head objects.
def __init__(self, heads, loss_combiner):
    # TODO(zakaria): Keep _Head a pure interface.
    super(_MultiHead, self).__init__(head_name=None)
    self._logits_dimension = 0
    for head in heads:
        if not head.head_name:
            raise ValueError("Head must have a name.")
        self._logits_dimension += head.logi...
[ "def generateHead(self, headType):\n # load the multi-head models\n filePrefix, phase = ModelDict[self.style.body]\n headModel = loader.loadModel(\"phase_\" + str(phase) + filePrefix + \"heads\")\n\n # search for the appropriate parts\n headReferences = headModel.findAllMatches(\"...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Splits logits for heads.
def _split_logits(self, logits):
    all_logits = []
    begin = 0
    for head in self._heads:
        current_logits_size = head.logits_dimension
        current_logits = array_ops.slice(logits, [0, begin],
                                        [-1, current_logits_size])
        all_logits.append(current_logits)
        beg...
[ "def _split_heads(self, x: torch.Tensor) -> torch.Tensor:\n depth = x.size(-1)\n split_x = torch.reshape(x, (\n x.size(0), x.size(1),\n self._hparams.num_heads, depth // self._hparams.num_heads))\n return split_x.permute((0, 2, 1, 3))", "def split_outputs_per_head(output...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combines list of ModelFnOps for training.
def _combine_train(self, all_model_fn_ops, train_op_fn):
    losses = []
    additional_train_ops = []
    for m in all_model_fn_ops:
        losses.append(m.loss)
        additional_train_ops.append(m.train_op)
    loss = self._loss_combiner(losses)
    train_op = train_op_fn(loss)
    train_op = control_flow_ops.group(t...
[ "def _combine_eval(self, all_model_fn_ops):\n predictions = {}\n metrics = {}\n losses = []\n for head, m in zip(self._heads, all_model_fn_ops):\n losses.append(m.loss)\n head_name = head.head_name\n for k, v in m.predictions.items():\n predictions[(head_name, k)] = v\n for k,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combines list of ModelFnOps for inference.
def _combine_infer(self, all_model_fn_ops):
    predictions = {}
    output_alternatives = {}
    for head, m in zip(self._heads, all_model_fn_ops):
        head_name = head.head_name
        output_alternatives[head_name] = m.output_alternatives[head_name]
        for k, v in m.predictions.items():
            predictions[(head...
[ "def _combine_eval(self, all_model_fn_ops):\n predictions = {}\n metrics = {}\n losses = []\n for head, m in zip(self._heads, all_model_fn_ops):\n losses.append(m.loss)\n head_name = head.head_name\n for k, v in m.predictions.items():\n predictions[(head_name, k)] = v\n for k,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Combines list of ModelFnOps for eval.
def _combine_eval(self, all_model_fn_ops):
    predictions = {}
    metrics = {}
    losses = []
    for head, m in zip(self._heads, all_model_fn_ops):
        losses.append(m.loss)
        head_name = head.head_name
        for k, v in m.predictions.items():
            predictions[(head_name, k)] = v
        for k, v in m.eval_m...
[ "def _combine_train(self, all_model_fn_ops, train_op_fn):\n losses = []\n additional_train_ops = []\n for m in all_model_fn_ops:\n losses.append(m.loss)\n additional_train_ops.append(m.train_op)\n loss = self._loss_combiner(losses)\n\n train_op = train_op_fn(loss)\n train_op = control_fl...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a tuple of (loss, weighted_average_loss).
def _loss(loss_unweighted, weight, name):
    with ops.name_scope(name, values=(loss_unweighted, weight)) as name_scope:
        if weight is None:
            loss = math_ops.reduce_mean(loss_unweighted, name=name_scope)
            return loss, loss
        loss_weighted = _weighted_loss(loss_unweighted, weight)
        weighted_average_loss ...
[ "def average_loss(self, epoch_loss, num_epoch_examples):\n return tuple([l / num_epoch_examples for l in epoch_loss])", "def _weighted_loss(loss, sample_weights=None):\n if sample_weights is not None:\n loss = loss * sample_weights\n return loss.sum() / sample_weights.sum()\n\n return l...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Raises ValueError if the given mode is invalid.
def _check_mode_valid(mode):
    if (mode != model_fn.ModeKeys.TRAIN and
        mode != model_fn.ModeKeys.INFER and
        mode != model_fn.ModeKeys.EVAL):
        raise ValueError("mode=%s unrecognized." % str(mode))
[ "def _assert_valid_mode(mode:str):\n if not mode in [_TRAIN, _EVAL, _PREDICT]:\n raise ValueError(\"Invalid mode.\")", "def _check_mode(self, mode=None):\n\n if mode is None:\n try:\n mode = self.entries['mode']\n except KeyError:\n self._logger.error...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the training loss tensor. The training loss is different from the loss reported on TensorBoard, as we should respect the example weights when computing the gradient: L = sum_{i} w_{i} l_{i} / B, where B is the number of examples in the batch, and l_{i} and w_{i} are the individual losses and example weights.
def _training_loss(features,
                   labels,
                   logits,
                   loss_fn,
                   weight_column_name=None,
                   head_name=None):
    with ops.name_scope(None, "training_loss",
                        tuple(six.itervalues(features)) + (label...
[ "def compute_loss(self):\n\tself.elbo_l = tf.reduce_mean(self.labeled_loss(self.x_l, self.y_l))\n\tself.qy_ll = tf.reduce_mean(self.qy_loss(self.x_l, self.y_l))\n\tself.elbo_u = tf.reduce_mean(self.unlabeled_loss(self.x_u))\n\t#weight_priors = self.l2_reg*self.weight_prior()/self.n_train\t\n\tweight_priors = -self....
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to determine the libc version that the file executable (which defaults to the Python interpreter) is linked against. Returns a tuple of strings (lib,version) which default to the given parameters in case the lookup fails. Note that the function has intimate knowledge of how different libc versions add symbols to ...
def libc_ver(executable=None, lib='', version='', chunksize=16384):
    if not executable:
        try:
            ver = os.confstr('CS_GNU_LIBC_VERSION')
            # parse 'glibc 2.28' as ('glibc', '2.28')
            parts = ver.split(maxsplit=1)
            if len(parts) == 2:
                return tuple(parts)
            ...
[ "def libc_ver(executable=sys.executable, lib='', version='',\n\n chunksize=16384):\n if hasattr(os.path, 'realpath'):\n # Python 2.2 introduced os.path.realpath(); it is used\n # here to work around problems with Cygwin not being\n # able to open symlinks for reading\n exe...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalize the version and build strings and return a single version string using the format major.minor.build (or patchlevel).
def _norm_version(version, build=''):
    l = version.split('.')
    if build:
        l.append(build)
    try:
        strings = list(map(str, map(int, l)))
    except ValueError:
        strings = l
    version = '.'.join(strings[:3])
    return version
[ "def _norm_version(version, build=''):\n l = version.split('.')\n if build:\n l.append(build)\n try:\n ints = map(int, l)\n except ValueError:\n strings = l\n else:\n strings = list(map(str, ints))\n version = '.'.join(strings[:3])\n return version", "def get_min_b...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tries to figure out the OS version used and returns a tuple (system, release, version). It uses the "ver" shell command for this, which is known to exist on Windows and DOS. XXX Others too ? In case this fails, the given parameters are used as defaults.
def _syscmd_ver(system='', release='', version='',
                supported_platforms=('win32', 'win16', 'dos')):
    if sys.platform not in supported_platforms:
        return system, release, version

    # Try some common cmd strings
    import subprocess
    for cmd in ('ver', 'command /c ver', 'cmd /c ver'):
        ...
[ "def _syscmd_ver(system='', release='', version='',\n\n supported_platforms=('win32', 'win16', 'dos')):\n if sys.platform not in supported_platforms:\n return system, release, version\n\n # Try some common cmd strings\n for cmd in ('ver', 'command /c ver', 'cmd /c ver'):\n try:\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get macOS version information and return it as tuple (release, versioninfo, machine) with versioninfo being a tuple (version, dev_stage, non_release_version). Entries which cannot be determined are set to the parameter values which default to ''. All tuple entries are strings.
def mac_ver(release='', versioninfo=('', '', ''), machine=''):
    # First try reading the information from an XML file which should
    # always be present
    info = _mac_ver_xml()
    if info is not None:
        return info

    # If that also doesn't work return the default values
    return release, versioninfo,...
[ "def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')):\n # Import the needed APIs\n try:\n import java.lang\n except ImportError:\n return release, vendor, vminfo, osinfo\n\n vendor = _java_getprop('java.vendor', vendor)\n release = _java_getprop('java.version',...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Version interface for Jython. Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being a tuple (vm_name, vm_release, vm_vendor) and osinfo being a tuple (os_name, os_version, os_arch). Values which cannot be determined are set to the defaults given as parameters (which all default to '').
def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')):
    # Import the needed APIs
    try:
        import java.lang
    except ImportError:
        return release, vendor, vminfo, osinfo

    vendor = _java_getprop('java.vendor', vendor)
    release = _java_getprop('java.version', release)
    ...
[ "def version_info():\r\n Version = namedtuple('Version', 'major, minor, micro, releaselevel, serial')\r\n from jedi import __version__\r\n tupl = re.findall('[a-z]+|\\d+', __version__)\r\n return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)])", "def version_info(): \n re...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns (system, release, version) aliased to common marketing names used for some systems. It also does some reordering of the information in some cases where it would otherwise cause confusion.
def system_alias(system, release, version):
    if system == 'SunOS':
        # Sun's OS
        if release < '5':
            # These releases use the old name SunOS
            return system, release, version

        # Modify release (marketing release = SunOS release - 3)
        l = release.split('.')
        if l:...
[ "def system_alias(system, release, version):\n if system == 'Rhapsody':\n # Apple's BSD derivative\n # XXX How can we determine the marketing release number ?\n return 'MacOS X Server', system+release, version\n\n elif system == 'SunOS':\n # Sun's OS\n if release < '5':\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper to format the platform string in a filename compatible format e.g. "system-version-machine".
def _platform(*args):
    # Format the platform string
    platform = '-'.join(x.strip() for x in filter(len, args))

    # Cleanup some possible filename obstacles...
    platform = platform.replace(' ', '_')
    platform = platform.replace('/', '-')
    platform = platform.replace('\\', '-')
    platform = platform.r...
[ "def _getPlatformString(dist=None):\r\n if dist=='bdist':\r\n #get platform-specific info\r\n if os.sys.platform=='darwin':\r\n OSXver, junk, architecture = platform.mac_ver()\r\n systemInfo = \"OSX_%s_%s\" %(OSXver, architecture)\r\n elif os.sys.platform=='linux':\r\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
In case filepath is a symlink, follow it until a real file is reached.
def _follow_symlinks(filepath):
    filepath = os.path.abspath(filepath)
    while os.path.islink(filepath):
        filepath = os.path.normpath(
            os.path.join(os.path.dirname(filepath), os.readlink(filepath)))
    return filepath
[ "def dereference_symlinks(src):\n while os.path.islink(src):\n src = os.path.join(os.path.dirname(src), os.readlink(src))\n return src", "def symlink(self, path, target, *args, **kwargs): # pragma: no cover", "def test_symlink_rel(self):\n src_link = join(self.dfuse.dir, 'source')\n\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Interface to the system's file command. The function uses the -b option of the file command to have it omit the filename in its output. Follow the symlinks. It returns default in case the command should fail.
def _syscmd_file(target, default=''):
    if sys.platform in ('dos', 'win32', 'win16'):
        # XXX Others too ?
        return default

    try:
        import subprocess
    except ImportError:
        return default

    target = _follow_symlinks(target)
    # "file" output is locale dependent: force the usage of t...
[ "def file_link(source, destination, symbolic=True, mode=None, owner=None, group=None):\n if symbolic:\n run('ln -sf \"%s\" \"%s\"' % (source, destination))\n else:\n run('ln -f \"%s\" \"%s\"' % (source, destination))\n file_attribs(destination, mode, owner, group)", "def try_as_file(inp):\n...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Queries the given executable (defaults to the Python interpreter binary) for various architecture information. Returns a tuple (bits, linkage) which contains information about the bit architecture and the linkage format used for the executable. Both values are returned as strings. Values that cannot be determined are r...
def architecture(executable=sys.executable, bits='', linkage=''):
    # Use the sizeof(pointer) as default number of bits if nothing
    # else is given as default.
    if not bits:
        import struct
        size = struct.calcsize('P')
        bits = str(size * 8) + 'bit'

    # Get data from the 'file' system comm...
[ "def architecture(executable=sys.executable, bits='', linkage=''):\n # Use the sizeof(pointer) as default number of bits if nothing\n # else is given as default.\n if not bits:\n import struct\n try:\n size = struct.calcsize('P')\n except struct.error:\n # Older i...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fairly portable uname interface. Returns a tuple of strings (system, node, release, version, machine, processor) identifying the underlying platform. Note that unlike the os.uname function this also returns possible processor information as an additional tuple entry. Entries which cannot be determined are set to ''.
def uname():
    global _uname_cache

    if _uname_cache is not None:
        return _uname_cache

    # Get some infos from the builtin os.uname API...
    try:
        system, node, release, version, machine = infos = os.uname()
    except AttributeError:
        system = sys.platform
        node = _node()
        ...
[ "def uname():\n global _uname_cache\n no_os_uname = 0\n\n if _uname_cache is not None:\n return _uname_cache\n\n processor = ''\n\n # Get some infos from the builtin os.uname API...\n try:\n system, node, release, version, machine = os.uname()\n except AttributeError:\n no_...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the (true) processor name, e.g. 'amdk6' An empty string is returned if the value cannot be determined. Note that many platforms do not provide this information or simply return the same value as for machine(), e.g. NetBSD does this.
def processor():
    return uname().processor
[ "def get_processor_info():\n with open('/proc/cpuinfo') as cpuinfo:\n processor = re.findall(r'model name\\s+: (.+)\\n', cpuinfo.read())[0]\n return processor.replace('(R)', '').replace('(TM)', '')", "def get_processor_name() -> bytes:\n return collective.get_processor_name().encode()", "def _de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a parsed version of Python's sys.version as tuple (name, version, branch, revision, buildno, builddate, compiler) referring to the Python implementation name, version, branch, revision, build number, build date/time as string and the compiler identification string. Note that unlike the Python sys.version, the r...
def _sys_version(sys_version=None):
    # Get the Python version
    if sys_version is None:
        sys_version = sys.version

    # Try the cache first
    result = _sys_version_cache.get(sys_version, None)
    if result is not None:
        return result

    # Parse it
    if 'IronPython' in sys_version:
        # ...
[ "def _sys_version(sys_version=None):\n # Get the Python version\n if sys_version is None:\n sys_version = sys.version\n\n # Try the cache first\n result = _sys_version_cache.get(sys_version, None)\n if result is not None:\n return result\n\n # Parse it\n if 'IronPython' in sys_ver...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string identifying the Python implementation.
def python_implementation():
    return _sys_version()[0]
[ "def _implementation():\n implementation = platform.python_implementation()\n\n if implementation == 'CPython':\n implementation_version = platform.python_version()\n elif implementation == 'PyPy':\n implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the Python version as tuple (major, minor, patchlevel) of strings. Note that unlike the Python sys.version, the returned value will always include the patchlevel (it defaults to 0).
def python_version_tuple():
    return tuple(_sys_version()[1].split('.'))
[ "def get_major_minor_version() -> str:\n return \"{}.{}\".format(*sys.version_info)", "def version_info():\r\n return tuple(map(int, __version__.split('.')))", "def determine_python_version():\n py_version_cmd = 'import sys; print(\"%s %s\" % sys.version_info[0:2])'\n check_version = subprocess.chec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string identifying the Python implementation branch. For CPython this is the SCM branch from which the Python binary was built. If not available, an empty string is returned.
def python_branch():
    return _sys_version()[2]
[ "def scm_branch(self):\n return self._data.get('scm_branch')", "def get_branch(project_root: str) -> str:\n if os.path.isfile(os.path.join(os.path.abspath(project_root), os.pardir, os.pardir) + '/VERSION'):\n with open(os.path.join(os.path.abspath(project_root), os.pardir, os.pardir) + '/VERSION'...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string identifying the Python implementation revision. For CPython this is the SCM revision from which the Python binary was built. If not available, an empty string is returned.
def python_revision():
    return _sys_version()[3]
[ "def version_string():\n return '%s %s' % (__release__, __svn_revision__)", "def pyversion(self):\n return self.eval(\"platform.python_version()\", ['platform']).strip()", "def get_revision(self) -> str:\n try:\n return self.cmd.rev_parse(verify=True, args=\"HEAD\", check_returncode=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a string identifying the compiler used for compiling Python.
def python_compiler():
    return _sys_version()[6]
[ "def get_compiler(filenames):\n\n any_cxx = False\n suffixes = set()\n for f in filenames:\n prefix, suffix = os.path.splitext(f)\n if suffix == '.o':\n prefix, suffix = os.path.splitext(prefix)\n\n suffixes.add(suffix[1:])\n\n if 'cpp' in suffixes:\n return 'g++'\n else:\n return 'gcc'", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to find the source and target of floating edges. For now it only works for edges that support a relation between two other edges; it can be extended to other cases.
def find_missing_source_target(property_restrictions, object_properties, sources_targets):
    # Under this scope restrictions are all relations that indicate relationships
    # between other object properties
    for restriction in property_restrictions:
        child = restriction["xml_object"]
        geom_prope...
[ "def referenceEdge(u,v):\n v1 = u\n v2 = v\n\n e1 = u.getEdge().getPrev()\n e2 = v.getEdge().getPrev()\n\n aux = None #aux is an half-edge incident to u \n while aux != e1:\n if aux is None: aux = e1\n aux2 = None #aux2 is an half-edge incident to v\n\n while aux2 != e2:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function keeps the album table clean, without any empty albums. Empty albums are albums that have no picture associated with them. This is necessary when uploading SenseCam images, which are uploaded into a temporary album created at the beginning for this purpose.
def remove_empty_albums(aid):
    print("aid")
    print(aid)
    if aid is None:
        return
    con = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')
    with con:
        query = "SELECT count(*) from fileuploader_picture WHERE album_id=%s" % (aid)
        cur = con.cursor()
        cur.execute(query)
        data = cur.fetchall()
        ...
[ "def clean_up(self, graph):\n # Delete albums associated with place\n if len(self.albums) != 0:\n for album in self.albums:\n album.clean_up()\n album.delete(graph)", "def delete_image_album(self):\n if self.album:\n self.album.delete()", ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function retrieves the sensor type id according to its abbreviation.
def get_sensor_type_id(abbreviation):
    if abbreviation is None:
        return
    con = mdb.connect('localhost', 'root', 'sensepass', 'sensecambrowser')
    with con:
        query = "SELECT id from fileuploader_sensortype WHERE abbreviation=%s" % (abbreviation)
        cur = con.cursor()
        cur.execute(query)
        data = cur.fetchall()
        ...
[ "def get_sensor_type_id(sensor_type_name):\n query = db.session.query(\n TypeClass.id,\n ).filter(TypeClass.sensor_type == sensor_type_name)\n sensor_id = db.session.execute(query).fetchone()\n if isinstance(sensor_id, Iterable):\n sensor_id = sensor_id[0]\n return sensor_id", "def ge...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Plot a performance metric vs. forecast horizon from cross validation. Cross validation produces a collection of out-of-sample model predictions that can be compared to actual values, at a range of different horizons (distance from the cutoff). This computes a specified performance metric for each prediction, and aggregat...
def plot_cross_validation_metric(
    df_cv, metric, rolling_window=0.1, ax=None, figsize=(10, 6)
):
    if ax is None:
        fig = plt.figure(facecolor='w', figsize=figsize)
        ax = fig.add_subplot(111)
    else:
        fig = ax.get_figure()

    # Get the metric at the level of individual predictions, and with...
[ "def plot_performance(y_true, y_pred, y_true_valid=None, y_pred_valid=None, metrics=None, **kwargs):\n\n validation = (y_true_valid is not None) & (y_pred_valid is not None)\n\n if metrics is None:\n metrics = ['MAE', 'mean_squared_error',\n 'root_mean_squared_error', 'r2']\n elif ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" $\alpha$geodesic between two probability distributions
def alpha_geodesic(
    a: torch.Tensor,
    b: torch.Tensor,
    alpha: float,
    lmd: float
) -> torch.Tensor:
    a_ = a + 1e-12
    b_ = b + 1e-12
    if alpha == 1:
        return torch.exp((1 - lmd) * torch.log(a_) + lmd * torch.log(b_))
    elif alpha >= 1e+9:
        return torch.min(a_, b_)
    elif alpha <=...
[ "def minkowski_dist(p, ds1, ds2):\n for ((r1, g1, b1), (r2, g2, b2)) in itertools.izip(ds1.rgbs, ds2.rgbs):\n s = ( abs(r1 - r2) ** p ) + \\\n ( abs(g1 - g2) ** p ) + \\\n ( abs(b1 - b2) ** p )\n\n return s ** (1.0 / float(p))", "def alpha_jsd(G1, G2):\n p1 = alph...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate and Upload the Discharge Summary
def generate_discharge_summary_task(consultation_ext_id: str):
    logger.info(f"Generating Discharge Summary for {consultation_ext_id}")
    try:
        consultation = PatientConsultation.objects.get(external_id=consultation_ext_id)
    except PatientConsultation.DoesNotExist as e:
        raise CeleryTaskException(
            ...
[ "def generate_summary(self) -> None:\n\n headers = [\"Location\", \"record high <br/> for Dec 25\",\n \"december <br/> average\",\n \"contiguous <br/> precipitation\",\n \"percentage <br/> snowfall\"]\n\n with open(\"report.md\", 'w') as f:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterate over all available HbTasks.
def available_hbtasks():
    for tname in dir(tasks):
        t = getattr(tasks, tname)
        if inspect.isclass(t) and issubclass(t, tasks.HbTask) and t is not tasks.HbTask:
            yield tname, t
[ "def get_all_system_command():\n pass", "def executables(self):\n return []", "async def list_tasks():", "def getTasks():", "def test_get_executables_available(self):\n pass", "def available_shells(self):", "def all_work_paths():\n for path in (resources_root / \"work_dirs\").ite...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check the settings of an HbTask; returns JSON with a result hash, which you can then use to set up the next task, e.g. /check/Add/?x=1&y=5
def check(request, task_name):
    try:
        todo = getattr(tasks, task_name, None)
    except KeyError:
        return JsonResponse(
            {'error': 'This {} is not a known task'.format(task_name)})
    parameters = todo().settings.get.keys()
    try:
        kwargs = {par: request.GET[par] for par in par...
[ "def available(request):\n hashes = request.GET.getlist('h',None)\n available = {}\n for h in hashes:\n \n available.update({h:check_available_object(h)})\n\n return JsonResponse(available)", "def simhash():\n try:\n url = request.args.get('url')\n if not url:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a task result to a project; provide the hash and the project name.
def add_to_project(resulthash, project):
    t = models.HBTask.objects.get(resulthash=resulthash)
    p = models.Project.objects.get(name=project)
    p.tasks.add(t)
[ "def add_task_results(self, tasks):\n\n task_list = self._task_results['tasks']\n for task in tasks:\n task_dict = {'task_id': task.id, 'type': task.task_type, 'was_launched': task.has_been_launched}\n if task.has_been_launched:\n task_dict.update(launched=datetime...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if hashes are already available E.g. /available/?h=1&h=2&h=1adf22a49521bb4da13686d2560953a6
def available(request):
    hashes = request.GET.getlist('h', None)
    available = {}
    for h in hashes:
        available.update({h: check_available_object(h)})
    return JsonResponse(available)
[ "def hash_exists_remotely(self):\n try:\n self.lookup_by_hash()\n except requests.exceptions.HTTPError:\n return False\n return True", "def has_hash(self, h):\n rsp = h.hashlist(self.path)\n if re.search(\"\\n[0-9a-f]+\\smd5\\s%s\" % self.path, rsp):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find most probable region in HarvardOxford Atlas of a vox coord.
def locate_peaks(vox_coords):
    sub_names = harvard_oxford_sub_names
    ctx_names = harvard_oxford_ctx_names
    at_dir = op.join(os.environ["FSLDIR"], "data", "atlases")
    ctx_data = nib.load(op.join(at_dir, "HarvardOxford",
                             "HarvardOxford-cort-prob-2mm.nii.gz")).get_data()
    sub_dat...
[ "def _find_region(self, x, y):\n result = None\n for region in self.regions:\n if region.contains(x, y):\n if result is None or result.area() > region.area():\n result = region\n return result", "def find_borough(self,lat,lon):\n pt = Point(...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update location of an incident
def update_location_only(self, location, incident_id):
    # parameterized query avoids SQL injection via string formatting
    self.cursor.execute(
        """UPDATE incidents SET location=%s WHERE incident_id=%s""",
        (location, incident_id))
    self.commiting()
[ "def update_location(self, id, location):\n sql = f\"UPDATE incidences SET location = \\'{location}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def upsert_location(self, location):", "def upd...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update comment of an incident
def update_comment_only(self, comment, incident_id):
    # parameterized query avoids SQL injection via string formatting
    self.cursor.execute(
        """UPDATE incidents SET comment=%s WHERE incident_id=%s""",
        (comment, incident_id))
    self.commiting()
[ "def update_comment(self, id, comment):\n sql = f\"UPDATE incidences SET comment = \\'{comment}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()", "def update_comment(Session, com...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
update title of an incident
def update_title_only(self, title, incident_id):
    # parameterized query avoids SQL injection via string formatting
    self.cursor.execute(
        """UPDATE incidents SET title=%s WHERE incident_id=%s""",
        (title, incident_id))
    self.commiting()
[ "def updateHealtTitle (self, title) : \n\t\tself._healthTitle = self._healthTitle + title", "def update_title(self, title: str) -> None:\n self.title = title", "def _update_title(self, title, tag, lid):\n return title", "def _UpdateTitle(self, event, new_title='Updated event title'):\n\n prev...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
delete a specific incident
def delete_specific_incident(self, incident_id):
    # parameterized query avoids SQL injection via string formatting
    self.cursor.execute(
        """DELETE FROM incidents WHERE incident_id=%s AND status='draft'""",
        (incident_id,))
    self.commiting()
    return incident_id
[ "def delete_incident(*, db_session: Session = Depends(get_db), incident_id: str):\n incident = get(db_session=db_session, incident_id=incident_id)\n if not incident:\n raise HTTPException(status_code=404, detail=\"The requested incident does not exist.\")\n delete(db_session=db_session, incident_id=...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return branches that should be used as bases to check for branches that are already contained within them. The first branch in the list is the default branch for the origin remote.
def base_branches() -> list[str]:
    branches = []
    default = sh("git rev-parse --abbrev-ref origin/HEAD").removeprefix("origin/")
    branches.append(default)
    releases = sh(
        "git branch --all --sort=-committerdate --list *release/* | head -10"
    ).splitlines()
    releases = [b.removeprefix("*").st...
[ "def branches(self):\n return self.get_branches(\n include_except_branches=False,\n include_reraise_branches=False)", "def branches(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"branches\", _args)\n return _ctx.execute_sync(list[str])", "def get_remote...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create the Refund for the Invoice.
def post(invoice_id):
    current_app.logger.info(f'<Refund.post : {invoice_id}')
    request_json = request.get_json(silent=True)
    try:
        valid_format, errors = schema_utils.validate(request_json, 'refund') if request_json else (True, None)
        if not valid_format:
            retu...
[ "def gen_refund(account_id, invoice_id, invoice_date, amount, payment_id, payment_date):\n refund = Refund(\n refund_id=gen_id(),\n account_id=account_id,\n invoice_id=invoice_id,\n payment_id=payment_id,\n refund_amount=amount * random.choice(range(10, 101))/100,\n refu...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Flag=1 for subproblem of ALR; Flag=2 for subproblem of LR; Flag=3 for subproblem-mean of ALR; Flag=4 for subproblem-mean of LR.
def g_solving_subproblem_of_ALR(self, vehicle_id):
    global_LB = -10000
    global_UB = 10000
    iteration_for_RSP = 20
    optimal_solution_for_RSP = None
    self.multiplier_v = 0.5

    # solve the expected shortest path problem
    self.g_dynamic_programming_algorithm(vehicle_i...
[ "def get_mean_fit(flag='L'):\n if flag == 'L':\n return np.mean(np.vstack(l_coeff_queue), axis =0) if len(l_coeff_queue)>1 else l_coeff_queue[-1]\n else:\n return np.mean(np.vstack(r_coeff_queue), axis =0) if len(r_coeff_queue)>1 else r_coeff_queue[-1]", "def aspcapflag(aspcapfield) :\n\n p...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns True if another observation has been associated with the scan. It also replaces that observation if the new one is closer in time to the respective wifi scan.
def scan_observed(scan_timestamp, ap_scan, location, previous_observations, user_id):
    location_timestamp = location[0]
    latitude = location[1]
    longitude = location[2]
    distance_to_closest_scan = abs(location_timestamp - scan_timestamp)
    for observation in previous_observations:
        observed_wifi_scan_time = observat...
[ "def isDuplicate(self, a, b):\n\n isDuplicate = (\n abs(a['distance_in_km'] - b['distance_in_km']) \n < RunDataProcessor.KM_SIMILARITY_THRESHOLD and \n abs((a['start_timestamp'].tz_convert(None) - b['start_timestamp'].tz_convert(None)).total_seconds()) \n <...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a user by phone_num
def get_user_by_phone(phone_num):
    user = db.session.query(User).filter(phone_num == User.phone_num).first()
    return user

    # SELECT * FROM users WHERE phone_num == phone_num
    # User.query.filter(User.phone_num == phone_num).one()
[ "def get_user_by_phone(phone_num):\n\n user = db.session.query(User).filter(phone_num == User.phone_num)\n return user\n \n # User.query.filter(User.phone_num == phone_num).one()", "def get_user_by_phone(self, phone):\n sql = 'select id ,first_name' \\\n ',last_name' \\\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete a user from DB by phone num
def remove_user(user):
    # user.confirmed = False
    # user = get_user_by_phone(phone_num)
    db.session.delete(user)
    db.session.commit()
    return user

    # DELETE FROM users WHERE user.phone_num == phone)
[ "def deleterecord(phones,username,phonenum):\r\n if username in phones:\r\n del phones[username]\r\n else:\r\n raise ValueError(\"This username are not exist\")", "def delete_user():", "async def delete_phone(self, code: int, prefix: int, phone: int, password: str):\n data = {\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge two sorted lists. Returns a new sorted list containing all of the elements that are in both list1 and list2. This function can be iterative.
def merge(list1, list2):
    answer = []
    assert answer == sorted(answer)
    idx1 = 0
    idx2 = 0
    while (idx1 < len(list1)) and (idx2 < len(list2)):
        if list1[idx1] < list2[idx2]:
            answer.append(list1[idx1])
            idx1 += 1
        elif list1[idx1] > list2[idx2]:
            answer.app...
[ "def merge(list1, list2): \n merge_list = []\n _ind1 = _ind2 = 0\n while _ind1 < len(list1) and _ind2 < len(list2) :\n if list1[_ind1] < list2[_ind2]:\n merge_list.append(list1[_ind1])\n _ind1 += 1\n else:\n merge_list.append(list2[_ind2])\n _ind2...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Override initializer for Grid, add queue to store boundary of fire
def __init__(self, grid_height, grid_width): poc_grid.Grid.__init__(self, grid_height, grid_width) self._fire_boundary = poc_queue.Queue()
[ "def __init__(self):\n self.global_queue = Queue()\n self.queues = []", "def __init__(self):\n self._grid = [[None]]", "def __init__(self, queue: CityOverheadTimeQueue):\n super().__init__()\n self.queue = queue\n self.data_incoming = True", "def __init__(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the size of the boundary of the fire
def boundary_size(self): return len(self._fire_boundary)
[ "def _rect_size(self):\n bnd = self._bounds\n return (bnd[1][0] - bnd[0][0], bnd[1][1] - bnd[0][1])", "def size(self):\n bbox = self.bbox\n return bbox[1] - bbox[0]", "def size (self):\n return self.eman.component_for_entity (self.e, Hitbox).size", "def size(self):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generator for the boundary of the fire
def fire_boundary(self): for cell in self._fire_boundary: yield cell # alternative syntax #return (cell for cell in self._fire_boundary)
[ "def boundary(self):\n\t\ttemp = 0;\n\t\tfor i in xrange(self.min_tags_in_window): temp += self.poisson_value[i];\n\t\ttemp = pow(temp, self.gap_size+1); \n\t\treturn temp*temp; # start & end ", "def boundary(self): # -> BaseGeometry:\n ...", "def fire_boundary(forest_sim):\n boundary = []\n\n for...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function that spreads the wildfire using one step of BFS. Updates both the cells and the fire_boundary.
def update_boundary(self): cell = self._fire_boundary.dequeue() neighbors = self.four_neighbors(cell[0], cell[1]) #neighbors = self.eight_neighbors(cell[0], cell[1]) for neighbor in neighbors: if self.is_empty(neighbor[0], neighbor[1]): self.set_full(neighbor...
[ "def build_fireball():\n # build the right part\n build_rightpart()\n\n # copy it to 4.\n copy(0, 4)\n\n # build the left part, now it's in 0\n build_leftpart()\n\n # copy right part from 4 to 1.\n copy(4, 1)\n # smash together for whole fireball.\n smash()", "def simulate_fire(self,...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Class invariant that checks whether every cell on the boundary also has the corresponding grid cell set to FULL
def boundary_invariant(self): for cell in self.fire_boundary(): if self.is_empty(cell[0], cell[1]): print("Cell " + str(cell) + " in fire boundary is empty.") return False return True
[ "def isGridFull(self):\r\n for i, j in itertools.product(range(1, self.size + 1), range(1, self.size + 1)):\r\n if self.getCell(i, j) == 0:\r\n return False\r\n return True", "def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a range from a to b, including both endpoints
def from_inclusive(a, b): c = int(b > a)*2-1 return range(a, b+c, c)
[ "def bidirectional_inclusive_range(start, end):\n if end > start:\n return list(range(start, end + 1))\n else:\n return list(range(start, end - 1, -1))", "def get_range(self, start, end):", "def new_range(r):\n if isinstance(r, list) or isinstance(r, tuple) and len(r) == 2:\n lower...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the chunk at given chunk coordinates
def get_chunk(self, x: int, z: int) -> EmptyChunk: if not self.inside(x, 0, z, chunk=True): raise OutOfBoundsCoordinates(f'Chunk ({x}, {z}) is not inside this region') return self.chunks[z % 32 * 32 + x % 32]
[ "def get_block(self, chunk, coords):\n\n return chunk.get_block(coords)", "def at_coord(x, y):\n return (x // CHUNK_SIZE, y // CHUNK_SIZE)", "def chunkCoordAt(pos, scale):\n chunk_x = int((0.5 + pos.x / scale) // 1)\n chunk_y = int((0.5 + pos.y / scale) // 1)\n return chunk_x, chunk_y", "de...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds given chunk to this region. Will overwrite if a chunk already exists in this location
def add_chunk(self, chunk: EmptyChunk): if not self.inside(chunk.x, 0, chunk.z, chunk=True): raise OutOfBoundsCoordinates(f'Chunk ({chunk.x}, {chunk.z}) is not inside this region') self.chunks[chunk.z % 32 * 32 + chunk.x % 32] = chunk
[ "def add_chunk(self, chunk):\n self.list_of_chunks.append(chunk)", "def add_chunk(self, chunk):\n self.chunkbuffer.appendleft(chunk)", "def add(self, chunkOrToken):\n chunkOrToken.setParent(self)\n self.dtrs.append(chunkOrToken)\n self.positionCount += 1", "def _add_cached_c...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds section to chunk at (x, z). Same as ``EmptyChunk.add_section(section)``
def add_section(self, section: EmptySection, x: int, z: int, replace: bool=True): if not self.inside(x, 0, z, chunk=True): raise OutOfBoundsCoordinates(f'Chunk ({x}, {z}) is not inside this region') chunk = self.chunks[z % 32 * 32 + x % 32] if chunk is None: chunk = Empty...
[ "def add_section(self, section):\n\n section.index = len(self.sections)\n self.sections[section.name] = section", "def add_section(self, section):\n pass # pragma: nocover", "def add_section(self, section):\n self.sections.append(section)", "def add_chunk(self, chunk: EmptyChunk...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Helper function that only sets the block if ``self.inside(x, y, z)`` is true
def set_if_inside(self, block: Block, x: int, y: int, z: int): if self.inside(x, y, z): self.set_block(block, x, y, z)
[ "def is_inside(self,x_point:float, y_point:float, z_point:float) -> bool:\n mid_to_point = Shape.eu_dis(self.x, x_point, self.y, y_point, self.z, z_point)\n return mid_to_point <= self.radius", "def in_block(x, y, input_suduko_3d):\n block_id = 100\n\n if x < 3 and y < 3: # First if statement...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fills in blocks from ``(x1, y1, z1)`` to ``(x2, y2, z2)`` in a rectangular box.
def fill(self, block: Block, x1: int, y1: int, z1: int, x2: int, y2: int, z2: int, ignore_outside: bool=False): if not ignore_outside: if not self.inside(x1, y1, z1): raise OutOfBoundsCoordinates(f'First coords ({x1}, {y1}, {z1}) is not inside this region') if not self.in...
[ "def block(self,xy1,xy2,color,color2=None):\n x1 = self._int(min((xy1[0],xy2[0])))\n x2 = self._int(max((xy1[0],xy2[0])))\n y1 = self._int(min((xy1[1],xy2[1])))\n y2 = self._int(max((xy1[1],xy2[1])))\n for x in range(x1,x2):\n if color2 is not None and x%2 != 0:\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the last element from an iterator.
def last(iterator): item = None for item in iterator: pass return item
[ "def last(iter):\n x = None\n for x in iter:\n pass\n return x", "def last(iterable):\n it = iter(iterable)\n item = next(it)\n for item in it:\n pass\n return item", "def return_last(iter):\n for thing in iter:\n pass\n return thing", "def last(iterable):\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Normalize key. If the input key is a "."-delimited string of nested keys, it is split on the delimiter. If the input key is a tuple of keys, they are simply yielded. In the string form, escaped "." characters are not interpreted as delimiters.
def normkey(key: Key) -> Generator[str, None, None]: if isinstance(key, tuple): for k in key: yield k elif isinstance(key, str): if "." in key: for k in re.split(r"(?<!\\)\.", key): yield k else: yield key else: raise TypeEr...
[ "def _key_split(self, key):\n if isinstance(key, str):\n return key.split('.')\n else:\n raise ValueError(\"Expected a <str> type\")", "def sanitize_key(key):\n if not isinstance(key, six.string_types):\n raise TypeError('String key expected.')\n # remove leading a...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes population, cache and storage
def initialize(self): self.population.initialize() self.cache.initialize() if self.storage: self.storage.initialize()
[ "def __init__(self):\n self.init(**self.get_init_storage())", "def initialize_storage():\n pass", "def _initialisation(self):\n\n self._maps_initialisation()\n self._distr_initialisation()\n self._em_initialisation()", "def __init__(self):\n self._store: ObservationStore ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Store an individual in the storage backend.
def store_individual(self, hash, individual): if self.storage: self.storage.write_individual(hash, self.generation, individual )
[ "def put(self, name, obj, lifetime=ObjectLifetime.Event):\n\n # check if object with the same name is already stored?\n if name in self.store.keys():\n raise AlreadyInDataStore()\n # no, store it!\n self.store[name] = (lifetime, obj)", "def store(self, key, blob, metadata):\...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns best individual in population (relies on Population method)
def best_individual(self): return self.population.best_individual()
[ "def best_individual(self, population):\n\n best_fitness = -np.inf\n for individual in population:\n fitness = self.fitness(individual)\n if fitness > best_fitness:\n best_fitness = fitness\n best_individual = individual\n\n return best_indivi...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a new population with population.evolve, sets the new population as the current one, and increments the generation. After this, the population from the previous generation is lost.
def evolve_population(self, **options): new_population = self.population.evolve(**options) self.population = new_population self.generation = new_population.generation_number
[ "def regenerate_population(self):\n self.population = [\n self._generator() for _ in range(len(self.population))\n ]", "def newGeneration(self):\n for i in range(0, len(self.population)):\n #[ind1, ind2] = self.randomSelection()\n [ind1, ind2] = self.bestSelec...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Performs the evolution cycle. This is the main method that should normally be called. Evolution goes on until a termination criterion becomes True. At the end, the best individual is returned.
def evolve(self, **options): if not self.termination_criteria: raise TypeError("You must set one or more termination criteria") self.initialize() self.evaluate_population(**options) while True: if self.should_terminate(): break ...
[ "def run(param):\n\n # Create population\n population = initialize_population(param)\n # Start evolutionary search\n best_ever = search_loop(population, param)\n return best_ever\n #return best_ever", "def evolve(self):\n self.generation = 0\n start_time = time.time()\n\n # ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Allows the user to place bets; returns None.
def get_player_bet(self) -> None: print("Please enter the amount you want to bet.") while self.user.bet == 0: input_ = input(">>> ") try: input_ = float(input_) self.user.bet = input_ except ValueError as e: print(str(e)...
[ "def place_bet(self, bet):\n pass", "def place_bet(self) -> None:\n amount = self.get_bet()\n while not self.valid_bet(amount):\n print(f\"That is an invalid bet. Please input an amount within ${MINIMUM_BET()} and ${self.balance}\\n\")\n amount = self.get_bet()\n ...
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }